first commit
This commit is contained in:
74
.dockerignore
Normal file
74
.dockerignore
Normal file
@@ -0,0 +1,74 @@
|
|||||||
|
# =============================================================================
|
||||||
|
# Docker Ignore File for Sub2API
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
# Git
|
||||||
|
.git
|
||||||
|
.gitignore
|
||||||
|
.gitattributes
|
||||||
|
|
||||||
|
# Documentation
|
||||||
|
*.md
|
||||||
|
!deploy/DOCKER.md
|
||||||
|
docs/
|
||||||
|
|
||||||
|
# IDE
|
||||||
|
.idea/
|
||||||
|
.vscode/
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
|
||||||
|
# OS files
|
||||||
|
.DS_Store
|
||||||
|
Thumbs.db
|
||||||
|
|
||||||
|
# Build artifacts
|
||||||
|
dist/
|
||||||
|
build/
|
||||||
|
|
||||||
|
# Node modules (will be installed in container)
|
||||||
|
frontend/node_modules/
|
||||||
|
node_modules/
|
||||||
|
|
||||||
|
# Go build cache (will be built in container)
|
||||||
|
backend/vendor/
|
||||||
|
|
||||||
|
# Test files
|
||||||
|
*_test.go
|
||||||
|
**/*.test.js
|
||||||
|
coverage/
|
||||||
|
.nyc_output/
|
||||||
|
|
||||||
|
# Environment files
|
||||||
|
.env
|
||||||
|
.env.*
|
||||||
|
!.env.example
|
||||||
|
|
||||||
|
# Local config
|
||||||
|
config.yaml
|
||||||
|
config.local.yaml
|
||||||
|
|
||||||
|
# Logs
|
||||||
|
*.log
|
||||||
|
logs/
|
||||||
|
|
||||||
|
# Temporary files
|
||||||
|
tmp/
|
||||||
|
temp/
|
||||||
|
*.tmp
|
||||||
|
|
||||||
|
# Deploy files (not needed in image)
|
||||||
|
deploy/install.sh
|
||||||
|
deploy/sub2api.service
|
||||||
|
deploy/sub2api-sudoers
|
||||||
|
|
||||||
|
# GoReleaser
|
||||||
|
.goreleaser.yaml
|
||||||
|
|
||||||
|
# GitHub
|
||||||
|
.github/
|
||||||
|
|
||||||
|
# Claude files
|
||||||
|
.claude/
|
||||||
|
issues/
|
||||||
|
CLAUDE.md
|
||||||
16
.github/audit-exceptions.yml
vendored
Normal file
16
.github/audit-exceptions.yml
vendored
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
version: 1
|
||||||
|
exceptions:
|
||||||
|
- package: xlsx
|
||||||
|
advisory: "GHSA-4r6h-8v6p-xvw6"
|
||||||
|
severity: high
|
||||||
|
reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2023-30533)"
|
||||||
|
mitigation: "Load only on export; restrict export permissions and data scope"
|
||||||
|
expires_on: "2026-04-05"
|
||||||
|
owner: "security@your-domain"
|
||||||
|
- package: xlsx
|
||||||
|
advisory: "GHSA-5pgg-2g8v-p4x9"
|
||||||
|
severity: high
|
||||||
|
reason: "Admin export only; switched to dynamic import to reduce exposure (CVE-2024-22363)"
|
||||||
|
mitigation: "Load only on export; restrict export permissions and data scope"
|
||||||
|
expires_on: "2026-04-05"
|
||||||
|
owner: "security@your-domain"
|
||||||
47
.github/workflows/backend-ci.yml
vendored
Normal file
47
.github/workflows/backend-ci.yml
vendored
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
name: CI
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
pull_request:
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: backend/go.mod
|
||||||
|
check-latest: false
|
||||||
|
cache: true
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.25.5'
|
||||||
|
- name: Unit tests
|
||||||
|
working-directory: backend
|
||||||
|
run: make test-unit
|
||||||
|
- name: Integration tests
|
||||||
|
working-directory: backend
|
||||||
|
run: make test-integration
|
||||||
|
|
||||||
|
golangci-lint:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: backend/go.mod
|
||||||
|
check-latest: false
|
||||||
|
cache: true
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.25.5'
|
||||||
|
- name: golangci-lint
|
||||||
|
uses: golangci/golangci-lint-action@v9
|
||||||
|
with:
|
||||||
|
version: v2.7
|
||||||
|
args: --timeout=5m
|
||||||
|
working-directory: backend
|
||||||
272
.github/workflows/release.yml
vendored
Normal file
272
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
name: Release
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'v*'
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
tag:
|
||||||
|
description: 'Tag to release (e.g., v1.0.0)'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
simple_release:
|
||||||
|
description: 'Simple release: only x86_64 GHCR image, skip other artifacts'
|
||||||
|
required: false
|
||||||
|
type: boolean
|
||||||
|
default: false
|
||||||
|
|
||||||
|
# 环境变量:合并 workflow_dispatch 输入和 repository variable
|
||||||
|
# tag push 触发时读取 vars.SIMPLE_RELEASE,workflow_dispatch 时使用输入参数
|
||||||
|
env:
|
||||||
|
SIMPLE_RELEASE: ${{ github.event.inputs.simple_release == 'true' || vars.SIMPLE_RELEASE == 'true' }}
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: write
|
||||||
|
packages: write
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
# Update VERSION file with tag version
|
||||||
|
update-version:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Update VERSION file
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
VERSION=${{ github.event.inputs.tag }}
|
||||||
|
VERSION=${VERSION#v}
|
||||||
|
else
|
||||||
|
VERSION=${GITHUB_REF#refs/tags/v}
|
||||||
|
fi
|
||||||
|
echo "$VERSION" > backend/cmd/server/VERSION
|
||||||
|
echo "Updated VERSION file to: $VERSION"
|
||||||
|
|
||||||
|
- name: Upload VERSION artifact
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: version-file
|
||||||
|
path: backend/cmd/server/VERSION
|
||||||
|
retention-days: 1
|
||||||
|
|
||||||
|
build-frontend:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Setup pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
|
||||||
|
- name: Setup Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: '20'
|
||||||
|
cache: 'pnpm'
|
||||||
|
cache-dependency-path: frontend/pnpm-lock.yaml
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: pnpm install --frozen-lockfile
|
||||||
|
working-directory: frontend
|
||||||
|
|
||||||
|
- name: Build frontend
|
||||||
|
run: pnpm run build
|
||||||
|
working-directory: frontend
|
||||||
|
|
||||||
|
- name: Upload frontend artifact
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: frontend-dist
|
||||||
|
path: backend/internal/web/dist/
|
||||||
|
retention-days: 1
|
||||||
|
|
||||||
|
release:
|
||||||
|
needs: [update-version, build-frontend]
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
ref: ${{ github.event.inputs.tag || github.ref }}
|
||||||
|
|
||||||
|
- name: Download VERSION artifact
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: version-file
|
||||||
|
path: backend/cmd/server/
|
||||||
|
|
||||||
|
- name: Download frontend artifact
|
||||||
|
uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: frontend-dist
|
||||||
|
path: backend/internal/web/dist/
|
||||||
|
|
||||||
|
- name: Setup Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: backend/go.mod
|
||||||
|
check-latest: false
|
||||||
|
cache-dependency-path: backend/go.sum
|
||||||
|
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.25.5'
|
||||||
|
|
||||||
|
# Docker setup for GoReleaser
|
||||||
|
- name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v3
|
||||||
|
|
||||||
|
- name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
|
- name: Login to DockerHub
|
||||||
|
if: ${{ env.DOCKERHUB_USERNAME != '' }}
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
env:
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Login to GitHub Container Registry
|
||||||
|
uses: docker/login-action@v3
|
||||||
|
with:
|
||||||
|
registry: ghcr.io
|
||||||
|
username: ${{ github.repository_owner }}
|
||||||
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Fetch tags with annotations
|
||||||
|
run: |
|
||||||
|
# 确保获取完整的 annotated tag 信息
|
||||||
|
git fetch --tags --force
|
||||||
|
|
||||||
|
- name: Get tag message
|
||||||
|
id: tag_message
|
||||||
|
run: |
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
TAG_NAME=${{ github.event.inputs.tag }}
|
||||||
|
else
|
||||||
|
TAG_NAME=${GITHUB_REF#refs/tags/}
|
||||||
|
fi
|
||||||
|
echo "Processing tag: $TAG_NAME"
|
||||||
|
|
||||||
|
# 获取完整的 tag message(跳过第一行标题)
|
||||||
|
TAG_MESSAGE=$(git tag -l --format='%(contents:body)' "$TAG_NAME")
|
||||||
|
|
||||||
|
# 调试输出
|
||||||
|
echo "Tag message length: ${#TAG_MESSAGE}"
|
||||||
|
echo "Tag message preview:"
|
||||||
|
echo "$TAG_MESSAGE" | head -10
|
||||||
|
|
||||||
|
# 使用 EOF 分隔符处理多行内容
|
||||||
|
echo "message<<EOF" >> $GITHUB_OUTPUT
|
||||||
|
echo "$TAG_MESSAGE" >> $GITHUB_OUTPUT
|
||||||
|
echo "EOF" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Set lowercase owner for GHCR
|
||||||
|
id: lowercase
|
||||||
|
run: echo "owner=$(echo '${{ github.repository_owner }}' | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
|
- name: Run GoReleaser
|
||||||
|
uses: goreleaser/goreleaser-action@v6
|
||||||
|
with:
|
||||||
|
version: '~> v2'
|
||||||
|
args: release --clean --skip=validate ${{ env.SIMPLE_RELEASE == 'true' && '--config=.goreleaser.simple.yaml' || '' }}
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
TAG_MESSAGE: ${{ steps.tag_message.outputs.message }}
|
||||||
|
GITHUB_REPO_OWNER: ${{ github.repository_owner }}
|
||||||
|
GITHUB_REPO_OWNER_LOWER: ${{ steps.lowercase.outputs.owner }}
|
||||||
|
GITHUB_REPO_NAME: ${{ github.event.repository.name }}
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME || 'skip' }}
|
||||||
|
|
||||||
|
# Update DockerHub description
|
||||||
|
- name: Update DockerHub description
|
||||||
|
if: ${{ env.SIMPLE_RELEASE != 'true' && env.DOCKERHUB_USERNAME != '' }}
|
||||||
|
uses: peter-evans/dockerhub-description@v4
|
||||||
|
env:
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
repository: ${{ secrets.DOCKERHUB_USERNAME }}/sub2api
|
||||||
|
short-description: "Sub2API - AI API Gateway Platform"
|
||||||
|
readme-filepath: ./deploy/DOCKER.md
|
||||||
|
|
||||||
|
# Send Telegram notification
|
||||||
|
- name: Send Telegram Notification
|
||||||
|
if: ${{ env.SIMPLE_RELEASE != 'true' }}
|
||||||
|
env:
|
||||||
|
TELEGRAM_BOT_TOKEN: ${{ secrets.TELEGRAM_BOT_TOKEN }}
|
||||||
|
TELEGRAM_CHAT_ID: ${{ secrets.TELEGRAM_CHAT_ID }}
|
||||||
|
DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
# 检查必要的环境变量
|
||||||
|
if [ -z "$TELEGRAM_BOT_TOKEN" ] || [ -z "$TELEGRAM_CHAT_ID" ]; then
|
||||||
|
echo "Telegram credentials not configured, skipping notification"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
|
||||||
|
TAG_NAME=${{ github.event.inputs.tag }}
|
||||||
|
else
|
||||||
|
TAG_NAME=${GITHUB_REF#refs/tags/}
|
||||||
|
fi
|
||||||
|
VERSION=${TAG_NAME#v}
|
||||||
|
REPO="${{ github.repository }}"
|
||||||
|
GHCR_IMAGE="ghcr.io/${REPO,,}" # ${,,} converts to lowercase
|
||||||
|
|
||||||
|
# 获取 tag message 内容
|
||||||
|
TAG_MESSAGE='${{ steps.tag_message.outputs.message }}'
|
||||||
|
|
||||||
|
# 限制消息长度(Telegram 消息限制 4096 字符,预留空间给头尾固定内容)
|
||||||
|
if [ ${#TAG_MESSAGE} -gt 3500 ]; then
|
||||||
|
TAG_MESSAGE="${TAG_MESSAGE:0:3500}..."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# 构建消息内容
|
||||||
|
MESSAGE="🚀 *Sub2API 新版本发布!*"$'\n'$'\n'
|
||||||
|
MESSAGE+="📦 版本号: \`${VERSION}\`"$'\n'$'\n'
|
||||||
|
|
||||||
|
# 添加更新内容
|
||||||
|
if [ -n "$TAG_MESSAGE" ]; then
|
||||||
|
MESSAGE+="${TAG_MESSAGE}"$'\n'$'\n'
|
||||||
|
fi
|
||||||
|
|
||||||
|
MESSAGE+="🐳 *Docker 部署:*"$'\n'
|
||||||
|
MESSAGE+="\`\`\`bash"$'\n'
|
||||||
|
# 根据是否配置 DockerHub 动态生成
|
||||||
|
if [ -n "$DOCKERHUB_USERNAME" ]; then
|
||||||
|
DOCKER_IMAGE="${DOCKERHUB_USERNAME}/sub2api"
|
||||||
|
MESSAGE+="# Docker Hub"$'\n'
|
||||||
|
MESSAGE+="docker pull ${DOCKER_IMAGE}:${TAG_NAME}"$'\n'
|
||||||
|
MESSAGE+="# GitHub Container Registry"$'\n'
|
||||||
|
fi
|
||||||
|
MESSAGE+="docker pull ${GHCR_IMAGE}:${TAG_NAME}"$'\n'
|
||||||
|
MESSAGE+="\`\`\`"$'\n'$'\n'
|
||||||
|
MESSAGE+="🔗 *相关链接:*"$'\n'
|
||||||
|
MESSAGE+="• [GitHub Release](https://github.com/${REPO}/releases/tag/${TAG_NAME})"$'\n'
|
||||||
|
if [ -n "$DOCKERHUB_USERNAME" ]; then
|
||||||
|
MESSAGE+="• [Docker Hub](https://hub.docker.com/r/${DOCKER_IMAGE})"$'\n'
|
||||||
|
fi
|
||||||
|
MESSAGE+="• [GitHub Packages](https://github.com/${REPO}/pkgs/container/sub2api)"$'\n'$'\n'
|
||||||
|
MESSAGE+="#Sub2API #Release #${TAG_NAME//./_}"
|
||||||
|
|
||||||
|
# 发送消息
|
||||||
|
curl -s -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/sendMessage" \
|
||||||
|
-H "Content-Type: application/json" \
|
||||||
|
-d "$(jq -n \
|
||||||
|
--arg chat_id "${TELEGRAM_CHAT_ID}" \
|
||||||
|
--arg text "${MESSAGE}" \
|
||||||
|
'{
|
||||||
|
chat_id: $chat_id,
|
||||||
|
text: $text,
|
||||||
|
parse_mode: "Markdown",
|
||||||
|
disable_web_page_preview: true
|
||||||
|
}')"
|
||||||
62
.github/workflows/security-scan.yml
vendored
Normal file
62
.github/workflows/security-scan.yml
vendored
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
name: Security Scan
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
pull_request:
|
||||||
|
schedule:
|
||||||
|
- cron: '0 3 * * 1'
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
backend-security:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set up Go
|
||||||
|
uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: backend/go.mod
|
||||||
|
check-latest: false
|
||||||
|
cache-dependency-path: backend/go.sum
|
||||||
|
- name: Verify Go version
|
||||||
|
run: |
|
||||||
|
go version | grep -q 'go1.25.5'
|
||||||
|
- name: Run govulncheck
|
||||||
|
working-directory: backend
|
||||||
|
run: |
|
||||||
|
go install golang.org/x/vuln/cmd/govulncheck@latest
|
||||||
|
govulncheck ./...
|
||||||
|
- name: Run gosec
|
||||||
|
working-directory: backend
|
||||||
|
run: |
|
||||||
|
go install github.com/securego/gosec/v2/cmd/gosec@latest
|
||||||
|
gosec -severity high -confidence high ./...
|
||||||
|
|
||||||
|
frontend-security:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set up pnpm
|
||||||
|
uses: pnpm/action-setup@v4
|
||||||
|
with:
|
||||||
|
version: 9
|
||||||
|
- name: Set up Node.js
|
||||||
|
uses: actions/setup-node@v4
|
||||||
|
with:
|
||||||
|
node-version: '20'
|
||||||
|
cache: 'pnpm'
|
||||||
|
cache-dependency-path: frontend/pnpm-lock.yaml
|
||||||
|
- name: Install dependencies
|
||||||
|
working-directory: frontend
|
||||||
|
run: pnpm install --frozen-lockfile
|
||||||
|
- name: Run pnpm audit
|
||||||
|
working-directory: frontend
|
||||||
|
run: |
|
||||||
|
pnpm audit --prod --audit-level=high --json > audit.json || true
|
||||||
|
- name: Check audit exceptions
|
||||||
|
run: |
|
||||||
|
python tools/check_pnpm_audit_exceptions.py \
|
||||||
|
--audit frontend/audit.json \
|
||||||
|
--exceptions .github/audit-exceptions.yml
|
||||||
131
.gitignore
vendored
Normal file
131
.gitignore
vendored
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
docs/claude-relay-service/
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# Go 后端
|
||||||
|
# ===================
|
||||||
|
# 二进制文件
|
||||||
|
*.exe
|
||||||
|
*.exe~
|
||||||
|
*.dll
|
||||||
|
*.so
|
||||||
|
*.dylib
|
||||||
|
backend/bin/
|
||||||
|
backend/server
|
||||||
|
backend/sub2api
|
||||||
|
backend/main
|
||||||
|
|
||||||
|
# Go 测试二进制
|
||||||
|
*.test
|
||||||
|
|
||||||
|
# 测试覆盖率
|
||||||
|
*.out
|
||||||
|
coverage.html
|
||||||
|
|
||||||
|
# 依赖(使用 go mod)
|
||||||
|
vendor/
|
||||||
|
|
||||||
|
# Go 编译缓存
|
||||||
|
backend/.gocache/
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# Node.js / Vue 前端
|
||||||
|
# ===================
|
||||||
|
node_modules/
|
||||||
|
frontend/node_modules/
|
||||||
|
frontend/dist/
|
||||||
|
*.local
|
||||||
|
*.tsbuildinfo
|
||||||
|
vite.config.d.ts
|
||||||
|
vite.config.js.timestamp-*
|
||||||
|
|
||||||
|
# 日志
|
||||||
|
npm-debug.log*
|
||||||
|
yarn-debug.log*
|
||||||
|
yarn-error.log*
|
||||||
|
pnpm-debug.log*
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 环境配置
|
||||||
|
# ===================
|
||||||
|
.env
|
||||||
|
.env.local
|
||||||
|
.env.*.local
|
||||||
|
*.env
|
||||||
|
!.env.example
|
||||||
|
docker-compose.override.yml
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# IDE / 编辑器
|
||||||
|
# ===================
|
||||||
|
.idea/
|
||||||
|
.vscode/
|
||||||
|
*.swp
|
||||||
|
*.swo
|
||||||
|
*~
|
||||||
|
.project
|
||||||
|
.settings/
|
||||||
|
.classpath
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 操作系统
|
||||||
|
# ===================
|
||||||
|
.DS_Store
|
||||||
|
Thumbs.db
|
||||||
|
Desktop.ini
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 临时文件
|
||||||
|
# ===================
|
||||||
|
tmp/
|
||||||
|
temp/
|
||||||
|
*.tmp
|
||||||
|
*.temp
|
||||||
|
*.log
|
||||||
|
*.bak
|
||||||
|
.cache/
|
||||||
|
.dev/
|
||||||
|
.serena/
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 构建产物
|
||||||
|
# ===================
|
||||||
|
dist/
|
||||||
|
build/
|
||||||
|
release/
|
||||||
|
|
||||||
|
# 后端嵌入的前端构建产物
|
||||||
|
# Keep a placeholder file so `//go:embed all:dist` always has a match in CI/lint,
|
||||||
|
# while still ignoring generated frontend build outputs.
|
||||||
|
backend/internal/web/dist/
|
||||||
|
!backend/internal/web/dist/
|
||||||
|
backend/internal/web/dist/*
|
||||||
|
!backend/internal/web/dist/.keep
|
||||||
|
|
||||||
|
# 后端运行时缓存数据
|
||||||
|
backend/data/
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 本地配置文件(包含敏感信息)
|
||||||
|
# ===================
|
||||||
|
backend/config.yaml
|
||||||
|
deploy/config.yaml
|
||||||
|
backend/.installed
|
||||||
|
|
||||||
|
# ===================
|
||||||
|
# 其他
|
||||||
|
# ===================
|
||||||
|
tests
|
||||||
|
CLAUDE.md
|
||||||
|
AGENTS.md
|
||||||
|
.claude
|
||||||
|
scripts
|
||||||
|
.code-review-state
|
||||||
|
openspec/
|
||||||
|
docs/
|
||||||
|
code-reviews/
|
||||||
|
AGENTS.md
|
||||||
|
backend/cmd/server/server
|
||||||
|
deploy/docker-compose.override.yml
|
||||||
|
.gocache/
|
||||||
|
vite.config.js
|
||||||
|
docs/*
|
||||||
86
.goreleaser.simple.yaml
Normal file
86
.goreleaser.simple.yaml
Normal file
@@ -0,0 +1,86 @@
|
|||||||
|
# 简化版 GoReleaser 配置 - 仅发布 x86_64 GHCR 镜像
|
||||||
|
version: 2
|
||||||
|
|
||||||
|
project_name: sub2api
|
||||||
|
|
||||||
|
before:
|
||||||
|
hooks:
|
||||||
|
- go mod tidy -C backend
|
||||||
|
|
||||||
|
builds:
|
||||||
|
- id: sub2api
|
||||||
|
dir: backend
|
||||||
|
main: ./cmd/server
|
||||||
|
binary: sub2api
|
||||||
|
flags:
|
||||||
|
- -tags=embed
|
||||||
|
env:
|
||||||
|
- CGO_ENABLED=0
|
||||||
|
goos:
|
||||||
|
- linux
|
||||||
|
goarch:
|
||||||
|
- amd64
|
||||||
|
ldflags:
|
||||||
|
- -s -w
|
||||||
|
- -X main.Commit={{.Commit}}
|
||||||
|
- -X main.Date={{.Date}}
|
||||||
|
- -X main.BuildType=release
|
||||||
|
|
||||||
|
# 跳过 archives
|
||||||
|
archives: []
|
||||||
|
|
||||||
|
# 跳过 checksum
|
||||||
|
checksum:
|
||||||
|
disable: true
|
||||||
|
|
||||||
|
changelog:
|
||||||
|
disable: true
|
||||||
|
|
||||||
|
# 仅 GHCR x86_64 镜像
|
||||||
|
dockers:
|
||||||
|
- id: ghcr-amd64
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
- "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
|
||||||
|
# 跳过 manifests(单架构不需要)
|
||||||
|
docker_manifests: []
|
||||||
|
|
||||||
|
release:
|
||||||
|
github:
|
||||||
|
owner: "{{ .Env.GITHUB_REPO_OWNER }}"
|
||||||
|
name: "{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
draft: false
|
||||||
|
prerelease: auto
|
||||||
|
name_template: "Sub2API {{.Version}} (Simple)"
|
||||||
|
# 跳过上传二进制包
|
||||||
|
skip_upload: true
|
||||||
|
header: |
|
||||||
|
> AI API Gateway Platform - 将 AI 订阅配额分发和管理
|
||||||
|
> ⚡ Simple Release: 仅包含 x86_64 GHCR 镜像
|
||||||
|
|
||||||
|
{{ .Env.TAG_MESSAGE }}
|
||||||
|
|
||||||
|
footer: |
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📥 Installation
|
||||||
|
|
||||||
|
**Docker (x86_64 only):**
|
||||||
|
```bash
|
||||||
|
docker pull ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 📚 Documentation
|
||||||
|
|
||||||
|
- [GitHub Repository](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }})
|
||||||
200
.goreleaser.yaml
Normal file
200
.goreleaser.yaml
Normal file
@@ -0,0 +1,200 @@
|
|||||||
|
version: 2
|
||||||
|
|
||||||
|
project_name: sub2api
|
||||||
|
|
||||||
|
before:
|
||||||
|
hooks:
|
||||||
|
- go mod tidy -C backend
|
||||||
|
|
||||||
|
builds:
|
||||||
|
- id: sub2api
|
||||||
|
dir: backend
|
||||||
|
main: ./cmd/server
|
||||||
|
binary: sub2api
|
||||||
|
flags:
|
||||||
|
- -tags=embed
|
||||||
|
env:
|
||||||
|
- CGO_ENABLED=0
|
||||||
|
goos:
|
||||||
|
- linux
|
||||||
|
- windows
|
||||||
|
- darwin
|
||||||
|
goarch:
|
||||||
|
- amd64
|
||||||
|
- arm64
|
||||||
|
ignore:
|
||||||
|
- goos: windows
|
||||||
|
goarch: arm64
|
||||||
|
ldflags:
|
||||||
|
- -s -w
|
||||||
|
- -X main.Commit={{.Commit}}
|
||||||
|
- -X main.Date={{.Date}}
|
||||||
|
- -X main.BuildType=release
|
||||||
|
|
||||||
|
archives:
|
||||||
|
- id: default
|
||||||
|
format: tar.gz
|
||||||
|
name_template: >-
|
||||||
|
{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}
|
||||||
|
format_overrides:
|
||||||
|
- goos: windows
|
||||||
|
format: zip
|
||||||
|
files:
|
||||||
|
- LICENSE*
|
||||||
|
- README*
|
||||||
|
- deploy/*
|
||||||
|
|
||||||
|
checksum:
|
||||||
|
name_template: 'checksums.txt'
|
||||||
|
algorithm: sha256
|
||||||
|
|
||||||
|
changelog:
|
||||||
|
# 禁用自动 changelog,完全使用 tag 消息
|
||||||
|
disable: true
|
||||||
|
|
||||||
|
# Docker images
|
||||||
|
dockers:
|
||||||
|
# DockerHub images (skipped if DOCKERHUB_USERNAME is 'skip')
|
||||||
|
- id: amd64
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
|
||||||
|
- id: arm64
|
||||||
|
goos: linux
|
||||||
|
goarch: arm64
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
|
||||||
|
# GHCR images (owner must be lowercase)
|
||||||
|
- id: ghcr-amd64
|
||||||
|
goos: linux
|
||||||
|
goarch: amd64
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
- "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
|
||||||
|
- id: ghcr-arm64
|
||||||
|
goos: linux
|
||||||
|
goarch: arm64
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
dockerfile: Dockerfile.goreleaser
|
||||||
|
use: buildx
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm64"
|
||||||
|
- "--label=org.opencontainers.image.version={{ .Version }}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{ .Commit }}"
|
||||||
|
- "--label=org.opencontainers.image.source=https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
|
||||||
|
# Docker manifests for multi-arch support
|
||||||
|
docker_manifests:
|
||||||
|
# DockerHub manifests (skipped if DOCKERHUB_USERNAME is 'skip')
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:latest"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Major }}.{{ .Minor }}"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Major }}"
|
||||||
|
skip_push: '{{ if eq .Env.DOCKERHUB_USERNAME "skip" }}true{{ else }}false{{ end }}'
|
||||||
|
image_templates:
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "{{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
# GHCR manifests (owner must be lowercase)
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:latest"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Major }}.{{ .Minor }}"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
- name_template: "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Major }}"
|
||||||
|
image_templates:
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-amd64"
|
||||||
|
- "ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}-arm64"
|
||||||
|
|
||||||
|
release:
|
||||||
|
github:
|
||||||
|
owner: "{{ .Env.GITHUB_REPO_OWNER }}"
|
||||||
|
name: "{{ .Env.GITHUB_REPO_NAME }}"
|
||||||
|
draft: false
|
||||||
|
prerelease: auto
|
||||||
|
name_template: "Sub2API {{.Version}}"
|
||||||
|
# 完全使用 tag 消息作为 release 内容(通过环境变量传入)
|
||||||
|
header: |
|
||||||
|
> AI API Gateway Platform - 将 AI 订阅配额分发和管理
|
||||||
|
|
||||||
|
{{ .Env.TAG_MESSAGE }}
|
||||||
|
|
||||||
|
footer: |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 📥 Installation
|
||||||
|
|
||||||
|
**Docker:**
|
||||||
|
```bash
|
||||||
|
{{ if ne .Env.DOCKERHUB_USERNAME "skip" -}}
|
||||||
|
# Docker Hub
|
||||||
|
docker pull {{ .Env.DOCKERHUB_USERNAME }}/sub2api:{{ .Version }}
|
||||||
|
|
||||||
|
{{ end -}}
|
||||||
|
# GitHub Container Registry
|
||||||
|
docker pull ghcr.io/{{ .Env.GITHUB_REPO_OWNER_LOWER }}/sub2api:{{ .Version }}
|
||||||
|
```
|
||||||
|
|
||||||
|
**One-line install (Linux):**
|
||||||
|
```bash
|
||||||
|
curl -sSL https://raw.githubusercontent.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}/main/deploy/install.sh | sudo bash
|
||||||
|
```
|
||||||
|
|
||||||
|
**Manual download:**
|
||||||
|
Download the appropriate archive for your platform from the assets below.
|
||||||
|
|
||||||
|
## 📚 Documentation
|
||||||
|
|
||||||
|
- [GitHub Repository](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }})
|
||||||
|
- [Installation Guide](https://github.com/{{ .Env.GITHUB_REPO_OWNER }}/{{ .Env.GITHUB_REPO_NAME }}/blob/main/deploy/README.md)
|
||||||
111
Dockerfile
Normal file
111
Dockerfile
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
# =============================================================================
|
||||||
|
# Sub2API Multi-Stage Dockerfile
|
||||||
|
# =============================================================================
|
||||||
|
# Stage 1: Build frontend
|
||||||
|
# Stage 2: Build Go backend with embedded frontend
|
||||||
|
# Stage 3: Final minimal image
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
ARG NODE_IMAGE=node:24-alpine
|
||||||
|
ARG GOLANG_IMAGE=golang:1.25.5-alpine
|
||||||
|
ARG ALPINE_IMAGE=alpine:3.20
|
||||||
|
ARG GOPROXY=https://goproxy.cn,direct
|
||||||
|
ARG GOSUMDB=sum.golang.google.cn
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Stage 1: Frontend Builder
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
FROM ${NODE_IMAGE} AS frontend-builder
|
||||||
|
|
||||||
|
WORKDIR /app/frontend
|
||||||
|
|
||||||
|
# Install pnpm
|
||||||
|
RUN corepack enable && corepack prepare pnpm@latest --activate
|
||||||
|
|
||||||
|
# Install dependencies first (better caching)
|
||||||
|
COPY frontend/package.json frontend/pnpm-lock.yaml ./
|
||||||
|
RUN pnpm install --frozen-lockfile
|
||||||
|
|
||||||
|
# Copy frontend source and build
|
||||||
|
COPY frontend/ ./
|
||||||
|
RUN pnpm run build
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Stage 2: Backend Builder
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
FROM ${GOLANG_IMAGE} AS backend-builder
|
||||||
|
|
||||||
|
# Build arguments for version info (set by CI)
|
||||||
|
ARG VERSION=docker
|
||||||
|
ARG COMMIT=docker
|
||||||
|
ARG DATE
|
||||||
|
ARG GOPROXY
|
||||||
|
ARG GOSUMDB
|
||||||
|
|
||||||
|
ENV GOPROXY=${GOPROXY}
|
||||||
|
ENV GOSUMDB=${GOSUMDB}
|
||||||
|
|
||||||
|
# Install build dependencies
|
||||||
|
RUN apk add --no-cache git ca-certificates tzdata
|
||||||
|
|
||||||
|
WORKDIR /app/backend
|
||||||
|
|
||||||
|
# Copy go mod files first (better caching)
|
||||||
|
COPY backend/go.mod backend/go.sum ./
|
||||||
|
RUN go mod download
|
||||||
|
|
||||||
|
# Copy backend source first
|
||||||
|
COPY backend/ ./
|
||||||
|
|
||||||
|
# Copy frontend dist from previous stage (must be after backend copy to avoid being overwritten)
|
||||||
|
COPY --from=frontend-builder /app/backend/internal/web/dist ./internal/web/dist
|
||||||
|
|
||||||
|
# Build the binary (BuildType=release for CI builds, embed frontend)
|
||||||
|
RUN CGO_ENABLED=0 GOOS=linux go build \
|
||||||
|
-tags embed \
|
||||||
|
-ldflags="-s -w -X main.Commit=${COMMIT} -X main.Date=${DATE:-$(date -u +%Y-%m-%dT%H:%M:%SZ)} -X main.BuildType=release" \
|
||||||
|
-o /app/sub2api \
|
||||||
|
./cmd/server
|
||||||
|
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
# Stage 3: Final Runtime Image
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
FROM ${ALPINE_IMAGE}
|
||||||
|
|
||||||
|
# Labels
|
||||||
|
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
||||||
|
LABEL description="Sub2API - AI API Gateway Platform"
|
||||||
|
LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
||||||
|
|
||||||
|
# Install runtime dependencies
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
ca-certificates \
|
||||||
|
tzdata \
|
||||||
|
curl \
|
||||||
|
&& rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
|
# Create non-root user
|
||||||
|
RUN addgroup -g 1000 sub2api && \
|
||||||
|
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
||||||
|
|
||||||
|
# Set working directory
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy binary from builder
|
||||||
|
COPY --from=backend-builder /app/sub2api /app/sub2api
|
||||||
|
|
||||||
|
# Create data directory
|
||||||
|
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||||
|
|
||||||
|
# Switch to non-root user
|
||||||
|
USER sub2api
|
||||||
|
|
||||||
|
# Expose port (can be overridden by SERVER_PORT env var)
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
# Health check
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||||
|
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||||
|
|
||||||
|
# Run the application
|
||||||
|
ENTRYPOINT ["/app/sub2api"]
|
||||||
40
Dockerfile.goreleaser
Normal file
40
Dockerfile.goreleaser
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# =============================================================================
|
||||||
|
# Sub2API Dockerfile for GoReleaser
|
||||||
|
# =============================================================================
|
||||||
|
# This Dockerfile is used by GoReleaser to build Docker images.
|
||||||
|
# It only packages the pre-built binary, no compilation needed.
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
FROM alpine:3.19
|
||||||
|
|
||||||
|
LABEL maintainer="Wei-Shaw <github.com/Wei-Shaw>"
|
||||||
|
LABEL description="Sub2API - AI API Gateway Platform"
|
||||||
|
LABEL org.opencontainers.image.source="https://github.com/Wei-Shaw/sub2api"
|
||||||
|
|
||||||
|
# Install runtime dependencies
|
||||||
|
RUN apk add --no-cache \
|
||||||
|
ca-certificates \
|
||||||
|
tzdata \
|
||||||
|
curl \
|
||||||
|
&& rm -rf /var/cache/apk/*
|
||||||
|
|
||||||
|
# Create non-root user
|
||||||
|
RUN addgroup -g 1000 sub2api && \
|
||||||
|
adduser -u 1000 -G sub2api -s /bin/sh -D sub2api
|
||||||
|
|
||||||
|
WORKDIR /app
|
||||||
|
|
||||||
|
# Copy pre-built binary from GoReleaser
|
||||||
|
COPY sub2api /app/sub2api
|
||||||
|
|
||||||
|
# Create data directory
|
||||||
|
RUN mkdir -p /app/data && chown -R sub2api:sub2api /app
|
||||||
|
|
||||||
|
USER sub2api
|
||||||
|
|
||||||
|
EXPOSE 8080
|
||||||
|
|
||||||
|
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
|
||||||
|
CMD curl -f http://localhost:${SERVER_PORT:-8080}/health || exit 1
|
||||||
|
|
||||||
|
ENTRYPOINT ["/app/sub2api"]
|
||||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
MIT License
|
||||||
|
|
||||||
|
Copyright (c) 2025 Wesley Liddick
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||||
|
of this software and associated documentation files (the "Software"), to deal
|
||||||
|
in the Software without restriction, including without limitation the rights
|
||||||
|
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||||
|
copies of the Software, and to permit persons to whom the Software is
|
||||||
|
furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all
|
||||||
|
copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||||
|
SOFTWARE.
|
||||||
368
Linux DO Connect.md
Normal file
368
Linux DO Connect.md
Normal file
@@ -0,0 +1,368 @@
|
|||||||
|
# Linux DO Connect
|
||||||
|
|
||||||
|
OAuth(Open Authorization)是一个开放的网络授权标准,目前最新版本为 OAuth 2.0。我们日常使用的第三方登录(如 Google 账号登录)就采用了该标准。OAuth 允许用户授权第三方应用访问存储在其他服务提供商(如 Google)上的信息,无需在不同平台上重复填写注册信息。用户授权后,平台可以直接访问用户的账户信息进行身份验证,而用户无需向第三方应用提供密码。
|
||||||
|
|
||||||
|
目前系统已实现完整的 OAuth2 授权码(code)方式鉴权,但界面等配套功能还在持续完善中。让我们一起打造一个更完善的共享方案。
|
||||||
|
|
||||||
|
## 基本介绍
|
||||||
|
|
||||||
|
这是一套标准的 OAuth2 鉴权系统,可以让开发者共享论坛的用户基本信息。
|
||||||
|
|
||||||
|
- 可获取字段:
|
||||||
|
|
||||||
|
| 参数 | 说明 |
|
||||||
|
| ----------------- | ------------------------------- |
|
||||||
|
| `id` | 用户唯一标识(不可变) |
|
||||||
|
| `username` | 论坛用户名 |
|
||||||
|
| `name` | 论坛用户昵称(可变) |
|
||||||
|
| `avatar_template` | 用户头像模板URL(支持多种尺寸) |
|
||||||
|
| `active` | 账号活跃状态 |
|
||||||
|
| `trust_level` | 信任等级(0-4) |
|
||||||
|
| `silenced` | 禁言状态 |
|
||||||
|
| `external_ids` | 外部ID关联信息 |
|
||||||
|
| `api_key` | API访问密钥 |
|
||||||
|
|
||||||
|
通过这些信息,公益网站/接口可以实现:
|
||||||
|
|
||||||
|
1. 基于 `id` 的服务频率限制
|
||||||
|
2. 基于 `trust_level` 的服务额度分配
|
||||||
|
3. 基于用户信息的滥用举报机制
|
||||||
|
|
||||||
|
## 相关端点
|
||||||
|
|
||||||
|
- Authorize 端点: `https://connect.linux.do/oauth2/authorize`
|
||||||
|
- Token 端点:`https://connect.linux.do/oauth2/token`
|
||||||
|
- 用户信息 端点:`https://connect.linux.do/api/user`
|
||||||
|
|
||||||
|
## 申请使用
|
||||||
|
|
||||||
|
- 访问 [Connect.Linux.Do](https://connect.linux.do/) 申请接入你的应用。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
- 点击 **`我的应用接入`** - **`申请新接入`**,填写相关信息。其中 **`回调地址`** 是你的应用接收用户信息的地址。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
- 申请成功后,你将获得 **`Client Id`** 和 **`Client Secret`**,这是你应用的唯一身份凭证。
|
||||||
|
|
||||||
|

|
||||||
|
|
||||||
|
## 接入 Linux Do
|
||||||
|
|
||||||
|
JavaScript
|
||||||
|
```JavaScript
|
||||||
|
// 安装第三方请求库(或使用原生的 Fetch API),本例中使用 axios
|
||||||
|
// npm install axios
|
||||||
|
|
||||||
|
// 通过 OAuth2 获取 Linux Do 用户信息的参考流程
|
||||||
|
const axios = require('axios');
|
||||||
|
const readline = require('readline');
|
||||||
|
|
||||||
|
// 配置信息(建议通过环境变量配置,避免使用硬编码)
|
||||||
|
const CLIENT_ID = '你的 Client ID';
|
||||||
|
const CLIENT_SECRET = '你的 Client Secret';
|
||||||
|
const REDIRECT_URI = '你的回调地址';
|
||||||
|
const AUTH_URL = 'https://connect.linux.do/oauth2/authorize';
|
||||||
|
const TOKEN_URL = 'https://connect.linux.do/oauth2/token';
|
||||||
|
const USER_INFO_URL = 'https://connect.linux.do/api/user';
|
||||||
|
|
||||||
|
// 第一步:生成授权 URL
|
||||||
|
function getAuthUrl() {
|
||||||
|
const params = new URLSearchParams({
|
||||||
|
client_id: CLIENT_ID,
|
||||||
|
redirect_uri: REDIRECT_URI,
|
||||||
|
response_type: 'code',
|
||||||
|
scope: 'user'
|
||||||
|
});
|
||||||
|
|
||||||
|
return `${AUTH_URL}?${params.toString()}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 第二步:获取 code 参数
|
||||||
|
function getCode() {
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
// 本例中使用终端输入来模拟流程,仅供本地测试
|
||||||
|
// 请在实际应用中替换为真实的处理逻辑
|
||||||
|
const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
|
||||||
|
rl.question('从回调 URL 中提取出 code,粘贴到此处并按回车:', (answer) => {
|
||||||
|
rl.close();
|
||||||
|
resolve(answer.trim());
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// 第三步:使用 code 参数获取访问令牌
|
||||||
|
async function getAccessToken(code) {
|
||||||
|
try {
|
||||||
|
const form = new URLSearchParams({
|
||||||
|
client_id: CLIENT_ID,
|
||||||
|
client_secret: CLIENT_SECRET,
|
||||||
|
code: code,
|
||||||
|
redirect_uri: REDIRECT_URI,
|
||||||
|
grant_type: 'authorization_code'
|
||||||
|
}).toString();
|
||||||
|
|
||||||
|
const response = await axios.post(TOKEN_URL, form, {
|
||||||
|
// 提醒:需正确配置请求头,否则无法正常获取访问令牌
|
||||||
|
headers: {
|
||||||
|
'Content-Type': 'application/x-www-form-urlencoded',
|
||||||
|
'Accept': 'application/json'
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return response.data;
|
||||||
|
} catch (error) {
|
||||||
|
console.error(`获取访问令牌失败:${error.response ? JSON.stringify(error.response.data) : error.message}`);
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 第四步:使用访问令牌获取用户信息
|
||||||
|
async function getUserInfo(accessToken) {
|
||||||
|
try {
|
||||||
|
const response = await axios.get(USER_INFO_URL, {
|
||||||
|
headers: {
|
||||||
|
Authorization: `Bearer ${accessToken}`
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
return response.data;
|
||||||
|
} catch (error) {
|
||||||
|
console.error(`获取用户信息失败:${error.response ? JSON.stringify(error.response.data) : error.message}`);
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 主流程
|
||||||
|
async function main() {
|
||||||
|
// 1. 生成授权 URL,前端引导用户访问授权页
|
||||||
|
const authUrl = getAuthUrl();
|
||||||
|
console.log(`请访问此 URL 授权:${authUrl}
|
||||||
|
`);
|
||||||
|
|
||||||
|
// 2. 用户授权后,从回调 URL 获取 code 参数
|
||||||
|
const code = await getCode();
|
||||||
|
|
||||||
|
try {
|
||||||
|
// 3. 使用 code 参数获取访问令牌
|
||||||
|
const tokenData = await getAccessToken(code);
|
||||||
|
const accessToken = tokenData.access_token;
|
||||||
|
|
||||||
|
// 4. 使用访问令牌获取用户信息
|
||||||
|
if (accessToken) {
|
||||||
|
const userInfo = await getUserInfo(accessToken);
|
||||||
|
console.log(`
|
||||||
|
获取用户信息成功:${JSON.stringify(userInfo, null, 2)}`);
|
||||||
|
} else {
|
||||||
|
console.log(`
|
||||||
|
获取访问令牌失败:${JSON.stringify(tokenData)}`);
|
||||||
|
}
|
||||||
|
} catch (error) {
|
||||||
|
console.error('发生错误:', error);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
Python
|
||||||
|
```python
|
||||||
|
# 安装第三方请求库,本例中使用 requests
|
||||||
|
# pip install requests
|
||||||
|
|
||||||
|
# 通过 OAuth2 获取 Linux Do 用户信息的参考流程
|
||||||
|
import requests
|
||||||
|
import json
|
||||||
|
|
||||||
|
# 配置信息(建议通过环境变量配置,避免使用硬编码)
|
||||||
|
CLIENT_ID = '你的 Client ID'
|
||||||
|
CLIENT_SECRET = '你的 Client Secret'
|
||||||
|
REDIRECT_URI = '你的回调地址'
|
||||||
|
AUTH_URL = 'https://connect.linux.do/oauth2/authorize'
|
||||||
|
TOKEN_URL = 'https://connect.linux.do/oauth2/token'
|
||||||
|
USER_INFO_URL = 'https://connect.linux.do/api/user'
|
||||||
|
|
||||||
|
# 第一步:生成授权 URL
|
||||||
|
def get_auth_url():
|
||||||
|
params = {
|
||||||
|
'client_id': CLIENT_ID,
|
||||||
|
'redirect_uri': REDIRECT_URI,
|
||||||
|
'response_type': 'code',
|
||||||
|
'scope': 'user'
|
||||||
|
}
|
||||||
|
auth_url = f"{AUTH_URL}?{'&'.join(f'{k}={v}' for k, v in params.items())}"
|
||||||
|
return auth_url
|
||||||
|
|
||||||
|
# 第二步:获取 code 参数
|
||||||
|
def get_code():
|
||||||
|
# 本例中使用终端输入来模拟流程,仅供本地测试
|
||||||
|
# 请在实际应用中替换为真实的处理逻辑
|
||||||
|
return input('从回调 URL 中提取出 code,粘贴到此处并按回车:').strip()
|
||||||
|
|
||||||
|
# 第三步:使用 code 参数获取访问令牌
|
||||||
|
def get_access_token(code):
|
||||||
|
try:
|
||||||
|
data = {
|
||||||
|
'client_id': CLIENT_ID,
|
||||||
|
'client_secret': CLIENT_SECRET,
|
||||||
|
'code': code,
|
||||||
|
'redirect_uri': REDIRECT_URI,
|
||||||
|
'grant_type': 'authorization_code'
|
||||||
|
}
|
||||||
|
# 提醒:需正确配置请求头,否则无法正常获取访问令牌
|
||||||
|
headers = {
|
||||||
|
'Content-Type': 'application/x-www-form-urlencoded',
|
||||||
|
'Accept': 'application/json'
|
||||||
|
}
|
||||||
|
response = requests.post(TOKEN_URL, data=data, headers=headers)
|
||||||
|
response.raise_for_status()
|
||||||
|
return response.json()
|
||||||
|
except requests.exceptions.RequestException as e:
|
||||||
|
print(f"获取访问令牌失败:{e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# 第四步:使用访问令牌获取用户信息
|
||||||
|
def get_user_info(access_token):
|
||||||
|
try:
|
||||||
|
headers = {
|
||||||
|
'Authorization': f'Bearer {access_token}'
|
||||||
|
}
|
||||||
|
response = requests.get(USER_INFO_URL, headers=headers)
|
||||||
|
response.raise_for_status()
|
||||||
|
return response.json()
|
||||||
|
except requests.exceptions.RequestException as e:
|
||||||
|
print(f"获取用户信息失败:{e}")
|
||||||
|
return None
|
||||||
|
|
||||||
|
# 主流程
|
||||||
|
if __name__ == '__main__':
|
||||||
|
# 1. 生成授权 URL,前端引导用户访问授权页
|
||||||
|
auth_url = get_auth_url()
|
||||||
|
print(f'请访问此 URL 授权:{auth_url}
|
||||||
|
')
|
||||||
|
|
||||||
|
# 2. 用户授权后,从回调 URL 获取 code 参数
|
||||||
|
code = get_code()
|
||||||
|
|
||||||
|
# 3. 使用 code 参数获取访问令牌
|
||||||
|
token_data = get_access_token(code)
|
||||||
|
if token_data:
|
||||||
|
access_token = token_data.get('access_token')
|
||||||
|
|
||||||
|
# 4. 使用访问令牌获取用户信息
|
||||||
|
if access_token:
|
||||||
|
user_info = get_user_info(access_token)
|
||||||
|
if user_info:
|
||||||
|
print(f"
|
||||||
|
获取用户信息成功:{json.dumps(user_info, indent=2)}")
|
||||||
|
else:
|
||||||
|
print("
|
||||||
|
获取用户信息失败")
|
||||||
|
else:
|
||||||
|
print(f"
|
||||||
|
获取访问令牌失败:{json.dumps(token_data, indent=2)}")
|
||||||
|
else:
|
||||||
|
print("
|
||||||
|
获取访问令牌失败")
|
||||||
|
```
|
||||||
|
PHP
|
||||||
|
```php
|
||||||
|
// 通过 OAuth2 获取 Linux Do 用户信息的参考流程
|
||||||
|
|
||||||
|
// 配置信息
|
||||||
|
$CLIENT_ID = '你的 Client ID';
|
||||||
|
$CLIENT_SECRET = '你的 Client Secret';
|
||||||
|
$REDIRECT_URI = '你的回调地址';
|
||||||
|
$AUTH_URL = 'https://connect.linux.do/oauth2/authorize';
|
||||||
|
$TOKEN_URL = 'https://connect.linux.do/oauth2/token';
|
||||||
|
$USER_INFO_URL = 'https://connect.linux.do/api/user';
|
||||||
|
|
||||||
|
// 生成授权 URL
|
||||||
|
function getAuthUrl($clientId, $redirectUri) {
|
||||||
|
global $AUTH_URL;
|
||||||
|
return $AUTH_URL . '?' . http_build_query([
|
||||||
|
'client_id' => $clientId,
|
||||||
|
'redirect_uri' => $redirectUri,
|
||||||
|
'response_type' => 'code',
|
||||||
|
'scope' => 'user'
|
||||||
|
]);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 使用 code 参数获取用户信息(合并获取令牌和获取用户信息的步骤)
|
||||||
|
function getUserInfoWithCode($code, $clientId, $clientSecret, $redirectUri) {
|
||||||
|
global $TOKEN_URL, $USER_INFO_URL;
|
||||||
|
|
||||||
|
// 1. 获取访问令牌
|
||||||
|
$ch = curl_init($TOKEN_URL);
|
||||||
|
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
|
||||||
|
curl_setopt($ch, CURLOPT_POST, true);
|
||||||
|
curl_setopt($ch, CURLOPT_POSTFIELDS, http_build_query([
|
||||||
|
'client_id' => $clientId,
|
||||||
|
'client_secret' => $clientSecret,
|
||||||
|
'code' => $code,
|
||||||
|
'redirect_uri' => $redirectUri,
|
||||||
|
'grant_type' => 'authorization_code'
|
||||||
|
]));
|
||||||
|
curl_setopt($ch, CURLOPT_HTTPHEADER, [
|
||||||
|
'Content-Type: application/x-www-form-urlencoded',
|
||||||
|
'Accept: application/json'
|
||||||
|
]);
|
||||||
|
|
||||||
|
$tokenResponse = curl_exec($ch);
|
||||||
|
curl_close($ch);
|
||||||
|
|
||||||
|
$tokenData = json_decode($tokenResponse, true);
|
||||||
|
if (!isset($tokenData['access_token'])) {
|
||||||
|
return ['error' => '获取访问令牌失败', 'details' => $tokenData];
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 获取用户信息
|
||||||
|
$ch = curl_init($USER_INFO_URL);
|
||||||
|
curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
|
||||||
|
curl_setopt($ch, CURLOPT_HTTPHEADER, [
|
||||||
|
'Authorization: Bearer ' . $tokenData['access_token']
|
||||||
|
]);
|
||||||
|
|
||||||
|
$userResponse = curl_exec($ch);
|
||||||
|
curl_close($ch);
|
||||||
|
|
||||||
|
return json_decode($userResponse, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
// 主流程
|
||||||
|
// 1. 生成授权 URL
|
||||||
|
$authUrl = getAuthUrl($CLIENT_ID, $REDIRECT_URI);
|
||||||
|
echo "<a href='$authUrl'>使用 Linux Do 登录</a>";
|
||||||
|
|
||||||
|
// 2. 处理回调并获取用户信息
|
||||||
|
if (isset($_GET['code'])) {
|
||||||
|
$userInfo = getUserInfoWithCode(
|
||||||
|
$_GET['code'],
|
||||||
|
$CLIENT_ID,
|
||||||
|
$CLIENT_SECRET,
|
||||||
|
$REDIRECT_URI
|
||||||
|
);
|
||||||
|
|
||||||
|
if (isset($userInfo['error'])) {
|
||||||
|
echo '错误: ' . $userInfo['error'];
|
||||||
|
} else {
|
||||||
|
echo '欢迎, ' . $userInfo['name'] . '!';
|
||||||
|
// 处理用户登录逻辑...
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## 使用说明
|
||||||
|
|
||||||
|
### 授权流程
|
||||||
|
|
||||||
|
1. 用户点击应用中的’使用 Linux Do 登录’按钮
|
||||||
|
2. 系统将用户重定向至 Linux Do 的授权页面
|
||||||
|
3. 用户完成授权后,系统自动重定向回应用并携带授权码
|
||||||
|
4. 应用使用授权码获取访问令牌
|
||||||
|
5. 使用访问令牌获取用户信息
|
||||||
|
|
||||||
|
### 安全建议
|
||||||
|
|
||||||
|
- 切勿在前端代码中暴露 Client Secret
|
||||||
|
- 对所有用户输入数据进行严格验证
|
||||||
|
- 确保使用 HTTPS 协议传输数据
|
||||||
|
- 定期更新并妥善保管 Client Secret
|
||||||
22
Makefile
Normal file
22
Makefile
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
.PHONY: build build-backend build-frontend test test-backend test-frontend
|
||||||
|
|
||||||
|
# 一键编译前后端
|
||||||
|
build: build-backend build-frontend
|
||||||
|
|
||||||
|
# 编译后端(复用 backend/Makefile)
|
||||||
|
build-backend:
|
||||||
|
@$(MAKE) -C backend build
|
||||||
|
|
||||||
|
# 编译前端(需要已安装依赖)
|
||||||
|
build-frontend:
|
||||||
|
@pnpm --dir frontend run build
|
||||||
|
|
||||||
|
# 运行测试(后端 + 前端)
|
||||||
|
test: test-backend test-frontend
|
||||||
|
|
||||||
|
test-backend:
|
||||||
|
@$(MAKE) -C backend test
|
||||||
|
|
||||||
|
test-frontend:
|
||||||
|
@pnpm --dir frontend run lint:check
|
||||||
|
@pnpm --dir frontend run typecheck
|
||||||
164
PR_DESCRIPTION.md
Normal file
164
PR_DESCRIPTION.md
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
## 概述
|
||||||
|
|
||||||
|
全面增强运维监控系统(Ops)的错误日志管理和告警静默功能,优化前端 UI 组件代码质量和用户体验。本次更新重构了核心服务层和数据访问层,提升系统可维护性和运维效率。
|
||||||
|
|
||||||
|
## 主要改动
|
||||||
|
|
||||||
|
### 1. 错误日志查询优化
|
||||||
|
|
||||||
|
**功能特性:**
|
||||||
|
- 新增 GetErrorLogByID 接口,支持按 ID 精确查询错误详情
|
||||||
|
- 优化错误日志过滤逻辑,支持多维度筛选(平台、阶段、来源、所有者等)
|
||||||
|
- 改进查询参数处理,简化代码结构
|
||||||
|
- 增强错误分类和标准化处理
|
||||||
|
- 支持错误解决状态追踪(resolved 字段)
|
||||||
|
|
||||||
|
**技术实现:**
|
||||||
|
- `ops_handler.go` - 新增单条错误日志查询接口
|
||||||
|
- `ops_repo.go` - 优化数据查询和过滤条件构建
|
||||||
|
- `ops_models.go` - 扩展错误日志数据模型
|
||||||
|
- 前端 API 接口同步更新
|
||||||
|
|
||||||
|
### 2. 告警静默功能
|
||||||
|
|
||||||
|
**功能特性:**
|
||||||
|
- 支持按规则、平台、分组、区域等维度静默告警
|
||||||
|
- 可设置静默时长和原因说明
|
||||||
|
- 静默记录可追溯,记录创建人和创建时间
|
||||||
|
- 自动过期机制,避免永久静默
|
||||||
|
|
||||||
|
**技术实现:**
|
||||||
|
- `037_ops_alert_silences.sql` - 新增告警静默表
|
||||||
|
- `ops_alerts.go` - 告警静默逻辑实现
|
||||||
|
- `ops_alerts_handler.go` - 告警静默 API 接口
|
||||||
|
- `OpsAlertEventsCard.vue` - 前端告警静默操作界面
|
||||||
|
|
||||||
|
**数据库结构:**
|
||||||
|
|
||||||
|
| 字段 | 类型 | 说明 |
|
||||||
|
|------|------|------|
|
||||||
|
| rule_id | BIGINT | 告警规则 ID |
|
||||||
|
| platform | VARCHAR(64) | 平台标识 |
|
||||||
|
| group_id | BIGINT | 分组 ID(可选) |
|
||||||
|
| region | VARCHAR(64) | 区域(可选) |
|
||||||
|
| until | TIMESTAMPTZ | 静默截止时间 |
|
||||||
|
| reason | TEXT | 静默原因 |
|
||||||
|
| created_by | BIGINT | 创建人 ID |
|
||||||
|
|
||||||
|
### 3. 错误分类标准化
|
||||||
|
|
||||||
|
**功能特性:**
|
||||||
|
- 统一错误阶段分类(request|auth|routing|upstream|network|internal)
|
||||||
|
- 规范错误归属分类(client|provider|platform)
|
||||||
|
- 标准化错误来源分类(client_request|upstream_http|gateway)
|
||||||
|
- 自动迁移历史数据到新分类体系
|
||||||
|
|
||||||
|
**技术实现:**
|
||||||
|
- `038_ops_errors_resolution_retry_results_and_standardize_classification.sql` - 分类标准化迁移
|
||||||
|
- 自动映射历史遗留分类到新标准
|
||||||
|
- 自动解决已恢复的上游错误(客户端状态码 < 400)
|
||||||
|
|
||||||
|
### 4. Gateway 服务集成
|
||||||
|
|
||||||
|
**功能特性:**
|
||||||
|
- 完善各 Gateway 服务的 Ops 集成
|
||||||
|
- 统一错误日志记录接口
|
||||||
|
- 增强上游错误追踪能力
|
||||||
|
|
||||||
|
**涉及服务:**
|
||||||
|
- `antigravity_gateway_service.go` - Antigravity 网关集成
|
||||||
|
- `gateway_service.go` - 通用网关集成
|
||||||
|
- `gemini_messages_compat_service.go` - Gemini 兼容层集成
|
||||||
|
- `openai_gateway_service.go` - OpenAI 网关集成
|
||||||
|
|
||||||
|
### 5. 前端 UI 优化
|
||||||
|
|
||||||
|
**代码重构:**
|
||||||
|
- 大幅简化错误详情模态框代码(从 828 行优化到 450 行)
|
||||||
|
- 优化错误日志表格组件,提升可读性
|
||||||
|
- 清理未使用的 i18n 翻译,减少冗余
|
||||||
|
- 统一组件代码风格和格式
|
||||||
|
- 优化骨架屏组件,更好匹配实际看板布局
|
||||||
|
|
||||||
|
**布局改进:**
|
||||||
|
- 修复模态框内容溢出和滚动问题
|
||||||
|
- 优化表格布局,使用 flex 布局确保正确显示
|
||||||
|
- 改进看板头部布局和交互
|
||||||
|
- 提升响应式体验
|
||||||
|
- 骨架屏支持全屏模式适配
|
||||||
|
|
||||||
|
**交互优化:**
|
||||||
|
- 优化告警事件卡片功能和展示
|
||||||
|
- 改进错误详情展示逻辑
|
||||||
|
- 增强请求详情模态框
|
||||||
|
- 完善运行时设置卡片
|
||||||
|
- 改进加载动画效果
|
||||||
|
|
||||||
|
### 6. 国际化完善
|
||||||
|
|
||||||
|
**文案补充:**
|
||||||
|
- 补充错误日志相关的英文翻译
|
||||||
|
- 添加告警静默功能的中英文文案
|
||||||
|
- 完善提示文本和错误信息
|
||||||
|
- 统一术语翻译标准
|
||||||
|
|
||||||
|
## 文件变更
|
||||||
|
|
||||||
|
**后端(26 个文件):**
|
||||||
|
- `backend/internal/handler/admin/ops_alerts_handler.go` - 告警接口增强
|
||||||
|
- `backend/internal/handler/admin/ops_handler.go` - 错误日志接口优化
|
||||||
|
- `backend/internal/handler/ops_error_logger.go` - 错误记录器增强
|
||||||
|
- `backend/internal/repository/ops_repo.go` - 数据访问层重构
|
||||||
|
- `backend/internal/repository/ops_repo_alerts.go` - 告警数据访问增强
|
||||||
|
- `backend/internal/service/ops_*.go` - 核心服务层重构(10 个文件)
|
||||||
|
- `backend/internal/service/*_gateway_service.go` - Gateway 集成(4 个文件)
|
||||||
|
- `backend/internal/server/routes/admin.go` - 路由配置更新
|
||||||
|
- `backend/migrations/*.sql` - 数据库迁移(2 个文件)
|
||||||
|
- 测试文件更新(5 个文件)
|
||||||
|
|
||||||
|
**前端(13 个文件):**
|
||||||
|
- `frontend/src/views/admin/ops/OpsDashboard.vue` - 看板主页优化
|
||||||
|
- `frontend/src/views/admin/ops/components/*.vue` - 组件重构(10 个文件)
|
||||||
|
- `frontend/src/api/admin/ops.ts` - API 接口扩展
|
||||||
|
- `frontend/src/i18n/locales/*.ts` - 国际化文本(2 个文件)
|
||||||
|
|
||||||
|
## 代码统计
|
||||||
|
|
||||||
|
- 44 个文件修改
|
||||||
|
- 3733 行新增
|
||||||
|
- 995 行删除
|
||||||
|
- 净增加 2738 行
|
||||||
|
|
||||||
|
## 核心改进
|
||||||
|
|
||||||
|
**可维护性提升:**
|
||||||
|
- 重构核心服务层,职责更清晰
|
||||||
|
- 简化前端组件代码,降低复杂度
|
||||||
|
- 统一代码风格和命名规范
|
||||||
|
- 清理冗余代码和未使用的翻译
|
||||||
|
- 标准化错误分类体系
|
||||||
|
|
||||||
|
**功能完善:**
|
||||||
|
- 告警静默功能,减少告警噪音
|
||||||
|
- 错误日志查询优化,提升运维效率
|
||||||
|
- Gateway 服务集成完善,统一监控能力
|
||||||
|
- 错误解决状态追踪,便于问题管理
|
||||||
|
|
||||||
|
**用户体验优化:**
|
||||||
|
- 修复多个 UI 布局问题
|
||||||
|
- 优化交互流程
|
||||||
|
- 完善国际化支持
|
||||||
|
- 提升响应式体验
|
||||||
|
- 改进加载状态展示
|
||||||
|
|
||||||
|
## 测试验证
|
||||||
|
|
||||||
|
- ✅ 错误日志查询和过滤功能
|
||||||
|
- ✅ 告警静默创建和自动过期
|
||||||
|
- ✅ 错误分类标准化迁移
|
||||||
|
- ✅ Gateway 服务错误日志记录
|
||||||
|
- ✅ 前端组件布局和交互
|
||||||
|
- ✅ 骨架屏全屏模式适配
|
||||||
|
- ✅ 国际化文本完整性
|
||||||
|
- ✅ API 接口功能正确性
|
||||||
|
- ✅ 数据库迁移执行成功
|
||||||
458
README.md
Normal file
458
README.md
Normal file
@@ -0,0 +1,458 @@
|
|||||||
|
# Sub2API
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
[](https://golang.org/)
|
||||||
|
[](https://vuejs.org/)
|
||||||
|
[](https://www.postgresql.org/)
|
||||||
|
[](https://redis.io/)
|
||||||
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
**AI API Gateway Platform for Subscription Quota Distribution**
|
||||||
|
|
||||||
|
English | [中文](README_CN.md)
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Demo
|
||||||
|
|
||||||
|
Try Sub2API online: **https://v2.pincc.ai/**
|
||||||
|
|
||||||
|
Demo credentials (shared demo environment; **not** created automatically for self-hosted installs):
|
||||||
|
|
||||||
|
| Email | Password |
|
||||||
|
|-------|----------|
|
||||||
|
| admin@sub2api.com | admin123 |
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Sub2API is an AI API gateway platform designed to distribute and manage API quotas from AI product subscriptions (like Claude Code $200/month). Users can access upstream AI services through platform-generated API Keys, while the platform handles authentication, billing, load balancing, and request forwarding.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
- **Multi-Account Management** - Support multiple upstream account types (OAuth, API Key)
|
||||||
|
- **API Key Distribution** - Generate and manage API Keys for users
|
||||||
|
- **Precise Billing** - Token-level usage tracking and cost calculation
|
||||||
|
- **Smart Scheduling** - Intelligent account selection with sticky sessions
|
||||||
|
- **Concurrency Control** - Per-user and per-account concurrency limits
|
||||||
|
- **Rate Limiting** - Configurable request and token rate limits
|
||||||
|
- **Admin Dashboard** - Web interface for monitoring and management
|
||||||
|
|
||||||
|
## Tech Stack
|
||||||
|
|
||||||
|
| Component | Technology |
|
||||||
|
|-----------|------------|
|
||||||
|
| Backend | Go 1.25.5, Gin, Ent |
|
||||||
|
| Frontend | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
|
| Database | PostgreSQL 15+ |
|
||||||
|
| Cache/Queue | Redis 7+ |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
- Dependency Security: `docs/dependency-security.md`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Deployment
|
||||||
|
|
||||||
|
### Method 1: Script Installation (Recommended)
|
||||||
|
|
||||||
|
One-click installation script that downloads pre-built binaries from GitHub Releases.
|
||||||
|
|
||||||
|
#### Prerequisites
|
||||||
|
|
||||||
|
- Linux server (amd64 or arm64)
|
||||||
|
- PostgreSQL 15+ (installed and running)
|
||||||
|
- Redis 7+ (installed and running)
|
||||||
|
- Root privileges
|
||||||
|
|
||||||
|
#### Installation Steps
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash
|
||||||
|
```
|
||||||
|
|
||||||
|
The script will:
|
||||||
|
1. Detect your system architecture
|
||||||
|
2. Download the latest release
|
||||||
|
3. Install binary to `/opt/sub2api`
|
||||||
|
4. Create systemd service
|
||||||
|
5. Configure system user and permissions
|
||||||
|
|
||||||
|
#### Post-Installation
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Start the service
|
||||||
|
sudo systemctl start sub2api
|
||||||
|
|
||||||
|
# 2. Enable auto-start on boot
|
||||||
|
sudo systemctl enable sub2api
|
||||||
|
|
||||||
|
# 3. Open Setup Wizard in browser
|
||||||
|
# http://YOUR_SERVER_IP:8080
|
||||||
|
```
|
||||||
|
|
||||||
|
The Setup Wizard will guide you through:
|
||||||
|
- Database configuration
|
||||||
|
- Redis configuration
|
||||||
|
- Admin account creation
|
||||||
|
|
||||||
|
#### Upgrade
|
||||||
|
|
||||||
|
You can upgrade directly from the **Admin Dashboard** by clicking the **Check for Updates** button in the top-left corner.
|
||||||
|
|
||||||
|
The web interface will:
|
||||||
|
- Check for new versions automatically
|
||||||
|
- Download and apply updates with one click
|
||||||
|
- Support rollback if needed
|
||||||
|
|
||||||
|
#### Useful Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Check status
|
||||||
|
sudo systemctl status sub2api
|
||||||
|
|
||||||
|
# View logs
|
||||||
|
sudo journalctl -u sub2api -f
|
||||||
|
|
||||||
|
# Restart service
|
||||||
|
sudo systemctl restart sub2api
|
||||||
|
|
||||||
|
# Uninstall
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash -s -- uninstall -y
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Method 2: Docker Compose
|
||||||
|
|
||||||
|
Deploy with Docker Compose, including PostgreSQL and Redis containers.
|
||||||
|
|
||||||
|
#### Prerequisites
|
||||||
|
|
||||||
|
- Docker 20.10+
|
||||||
|
- Docker Compose v2+
|
||||||
|
|
||||||
|
#### Installation Steps
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Clone the repository
|
||||||
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
|
cd sub2api
|
||||||
|
|
||||||
|
# 2. Enter the deploy directory
|
||||||
|
cd deploy
|
||||||
|
|
||||||
|
# 3. Copy environment configuration
|
||||||
|
cp .env.example .env
|
||||||
|
|
||||||
|
# 4. Edit configuration (set your passwords)
|
||||||
|
nano .env
|
||||||
|
```
|
||||||
|
|
||||||
|
**Required configuration in `.env`:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# PostgreSQL password (REQUIRED - change this!)
|
||||||
|
POSTGRES_PASSWORD=your_secure_password_here
|
||||||
|
|
||||||
|
# Optional: Admin account
|
||||||
|
ADMIN_EMAIL=admin@example.com
|
||||||
|
ADMIN_PASSWORD=your_admin_password
|
||||||
|
|
||||||
|
# Optional: Custom port
|
||||||
|
SERVER_PORT=8080
|
||||||
|
|
||||||
|
# Optional: Security configuration
|
||||||
|
# Enable URL allowlist validation (false to skip allowlist checks, only basic format validation)
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
|
||||||
|
# Allow insecure HTTP URLs when allowlist is disabled (default: false, requires https)
|
||||||
|
# ⚠️ WARNING: Enabling this allows HTTP (plaintext) URLs which can expose API keys
|
||||||
|
# Only recommended for:
|
||||||
|
# - Development/testing environments
|
||||||
|
# - Internal networks with trusted endpoints
|
||||||
|
# - When using local test servers (http://localhost)
|
||||||
|
# PRODUCTION: Keep this false or use HTTPS URLs only
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false
|
||||||
|
|
||||||
|
# Allow private IP addresses for upstream/pricing/CRS (for internal deployments)
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 5. Start all services
|
||||||
|
docker-compose up -d
|
||||||
|
|
||||||
|
# 6. Check status
|
||||||
|
docker-compose ps
|
||||||
|
|
||||||
|
# 7. View logs
|
||||||
|
docker-compose logs -f sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Access
|
||||||
|
|
||||||
|
Open `http://YOUR_SERVER_IP:8080` in your browser.
|
||||||
|
|
||||||
|
#### Upgrade
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Pull latest image and recreate container
|
||||||
|
docker-compose pull
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Useful Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Stop all services
|
||||||
|
docker-compose down
|
||||||
|
|
||||||
|
# Restart
|
||||||
|
docker-compose restart
|
||||||
|
|
||||||
|
# View all logs
|
||||||
|
docker-compose logs -f
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Method 3: Build from Source
|
||||||
|
|
||||||
|
Build and run from source code for development or customization.
|
||||||
|
|
||||||
|
#### Prerequisites
|
||||||
|
|
||||||
|
- Go 1.21+
|
||||||
|
- Node.js 18+
|
||||||
|
- PostgreSQL 15+
|
||||||
|
- Redis 7+
|
||||||
|
|
||||||
|
#### Build Steps
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. Clone the repository
|
||||||
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
|
cd sub2api
|
||||||
|
|
||||||
|
# 2. Install pnpm (if not already installed)
|
||||||
|
npm install -g pnpm
|
||||||
|
|
||||||
|
# 3. Build frontend
|
||||||
|
cd frontend
|
||||||
|
pnpm install
|
||||||
|
pnpm run build
|
||||||
|
# Output will be in ../backend/internal/web/dist/
|
||||||
|
|
||||||
|
# 4. Build backend with embedded frontend
|
||||||
|
cd ../backend
|
||||||
|
go build -tags embed -o sub2api ./cmd/server
|
||||||
|
|
||||||
|
# 5. Create configuration file
|
||||||
|
cp ../deploy/config.example.yaml ./config.yaml
|
||||||
|
|
||||||
|
# 6. Edit configuration
|
||||||
|
nano config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note:** The `-tags embed` flag embeds the frontend into the binary. Without this flag, the binary will not serve the frontend UI.
|
||||||
|
|
||||||
|
**Key configuration in `config.yaml`:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
server:
|
||||||
|
host: "0.0.0.0"
|
||||||
|
port: 8080
|
||||||
|
mode: "release"
|
||||||
|
|
||||||
|
database:
|
||||||
|
host: "localhost"
|
||||||
|
port: 5432
|
||||||
|
user: "postgres"
|
||||||
|
password: "your_password"
|
||||||
|
dbname: "sub2api"
|
||||||
|
|
||||||
|
redis:
|
||||||
|
host: "localhost"
|
||||||
|
port: 6379
|
||||||
|
password: ""
|
||||||
|
|
||||||
|
jwt:
|
||||||
|
secret: "change-this-to-a-secure-random-string"
|
||||||
|
expire_hour: 24
|
||||||
|
|
||||||
|
default:
|
||||||
|
user_concurrency: 5
|
||||||
|
user_balance: 0
|
||||||
|
api_key_prefix: "sk-"
|
||||||
|
rate_multiplier: 1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
Additional security-related options are available in `config.yaml`:
|
||||||
|
|
||||||
|
- `cors.allowed_origins` for CORS allowlist
|
||||||
|
- `security.url_allowlist` for upstream/pricing/CRS host allowlists
|
||||||
|
- `security.url_allowlist.enabled` to disable URL validation (use with caution)
|
||||||
|
- `security.url_allowlist.allow_insecure_http` to allow HTTP URLs when validation is disabled
|
||||||
|
- `security.url_allowlist.allow_private_hosts` to allow private/local IP addresses
|
||||||
|
- `security.response_headers.enabled` to enable configurable response header filtering (disabled uses default allowlist)
|
||||||
|
- `security.csp` to control Content-Security-Policy headers
|
||||||
|
- `billing.circuit_breaker` to fail closed on billing errors
|
||||||
|
- `server.trusted_proxies` to enable X-Forwarded-For parsing
|
||||||
|
- `turnstile.required` to require Turnstile in release mode
|
||||||
|
|
||||||
|
**⚠️ Security Warning: HTTP URL Configuration**
|
||||||
|
|
||||||
|
When `security.url_allowlist.enabled=false`, the system performs minimal URL validation by default, **rejecting HTTP URLs** and only allowing HTTPS. To allow HTTP URLs (e.g., for development or internal testing), you must explicitly set:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
security:
|
||||||
|
url_allowlist:
|
||||||
|
enabled: false # Disable allowlist checks
|
||||||
|
allow_insecure_http: true # Allow HTTP URLs (⚠️ INSECURE)
|
||||||
|
```
|
||||||
|
|
||||||
|
**Or via environment variable:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
|
||||||
|
```
|
||||||
|
|
||||||
|
**Risks of allowing HTTP:**
|
||||||
|
- API keys and data transmitted in **plaintext** (vulnerable to interception)
|
||||||
|
- Susceptible to **man-in-the-middle (MITM) attacks**
|
||||||
|
- **NOT suitable for production** environments
|
||||||
|
|
||||||
|
**When to use HTTP:**
|
||||||
|
- ✅ Development/testing with local servers (http://localhost)
|
||||||
|
- ✅ Internal networks with trusted endpoints
|
||||||
|
- ✅ Testing account connectivity before obtaining HTTPS
|
||||||
|
- ❌ Production environments (use HTTPS only)
|
||||||
|
|
||||||
|
**Example error without this setting:**
|
||||||
|
```
|
||||||
|
Invalid base URL: invalid url scheme: http
|
||||||
|
```
|
||||||
|
|
||||||
|
If you disable URL validation or response header filtering, harden your network layer:
|
||||||
|
- Enforce an egress allowlist for upstream domains/IPs
|
||||||
|
- Block private/loopback/link-local ranges
|
||||||
|
- Enforce TLS-only outbound traffic
|
||||||
|
- Strip sensitive upstream response headers at the proxy
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 7. Run the application
|
||||||
|
./sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Development Mode
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Backend (with hot reload)
|
||||||
|
cd backend
|
||||||
|
go run ./cmd/server
|
||||||
|
|
||||||
|
# Frontend (with hot reload)
|
||||||
|
cd frontend
|
||||||
|
pnpm run dev
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Code Generation
|
||||||
|
|
||||||
|
When editing `backend/ent/schema`, regenerate Ent + Wire:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent
|
||||||
|
go generate ./cmd/server
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Simple Mode
|
||||||
|
|
||||||
|
Simple Mode is designed for individual developers or internal teams who want quick access without full SaaS features.
|
||||||
|
|
||||||
|
- Enable: Set environment variable `RUN_MODE=simple`
|
||||||
|
- Difference: Hides SaaS-related features and skips billing process
|
||||||
|
- Security note: In production, you must also set `SIMPLE_MODE_CONFIRM=true` to allow startup
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Antigravity Support
|
||||||
|
|
||||||
|
Sub2API supports [Antigravity](https://antigravity.so/) accounts. After authorization, dedicated endpoints are available for Claude and Gemini models.
|
||||||
|
|
||||||
|
### Dedicated Endpoints
|
||||||
|
|
||||||
|
| Endpoint | Model |
|
||||||
|
|----------|-------|
|
||||||
|
| `/antigravity/v1/messages` | Claude models |
|
||||||
|
| `/antigravity/v1beta/` | Gemini models |
|
||||||
|
|
||||||
|
### Claude Code Configuration
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
|
||||||
|
export ANTHROPIC_AUTH_TOKEN="sk-xxx"
|
||||||
|
```
|
||||||
|
|
||||||
|
### Hybrid Scheduling Mode
|
||||||
|
|
||||||
|
Antigravity accounts support optional **hybrid scheduling**. When enabled, the general endpoints `/v1/messages` and `/v1beta/` will also route requests to Antigravity accounts.
|
||||||
|
|
||||||
|
> **⚠️ Warning**: Anthropic Claude and Antigravity Claude **cannot be mixed within the same conversation context**. Use groups to isolate them properly.
|
||||||
|
|
||||||
|
### Known Issues
|
||||||
|
|
||||||
|
In Claude Code, Plan Mode cannot exit automatically. (Normally when using the native Claude API, after planning is complete, Claude Code will pop up options for users to approve or reject the plan.)
|
||||||
|
|
||||||
|
**Workaround**: Press `Shift + Tab` to manually exit Plan Mode, then type your response to approve or reject the plan.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Project Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
sub2api/
|
||||||
|
├── backend/ # Go backend service
|
||||||
|
│ ├── cmd/server/ # Application entry
|
||||||
|
│ ├── internal/ # Internal modules
|
||||||
|
│ │ ├── config/ # Configuration
|
||||||
|
│ │ ├── model/ # Data models
|
||||||
|
│ │ ├── service/ # Business logic
|
||||||
|
│ │ ├── handler/ # HTTP handlers
|
||||||
|
│ │ └── gateway/ # API gateway core
|
||||||
|
│ └── resources/ # Static resources
|
||||||
|
│
|
||||||
|
├── frontend/ # Vue 3 frontend
|
||||||
|
│ └── src/
|
||||||
|
│ ├── api/ # API calls
|
||||||
|
│ ├── stores/ # State management
|
||||||
|
│ ├── views/ # Page components
|
||||||
|
│ └── components/ # Reusable components
|
||||||
|
│
|
||||||
|
└── deploy/ # Deployment files
|
||||||
|
├── docker-compose.yml # Docker Compose configuration
|
||||||
|
├── .env.example # Environment variables for Docker Compose
|
||||||
|
├── config.example.yaml # Full config file for binary deployment
|
||||||
|
└── install.sh # One-click installation script
|
||||||
|
```
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
**If you find this project useful, please give it a star!**
|
||||||
|
|
||||||
|
</div>
|
||||||
463
README_CN.md
Normal file
463
README_CN.md
Normal file
@@ -0,0 +1,463 @@
|
|||||||
|
# Sub2API
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
[](https://golang.org/)
|
||||||
|
[](https://vuejs.org/)
|
||||||
|
[](https://www.postgresql.org/)
|
||||||
|
[](https://redis.io/)
|
||||||
|
[](https://www.docker.com/)
|
||||||
|
|
||||||
|
**AI API 网关平台 - 订阅配额分发管理**
|
||||||
|
|
||||||
|
[English](README.md) | 中文
|
||||||
|
|
||||||
|
</div>
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 在线体验
|
||||||
|
|
||||||
|
体验地址:**https://v2.pincc.ai/**
|
||||||
|
|
||||||
|
演示账号(共享演示环境;自建部署不会自动创建该账号):
|
||||||
|
|
||||||
|
| 邮箱 | 密码 |
|
||||||
|
|------|------|
|
||||||
|
| admin@sub2api.com | admin123 |
|
||||||
|
|
||||||
|
## 项目概述
|
||||||
|
|
||||||
|
Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅(如 Claude Code $200/月)的 API 配额。用户通过平台生成的 API Key 调用上游 AI 服务,平台负责鉴权、计费、负载均衡和请求转发。
|
||||||
|
|
||||||
|
## 核心功能
|
||||||
|
|
||||||
|
- **多账号管理** - 支持多种上游账号类型(OAuth、API Key)
|
||||||
|
- **API Key 分发** - 为用户生成和管理 API Key
|
||||||
|
- **精确计费** - Token 级别的用量追踪和成本计算
|
||||||
|
- **智能调度** - 智能账号选择,支持粘性会话
|
||||||
|
- **并发控制** - 用户级和账号级并发限制
|
||||||
|
- **速率限制** - 可配置的请求和 Token 速率限制
|
||||||
|
- **管理后台** - Web 界面进行监控和管理
|
||||||
|
|
||||||
|
## 技术栈
|
||||||
|
|
||||||
|
| 组件 | 技术 |
|
||||||
|
|------|------|
|
||||||
|
| 后端 | Go 1.25.5, Gin, Ent |
|
||||||
|
| 前端 | Vue 3.4+, Vite 5+, TailwindCSS |
|
||||||
|
| 数据库 | PostgreSQL 15+ |
|
||||||
|
| 缓存/队列 | Redis 7+ |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 文档
|
||||||
|
|
||||||
|
- 依赖安全:`docs/dependency-security.md`
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## OpenAI Responses 兼容注意事项
|
||||||
|
|
||||||
|
- 当请求包含 `function_call_output` 时,需要携带 `previous_response_id`,或在 `input` 中包含带 `call_id` 的 `tool_call`/`function_call`,或带非空 `id` 且与 `function_call_output.call_id` 匹配的 `item_reference`。
|
||||||
|
- 若依赖上游历史记录,网关会强制 `store=true` 并需要复用 `previous_response_id`,以避免出现 “No tool call found for function call output” 错误。
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 部署方式
|
||||||
|
|
||||||
|
### 方式一:脚本安装(推荐)
|
||||||
|
|
||||||
|
一键安装脚本,自动从 GitHub Releases 下载预编译的二进制文件。
|
||||||
|
|
||||||
|
#### 前置条件
|
||||||
|
|
||||||
|
- Linux 服务器(amd64 或 arm64)
|
||||||
|
- PostgreSQL 15+(已安装并运行)
|
||||||
|
- Redis 7+(已安装并运行)
|
||||||
|
- Root 权限
|
||||||
|
|
||||||
|
#### 安装步骤
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash
|
||||||
|
```
|
||||||
|
|
||||||
|
脚本会自动:
|
||||||
|
1. 检测系统架构
|
||||||
|
2. 下载最新版本
|
||||||
|
3. 安装二进制文件到 `/opt/sub2api`
|
||||||
|
4. 创建 systemd 服务
|
||||||
|
5. 配置系统用户和权限
|
||||||
|
|
||||||
|
#### 安装后配置
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. 启动服务
|
||||||
|
sudo systemctl start sub2api
|
||||||
|
|
||||||
|
# 2. 设置开机自启
|
||||||
|
sudo systemctl enable sub2api
|
||||||
|
|
||||||
|
# 3. 在浏览器中打开设置向导
|
||||||
|
# http://你的服务器IP:8080
|
||||||
|
```
|
||||||
|
|
||||||
|
设置向导将引导你完成:
|
||||||
|
- 数据库配置
|
||||||
|
- Redis 配置
|
||||||
|
- 管理员账号创建
|
||||||
|
|
||||||
|
#### 升级
|
||||||
|
|
||||||
|
可以直接在 **管理后台** 左上角点击 **检测更新** 按钮进行在线升级。
|
||||||
|
|
||||||
|
网页升级功能支持:
|
||||||
|
- 自动检测新版本
|
||||||
|
- 一键下载并应用更新
|
||||||
|
- 支持回滚
|
||||||
|
|
||||||
|
#### 常用命令
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 查看状态
|
||||||
|
sudo systemctl status sub2api
|
||||||
|
|
||||||
|
# 查看日志
|
||||||
|
sudo journalctl -u sub2api -f
|
||||||
|
|
||||||
|
# 重启服务
|
||||||
|
sudo systemctl restart sub2api
|
||||||
|
|
||||||
|
# 卸载
|
||||||
|
curl -sSL https://raw.githubusercontent.com/Wei-Shaw/sub2api/main/deploy/install.sh | sudo bash -s -- uninstall -y
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 方式二:Docker Compose
|
||||||
|
|
||||||
|
使用 Docker Compose 部署,包含 PostgreSQL 和 Redis 容器。
|
||||||
|
|
||||||
|
#### 前置条件
|
||||||
|
|
||||||
|
- Docker 20.10+
|
||||||
|
- Docker Compose v2+
|
||||||
|
|
||||||
|
#### 安装步骤
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. 克隆仓库
|
||||||
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
|
cd sub2api
|
||||||
|
|
||||||
|
# 2. 进入 deploy 目录
|
||||||
|
cd deploy
|
||||||
|
|
||||||
|
# 3. 复制环境配置文件
|
||||||
|
cp .env.example .env
|
||||||
|
|
||||||
|
# 4. 编辑配置(设置密码等)
|
||||||
|
nano .env
|
||||||
|
```
|
||||||
|
|
||||||
|
**`.env` 必须配置项:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# PostgreSQL 密码(必须修改!)
|
||||||
|
POSTGRES_PASSWORD=your_secure_password_here
|
||||||
|
|
||||||
|
# 可选:管理员账号
|
||||||
|
ADMIN_EMAIL=admin@example.com
|
||||||
|
ADMIN_PASSWORD=your_admin_password
|
||||||
|
|
||||||
|
# 可选:自定义端口
|
||||||
|
SERVER_PORT=8080
|
||||||
|
|
||||||
|
# 可选:安全配置
|
||||||
|
# 启用 URL 白名单验证(false 则跳过白名单检查,仅做基本格式校验)
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
|
||||||
|
# 关闭白名单时,是否允许 http:// URL(默认 false,只允许 https://)
|
||||||
|
# ⚠️ 警告:允许 HTTP 会暴露 API 密钥(明文传输)
|
||||||
|
# 仅建议在以下场景使用:
|
||||||
|
# - 开发/测试环境
|
||||||
|
# - 内部可信网络
|
||||||
|
# - 本地测试服务器(http://localhost)
|
||||||
|
# 生产环境:保持 false 或仅使用 HTTPS URL
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=false
|
||||||
|
|
||||||
|
# 是否允许私有 IP 地址用于上游/定价/CRS(内网部署时使用)
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_PRIVATE_HOSTS=false
|
||||||
|
```
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 5. 启动所有服务
|
||||||
|
docker-compose up -d
|
||||||
|
|
||||||
|
# 6. 查看状态
|
||||||
|
docker-compose ps
|
||||||
|
|
||||||
|
# 7. 查看日志
|
||||||
|
docker-compose logs -f sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 访问
|
||||||
|
|
||||||
|
在浏览器中打开 `http://你的服务器IP:8080`
|
||||||
|
|
||||||
|
#### 升级
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 拉取最新镜像并重建容器
|
||||||
|
docker-compose pull
|
||||||
|
docker-compose up -d
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 常用命令
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 停止所有服务
|
||||||
|
docker-compose down
|
||||||
|
|
||||||
|
# 重启
|
||||||
|
docker-compose restart
|
||||||
|
|
||||||
|
# 查看所有日志
|
||||||
|
docker-compose logs -f
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 方式三:源码编译
|
||||||
|
|
||||||
|
从源码编译安装,适合开发或定制需求。
|
||||||
|
|
||||||
|
#### 前置条件
|
||||||
|
|
||||||
|
- Go 1.21+
|
||||||
|
- Node.js 18+
|
||||||
|
- PostgreSQL 15+
|
||||||
|
- Redis 7+
|
||||||
|
|
||||||
|
#### 编译步骤
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1. 克隆仓库
|
||||||
|
git clone https://github.com/Wei-Shaw/sub2api.git
|
||||||
|
cd sub2api
|
||||||
|
|
||||||
|
# 2. 安装 pnpm(如果还没有安装)
|
||||||
|
npm install -g pnpm
|
||||||
|
|
||||||
|
# 3. 编译前端
|
||||||
|
cd frontend
|
||||||
|
pnpm install
|
||||||
|
pnpm run build
|
||||||
|
# 构建产物输出到 ../backend/internal/web/dist/
|
||||||
|
|
||||||
|
# 4. 编译后端(嵌入前端)
|
||||||
|
cd ../backend
|
||||||
|
go build -tags embed -o sub2api ./cmd/server
|
||||||
|
|
||||||
|
# 5. 创建配置文件
|
||||||
|
cp ../deploy/config.example.yaml ./config.yaml
|
||||||
|
|
||||||
|
# 6. 编辑配置
|
||||||
|
nano config.yaml
|
||||||
|
```
|
||||||
|
|
||||||
|
> **注意:** `-tags embed` 参数会将前端嵌入到二进制文件中。不使用此参数编译的程序将不包含前端界面。
|
||||||
|
|
||||||
|
**`config.yaml` 关键配置:**
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
server:
|
||||||
|
host: "0.0.0.0"
|
||||||
|
port: 8080
|
||||||
|
mode: "release"
|
||||||
|
|
||||||
|
database:
|
||||||
|
host: "localhost"
|
||||||
|
port: 5432
|
||||||
|
user: "postgres"
|
||||||
|
password: "your_password"
|
||||||
|
dbname: "sub2api"
|
||||||
|
|
||||||
|
redis:
|
||||||
|
host: "localhost"
|
||||||
|
port: 6379
|
||||||
|
password: ""
|
||||||
|
|
||||||
|
jwt:
|
||||||
|
secret: "change-this-to-a-secure-random-string"
|
||||||
|
expire_hour: 24
|
||||||
|
|
||||||
|
default:
|
||||||
|
user_concurrency: 5
|
||||||
|
user_balance: 0
|
||||||
|
api_key_prefix: "sk-"
|
||||||
|
rate_multiplier: 1.0
|
||||||
|
```
|
||||||
|
|
||||||
|
`config.yaml` 还支持以下安全相关配置:
|
||||||
|
|
||||||
|
- `cors.allowed_origins` 配置 CORS 白名单
|
||||||
|
- `security.url_allowlist` 配置上游/价格数据/CRS 主机白名单
|
||||||
|
- `security.url_allowlist.enabled` 可关闭 URL 校验(慎用)
|
||||||
|
- `security.url_allowlist.allow_insecure_http` 关闭校验时允许 HTTP URL
|
||||||
|
- `security.url_allowlist.allow_private_hosts` 允许私有/本地 IP 地址
|
||||||
|
- `security.response_headers.enabled` 可启用可配置响应头过滤(关闭时使用默认白名单)
|
||||||
|
- `security.csp` 配置 Content-Security-Policy
|
||||||
|
- `billing.circuit_breaker` 计费异常时 fail-closed
|
||||||
|
- `server.trusted_proxies` 启用可信代理解析 X-Forwarded-For
|
||||||
|
- `turnstile.required` 在 release 模式强制启用 Turnstile
|
||||||
|
|
||||||
|
**⚠️ 安全警告:HTTP URL 配置**
|
||||||
|
|
||||||
|
当 `security.url_allowlist.enabled=false` 时,系统默认执行最小 URL 校验,**拒绝 HTTP URL**,仅允许 HTTPS。要允许 HTTP URL(例如用于开发或内网测试),必须显式设置:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
security:
|
||||||
|
url_allowlist:
|
||||||
|
enabled: false # 禁用白名单检查
|
||||||
|
allow_insecure_http: true # 允许 HTTP URL(⚠️ 不安全)
|
||||||
|
```
|
||||||
|
|
||||||
|
**或通过环境变量:**
|
||||||
|
|
||||||
|
```bash
|
||||||
|
SECURITY_URL_ALLOWLIST_ENABLED=false
|
||||||
|
SECURITY_URL_ALLOWLIST_ALLOW_INSECURE_HTTP=true
|
||||||
|
```
|
||||||
|
|
||||||
|
**允许 HTTP 的风险:**
|
||||||
|
- API 密钥和数据以**明文传输**(可被截获)
|
||||||
|
- 易受**中间人攻击 (MITM)**
|
||||||
|
- **不适合生产环境**
|
||||||
|
|
||||||
|
**适用场景:**
|
||||||
|
- ✅ 开发/测试环境的本地服务器(http://localhost)
|
||||||
|
- ✅ 内网可信端点
|
||||||
|
- ✅ 获取 HTTPS 前测试账号连通性
|
||||||
|
- ❌ 生产环境(仅使用 HTTPS)
|
||||||
|
|
||||||
|
**未设置此项时的错误示例:**
|
||||||
|
```
|
||||||
|
Invalid base URL: invalid url scheme: http
|
||||||
|
```
|
||||||
|
|
||||||
|
如关闭 URL 校验或响应头过滤,请加强网络层防护:
|
||||||
|
- 出站访问白名单限制上游域名/IP
|
||||||
|
- 阻断私网/回环/链路本地地址
|
||||||
|
- 强制仅允许 TLS 出站
|
||||||
|
- 在反向代理层移除敏感响应头
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 7. 运行应用
|
||||||
|
./sub2api
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 开发模式
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 后端(支持热重载)
|
||||||
|
cd backend
|
||||||
|
go run ./cmd/server
|
||||||
|
|
||||||
|
# 前端(支持热重载)
|
||||||
|
cd frontend
|
||||||
|
pnpm run dev
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 代码生成
|
||||||
|
|
||||||
|
修改 `backend/ent/schema` 后,需要重新生成 Ent + Wire:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
cd backend
|
||||||
|
go generate ./ent
|
||||||
|
go generate ./cmd/server
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## 简易模式
|
||||||
|
|
||||||
|
简易模式适合个人开发者或内部团队快速使用,不依赖完整 SaaS 功能。
|
||||||
|
|
||||||
|
- 启用方式:设置环境变量 `RUN_MODE=simple`
|
||||||
|
- 功能差异:隐藏 SaaS 相关功能,跳过计费流程
|
||||||
|
- 安全注意事项:生产环境需同时设置 `SIMPLE_MODE_CONFIRM=true` 才允许启动
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
## Antigravity 使用说明
|
||||||
|
|
||||||
|
Sub2API 支持 [Antigravity](https://antigravity.so/) 账户,授权后可通过专用端点访问 Claude 和 Gemini 模型。
|
||||||
|
|
||||||
|
### 专用端点
|
||||||
|
|
||||||
|
| 端点 | 模型 |
|
||||||
|
|------|------|
|
||||||
|
| `/antigravity/v1/messages` | Claude 模型 |
|
||||||
|
| `/antigravity/v1beta/` | Gemini 模型 |
|
||||||
|
|
||||||
|
### Claude Code 配置示例
|
||||||
|
|
||||||
|
```bash
|
||||||
|
export ANTHROPIC_BASE_URL="http://localhost:8080/antigravity"
|
||||||
|
export ANTHROPIC_AUTH_TOKEN="sk-xxx"
|
||||||
|
```
|
||||||
|
|
||||||
|
### 混合调度模式
|
||||||
|
|
||||||
|
Antigravity 账户支持可选的**混合调度**功能。开启后,通用端点 `/v1/messages` 和 `/v1beta/` 也会调度该账户。
|
||||||
|
|
||||||
|
> **⚠️ 注意**:Anthropic Claude 和 Antigravity Claude **不能在同一上下文中混合使用**,请通过分组功能做好隔离。
|
||||||
|
|
||||||
|
|
||||||
|
### 已知问题
|
||||||
|
在 Claude Code 中,无法自动退出 Plan Mode。(正常使用原生 Claude API 时,Plan 完成后,Claude Code 会弹出选项让用户同意或拒绝 Plan。)
|
||||||
|
解决办法:按 Shift + Tab 手动退出 Plan Mode,然后输入内容告诉 Claude Code 同意或拒绝 Plan。
|
||||||
|
---
|
||||||
|
|
||||||
|
## 项目结构
|
||||||
|
|
||||||
|
```
|
||||||
|
sub2api/
|
||||||
|
├── backend/ # Go 后端服务
|
||||||
|
│ ├── cmd/server/ # 应用入口
|
||||||
|
│ ├── internal/ # 内部模块
|
||||||
|
│ │ ├── config/ # 配置管理
|
||||||
|
│ │ ├── model/ # 数据模型
|
||||||
|
│ │ ├── service/ # 业务逻辑
|
||||||
|
│ │ ├── handler/ # HTTP 处理器
|
||||||
|
│ │ └── gateway/ # API 网关核心
|
||||||
|
│ └── resources/ # 静态资源
|
||||||
|
│
|
||||||
|
├── frontend/ # Vue 3 前端
|
||||||
|
│ └── src/
|
||||||
|
│ ├── api/ # API 调用
|
||||||
|
│ ├── stores/ # 状态管理
|
||||||
|
│ ├── views/ # 页面组件
|
||||||
|
│ └── components/ # 通用组件
|
||||||
|
│
|
||||||
|
└── deploy/ # 部署文件
|
||||||
|
├── docker-compose.yml # Docker Compose 配置
|
||||||
|
├── .env.example # Docker Compose 环境变量
|
||||||
|
├── config.example.yaml # 二进制部署完整配置文件
|
||||||
|
└── install.sh # 一键安装脚本
|
||||||
|
```
|
||||||
|
|
||||||
|
## 许可证
|
||||||
|
|
||||||
|
MIT License
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
<div align="center">
|
||||||
|
|
||||||
|
**如果觉得有用,请给个 Star 支持一下!**
|
||||||
|
|
||||||
|
</div>
|
||||||
2
backend/.dockerignore
Normal file
2
backend/.dockerignore
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
.cache/
|
||||||
|
.DS_Store
|
||||||
599
backend/.golangci.yml
Normal file
599
backend/.golangci.yml
Normal file
@@ -0,0 +1,599 @@
|
|||||||
|
version: "2"
|
||||||
|
|
||||||
|
linters:
|
||||||
|
default: none
|
||||||
|
enable:
|
||||||
|
- depguard
|
||||||
|
- errcheck
|
||||||
|
- govet
|
||||||
|
- ineffassign
|
||||||
|
- staticcheck
|
||||||
|
- unused
|
||||||
|
|
||||||
|
settings:
|
||||||
|
depguard:
|
||||||
|
rules:
|
||||||
|
# Enforce: service must not depend on repository.
|
||||||
|
service-no-repository:
|
||||||
|
list-mode: original
|
||||||
|
files:
|
||||||
|
- "**/internal/service/**"
|
||||||
|
- "!**/internal/service/ops_aggregation_service.go"
|
||||||
|
- "!**/internal/service/ops_alert_evaluator_service.go"
|
||||||
|
- "!**/internal/service/ops_cleanup_service.go"
|
||||||
|
- "!**/internal/service/ops_metrics_collector.go"
|
||||||
|
- "!**/internal/service/ops_scheduled_report_service.go"
|
||||||
|
- "!**/internal/service/wire.go"
|
||||||
|
deny:
|
||||||
|
- pkg: github.com/Wei-Shaw/sub2api/internal/repository
|
||||||
|
desc: "service must not import repository"
|
||||||
|
- pkg: gorm.io/gorm
|
||||||
|
desc: "service must not import gorm"
|
||||||
|
- pkg: github.com/redis/go-redis/v9
|
||||||
|
desc: "service must not import redis"
|
||||||
|
handler-no-repository:
|
||||||
|
list-mode: original
|
||||||
|
files:
|
||||||
|
- "**/internal/handler/**"
|
||||||
|
deny:
|
||||||
|
- pkg: github.com/Wei-Shaw/sub2api/internal/repository
|
||||||
|
desc: "handler must not import repository"
|
||||||
|
- pkg: gorm.io/gorm
|
||||||
|
desc: "handler must not import gorm"
|
||||||
|
- pkg: github.com/redis/go-redis/v9
|
||||||
|
desc: "handler must not import redis"
|
||||||
|
errcheck:
|
||||||
|
# Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
|
||||||
|
# Such cases aren't reported by default.
|
||||||
|
# Default: false
|
||||||
|
check-type-assertions: true
|
||||||
|
# report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`.
|
||||||
|
# Such cases aren't reported by default.
|
||||||
|
# Default: false
|
||||||
|
check-blank: false
|
||||||
|
# To disable the errcheck built-in exclude list.
|
||||||
|
# See `-excludeonly` option in https://github.com/kisielk/errcheck#excluding-functions for details.
|
||||||
|
# Default: false
|
||||||
|
disable-default-exclusions: true
|
||||||
|
# List of functions to exclude from checking, where each entry is a single function to exclude.
|
||||||
|
# See https://github.com/kisielk/errcheck#excluding-functions for details.
|
||||||
|
exclude-functions:
|
||||||
|
- io/ioutil.ReadFile
|
||||||
|
- io.Copy(*bytes.Buffer)
|
||||||
|
- io.Copy(os.Stdout)
|
||||||
|
- fmt.Println
|
||||||
|
- fmt.Print
|
||||||
|
- fmt.Printf
|
||||||
|
- fmt.Fprint
|
||||||
|
- fmt.Fprintf
|
||||||
|
- fmt.Fprintln
|
||||||
|
# Display function signature instead of selector.
|
||||||
|
# Default: false
|
||||||
|
verbose: true
|
||||||
|
ineffassign:
|
||||||
|
# Check escaping variables of type error, may cause false positives.
|
||||||
|
# Default: false
|
||||||
|
check-escaping-errors: true
|
||||||
|
staticcheck:
|
||||||
|
# https://staticcheck.dev/docs/configuration/options/#dot_import_whitelist
|
||||||
|
# Default: ["github.com/mmcloughlin/avo/build", "github.com/mmcloughlin/avo/operand", "github.com/mmcloughlin/avo/reg"]
|
||||||
|
dot-import-whitelist:
|
||||||
|
- fmt
|
||||||
|
# https://staticcheck.dev/docs/configuration/options/#initialisms
|
||||||
|
# Default: ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"]
|
||||||
|
initialisms: [ "ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS" ]
|
||||||
|
# https://staticcheck.dev/docs/configuration/options/#http_status_code_whitelist
|
||||||
|
# Default: ["200", "400", "404", "500"]
|
||||||
|
http-status-code-whitelist: [ "200", "400", "404", "500" ]
|
||||||
|
# SAxxxx checks in https://staticcheck.dev/docs/configuration/options/#checks
|
||||||
|
# Example (to disable some checks): [ "all", "-SA1000", "-SA1001"]
|
||||||
|
# Run `GL_DEBUG=staticcheck golangci-lint run --enable=staticcheck` to see all available checks and enabled by config checks.
|
||||||
|
# Default: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"]
|
||||||
|
# Temporarily disable style checks to allow CI to pass
|
||||||
|
checks:
|
||||||
|
- all
|
||||||
|
- -ST1000 # Package comment format
|
||||||
|
- -ST1003 # Poorly chosen identifier (ApiKey vs APIKey)
|
||||||
|
- -ST1020 # Comment on exported method format
|
||||||
|
- -ST1021 # Comment on exported type format
|
||||||
|
- -ST1022 # Comment on exported variable format
|
||||||
|
# Invalid regular expression.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1000
|
||||||
|
- SA1000
|
||||||
|
# Invalid template.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1001
|
||||||
|
- SA1001
|
||||||
|
# Invalid format in 'time.Parse'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1002
|
||||||
|
- SA1002
|
||||||
|
# Unsupported argument to functions in 'encoding/binary'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1003
|
||||||
|
- SA1003
|
||||||
|
# Suspiciously small untyped constant in 'time.Sleep'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1004
|
||||||
|
- SA1004
|
||||||
|
# Invalid first argument to 'exec.Command'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1005
|
||||||
|
- SA1005
|
||||||
|
# 'Printf' with dynamic first argument and no further arguments.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1006
|
||||||
|
- SA1006
|
||||||
|
# Invalid URL in 'net/url.Parse'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1007
|
||||||
|
- SA1007
|
||||||
|
# Non-canonical key in 'http.Header' map.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1008
|
||||||
|
- SA1008
|
||||||
|
# '(*regexp.Regexp).FindAll' called with 'n == 0', which will always return zero results.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1010
|
||||||
|
- SA1010
|
||||||
|
# Various methods in the "strings" package expect valid UTF-8, but invalid input is provided.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1011
|
||||||
|
- SA1011
|
||||||
|
# A nil 'context.Context' is being passed to a function, consider using 'context.TODO' instead.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1012
|
||||||
|
- SA1012
|
||||||
|
# 'io.Seeker.Seek' is being called with the whence constant as the first argument, but it should be the second.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1013
|
||||||
|
- SA1013
|
||||||
|
# Non-pointer value passed to 'Unmarshal' or 'Decode'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1014
|
||||||
|
- SA1014
|
||||||
|
# Using 'time.Tick' in a way that will leak. Consider using 'time.NewTicker', and only use 'time.Tick' in tests, commands and endless functions.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1015
|
||||||
|
- SA1015
|
||||||
|
# Trapping a signal that cannot be trapped.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1016
|
||||||
|
- SA1016
|
||||||
|
# Channels used with 'os/signal.Notify' should be buffered.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1017
|
||||||
|
- SA1017
|
||||||
|
# 'strings.Replace' called with 'n == 0', which does nothing.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1018
|
||||||
|
- SA1018
|
||||||
|
# Using a deprecated function, variable, constant or field.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1019
|
||||||
|
- SA1019
|
||||||
|
# Using an invalid host:port pair with a 'net.Listen'-related function.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1020
|
||||||
|
- SA1020
|
||||||
|
# Using 'bytes.Equal' to compare two 'net.IP'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1021
|
||||||
|
- SA1021
|
||||||
|
# Modifying the buffer in an 'io.Writer' implementation.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1023
|
||||||
|
- SA1023
|
||||||
|
# A string cutset contains duplicate characters.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1024
|
||||||
|
- SA1024
|
||||||
|
# It is not possible to use '(*time.Timer).Reset''s return value correctly.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1025
|
||||||
|
- SA1025
|
||||||
|
# Cannot marshal channels or functions.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1026
|
||||||
|
- SA1026
|
||||||
|
# Atomic access to 64-bit variable must be 64-bit aligned.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1027
|
||||||
|
- SA1027
|
||||||
|
# 'sort.Slice' can only be used on slices.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1028
|
||||||
|
- SA1028
|
||||||
|
# Inappropriate key in call to 'context.WithValue'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1029
|
||||||
|
- SA1029
|
||||||
|
# Invalid argument in call to a 'strconv' function.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1030
|
||||||
|
- SA1030
|
||||||
|
# Overlapping byte slices passed to an encoder.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1031
|
||||||
|
- SA1031
|
||||||
|
# Wrong order of arguments to 'errors.Is'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA1032
|
||||||
|
- SA1032
|
||||||
|
# 'sync.WaitGroup.Add' called inside the goroutine, leading to a race condition.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA2000
|
||||||
|
- SA2000
|
||||||
|
# Empty critical section, did you mean to defer the unlock?
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA2001
|
||||||
|
- SA2001
|
||||||
|
# Called 'testing.T.FailNow' or 'SkipNow' in a goroutine, which isn't allowed.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA2002
|
||||||
|
- SA2002
|
||||||
|
# Deferred 'Lock' right after locking, likely meant to defer 'Unlock' instead.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA2003
|
||||||
|
- SA2003
|
||||||
|
# 'TestMain' doesn't call 'os.Exit', hiding test failures.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA3000
|
||||||
|
- SA3000
|
||||||
|
# Assigning to 'b.N' in benchmarks distorts the results.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA3001
|
||||||
|
- SA3001
|
||||||
|
# Binary operator has identical expressions on both sides.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4000
|
||||||
|
- SA4000
|
||||||
|
# '&*x' gets simplified to 'x', it does not copy 'x'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4001
|
||||||
|
- SA4001
|
||||||
|
# Comparing unsigned values against negative values is pointless.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4003
|
||||||
|
- SA4003
|
||||||
|
# The loop exits unconditionally after one iteration.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4004
|
||||||
|
- SA4004
|
||||||
|
# Field assignment that will never be observed. Did you mean to use a pointer receiver?.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4005
|
||||||
|
- SA4005
|
||||||
|
# A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4006
|
||||||
|
- SA4006
|
||||||
|
# The variable in the loop condition never changes, are you incrementing the wrong variable?.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4008
|
||||||
|
- SA4008
|
||||||
|
# A function argument is overwritten before its first use.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4009
|
||||||
|
- SA4009
|
||||||
|
# The result of 'append' will never be observed anywhere.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4010
|
||||||
|
- SA4010
|
||||||
|
# Break statement with no effect. Did you mean to break out of an outer loop?.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4011
|
||||||
|
- SA4011
|
||||||
|
# Comparing a value against NaN even though no value is equal to NaN.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4012
|
||||||
|
- SA4012
|
||||||
|
# Negating a boolean twice ('!!b') is the same as writing 'b'. This is either redundant, or a typo.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4013
|
||||||
|
- SA4013
|
||||||
|
# An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4014
|
||||||
|
- SA4014
|
||||||
|
# Calling functions like 'math.Ceil' on floats converted from integers doesn't do anything useful.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4015
|
||||||
|
- SA4015
|
||||||
|
# Certain bitwise operations, such as 'x ^ 0', do not do anything useful.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4016
|
||||||
|
- SA4016
|
||||||
|
# Discarding the return values of a function without side effects, making the call pointless.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4017
|
||||||
|
- SA4017
|
||||||
|
# Self-assignment of variables.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4018
|
||||||
|
- SA4018
|
||||||
|
# Multiple, identical build constraints in the same file.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4019
|
||||||
|
- SA4019
|
||||||
|
# Unreachable case clause in a type switch.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4020
|
||||||
|
- SA4020
|
||||||
|
# "x = append(y)" is equivalent to "x = y".
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4021
|
||||||
|
- SA4021
|
||||||
|
# Comparing the address of a variable against nil.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4022
|
||||||
|
- SA4022
|
||||||
|
# Impossible comparison of interface value with untyped nil.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4023
|
||||||
|
- SA4023
|
||||||
|
# Checking for impossible return value from a builtin function.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4024
|
||||||
|
- SA4024
|
||||||
|
# Integer division of literals that results in zero.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4025
|
||||||
|
- SA4025
|
||||||
|
# Go constants cannot express negative zero.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4026
|
||||||
|
- SA4026
|
||||||
|
# '(*net/url.URL).Query' returns a copy, modifying it doesn't change the URL.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4027
|
||||||
|
- SA4027
|
||||||
|
# 'x % 1' is always zero.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4028
|
||||||
|
- SA4028
|
||||||
|
# Ineffective attempt at sorting slice.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4029
|
||||||
|
- SA4029
|
||||||
|
# Ineffective attempt at generating random number.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4030
|
||||||
|
- SA4030
|
||||||
|
# Checking never-nil value against nil.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4031
|
||||||
|
- SA4031
|
||||||
|
# Comparing 'runtime.GOOS' or 'runtime.GOARCH' against impossible value.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA4032
|
||||||
|
- SA4032
|
||||||
|
# Assignment to nil map.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5000
|
||||||
|
- SA5000
|
||||||
|
# Deferring 'Close' before checking for a possible error.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5001
|
||||||
|
- SA5001
|
||||||
|
# The empty for loop ("for {}") spins and can block the scheduler.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5002
|
||||||
|
- SA5002
|
||||||
|
# Defers in infinite loops will never execute.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5003
|
||||||
|
- SA5003
|
||||||
|
# "for { select { ..." with an empty default branch spins.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5004
|
||||||
|
- SA5004
|
||||||
|
# The finalizer references the finalized object, preventing garbage collection.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5005
|
||||||
|
- SA5005
|
||||||
|
# Infinite recursive call.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5007
|
||||||
|
- SA5007
|
||||||
|
# Invalid struct tag.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5008
|
||||||
|
- SA5008
|
||||||
|
# Invalid Printf call.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5009
|
||||||
|
- SA5009
|
||||||
|
# Impossible type assertion.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5010
|
||||||
|
- SA5010
|
||||||
|
# Possible nil pointer dereference.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5011
|
||||||
|
- SA5011
|
||||||
|
# Passing odd-sized slice to function expecting even size.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA5012
|
||||||
|
- SA5012
|
||||||
|
# Using 'regexp.Match' or related in a loop, should use 'regexp.Compile'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA6000
|
||||||
|
- SA6000
|
||||||
|
# Missing an optimization opportunity when indexing maps by byte slices.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA6001
|
||||||
|
- SA6001
|
||||||
|
# Storing non-pointer values in 'sync.Pool' allocates memory.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA6002
|
||||||
|
- SA6002
|
||||||
|
# Converting a string to a slice of runes before ranging over it.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA6003
|
||||||
|
- SA6003
|
||||||
|
# Inefficient string comparison with 'strings.ToLower' or 'strings.ToUpper'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA6005
|
||||||
|
- SA6005
|
||||||
|
# Using io.WriteString to write '[]byte'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA6006
|
||||||
|
- SA6006
|
||||||
|
# Defers in range loops may not run when you expect them to.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9001
|
||||||
|
- SA9001
|
||||||
|
# Using a non-octal 'os.FileMode' that looks like it was meant to be in octal.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9002
|
||||||
|
- SA9002
|
||||||
|
# Empty body in an if or else branch.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9003
|
||||||
|
- SA9003
|
||||||
|
# Only the first constant has an explicit type.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9004
|
||||||
|
- SA9004
|
||||||
|
# Trying to marshal a struct with no public fields nor custom marshaling.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9005
|
||||||
|
- SA9005
|
||||||
|
# Dubious bit shifting of a fixed size integer value.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9006
|
||||||
|
- SA9006
|
||||||
|
# Deleting a directory that shouldn't be deleted.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9007
|
||||||
|
- SA9007
|
||||||
|
# 'else' branch of a type assertion is probably not reading the right value.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9008
|
||||||
|
- SA9008
|
||||||
|
# Ineffectual Go compiler directive.
|
||||||
|
# https://staticcheck.dev/docs/checks/#SA9009
|
||||||
|
- SA9009
|
||||||
|
# NOTE: ST1000, ST1001, ST1003, ST1020, ST1021, ST1022 are disabled above
|
||||||
|
# Incorrectly formatted error string.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1005
|
||||||
|
- ST1005
|
||||||
|
# Poorly chosen receiver name.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1006
|
||||||
|
- ST1006
|
||||||
|
# A function's error value should be its last return value.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1008
|
||||||
|
- ST1008
|
||||||
|
# Poorly chosen name for variable of type 'time.Duration'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1011
|
||||||
|
- ST1011
|
||||||
|
# Poorly chosen name for error variable.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1012
|
||||||
|
- ST1012
|
||||||
|
# Should use constants for HTTP error codes, not magic numbers.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1013
|
||||||
|
- ST1013
|
||||||
|
# A switch's default case should be the first or last case.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1015
|
||||||
|
- ST1015
|
||||||
|
# Use consistent method receiver names.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1016
|
||||||
|
- ST1016
|
||||||
|
# Don't use Yoda conditions.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1017
|
||||||
|
- ST1017
|
||||||
|
# Avoid zero-width and control characters in string literals.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1018
|
||||||
|
- ST1018
|
||||||
|
# Importing the same package multiple times.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1019
|
||||||
|
- ST1019
|
||||||
|
# NOTE: ST1020, ST1021, ST1022 removed (disabled above)
|
||||||
|
# Redundant type in variable declaration.
|
||||||
|
# https://staticcheck.dev/docs/checks/#ST1023
|
||||||
|
- ST1023
|
||||||
|
# Use plain channel send or receive instead of single-case select.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1000
|
||||||
|
- S1000
|
||||||
|
# Replace for loop with call to copy.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1001
|
||||||
|
- S1001
|
||||||
|
# Omit comparison with boolean constant.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1002
|
||||||
|
- S1002
|
||||||
|
# Replace call to 'strings.Index' with 'strings.Contains'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1003
|
||||||
|
- S1003
|
||||||
|
# Replace call to 'bytes.Compare' with 'bytes.Equal'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1004
|
||||||
|
- S1004
|
||||||
|
# Drop unnecessary use of the blank identifier.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1005
|
||||||
|
- S1005
|
||||||
|
# Use "for { ... }" for infinite loops.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1006
|
||||||
|
- S1006
|
||||||
|
# Simplify regular expression by using raw string literal.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1007
|
||||||
|
- S1007
|
||||||
|
# Simplify returning boolean expression.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1008
|
||||||
|
- S1008
|
||||||
|
# Omit redundant nil check on slices, maps, and channels.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1009
|
||||||
|
- S1009
|
||||||
|
# Omit default slice index.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1010
|
||||||
|
- S1010
|
||||||
|
# Use a single 'append' to concatenate two slices.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1011
|
||||||
|
- S1011
|
||||||
|
# Replace 'time.Now().Sub(x)' with 'time.Since(x)'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1012
|
||||||
|
- S1012
|
||||||
|
# Use a type conversion instead of manually copying struct fields.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1016
|
||||||
|
- S1016
|
||||||
|
# Replace manual trimming with 'strings.TrimPrefix'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1017
|
||||||
|
- S1017
|
||||||
|
# Use "copy" for sliding elements.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1018
|
||||||
|
- S1018
|
||||||
|
# Simplify "make" call by omitting redundant arguments.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1019
|
||||||
|
- S1019
|
||||||
|
# Omit redundant nil check in type assertion.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1020
|
||||||
|
- S1020
|
||||||
|
# Merge variable declaration and assignment.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1021
|
||||||
|
- S1021
|
||||||
|
# Omit redundant control flow.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1023
|
||||||
|
- S1023
|
||||||
|
# Replace 'x.Sub(time.Now())' with 'time.Until(x)'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1024
|
||||||
|
- S1024
|
||||||
|
# Don't use 'fmt.Sprintf("%s", x)' unnecessarily.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1025
|
||||||
|
- S1025
|
||||||
|
# Simplify error construction with 'fmt.Errorf'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1028
|
||||||
|
- S1028
|
||||||
|
# Range over the string directly.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1029
|
||||||
|
- S1029
|
||||||
|
# Use 'bytes.Buffer.String' or 'bytes.Buffer.Bytes'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1030
|
||||||
|
- S1030
|
||||||
|
# Omit redundant nil check around loop.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1031
|
||||||
|
- S1031
|
||||||
|
# Use 'sort.Ints(x)', 'sort.Float64s(x)', and 'sort.Strings(x)'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1032
|
||||||
|
- S1032
|
||||||
|
# Unnecessary guard around call to "delete".
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1033
|
||||||
|
- S1033
|
||||||
|
# Use result of type assertion to simplify cases.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1034
|
||||||
|
- S1034
|
||||||
|
# Redundant call to 'net/http.CanonicalHeaderKey' in method call on 'net/http.Header'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1035
|
||||||
|
- S1035
|
||||||
|
# Unnecessary guard around map access.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1036
|
||||||
|
- S1036
|
||||||
|
# Elaborate way of sleeping.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1037
|
||||||
|
- S1037
|
||||||
|
# Unnecessarily complex way of printing formatted string.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1038
|
||||||
|
- S1038
|
||||||
|
# Unnecessary use of 'fmt.Sprint'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1039
|
||||||
|
- S1039
|
||||||
|
# Type assertion to current type.
|
||||||
|
# https://staticcheck.dev/docs/checks/#S1040
|
||||||
|
- S1040
|
||||||
|
# Apply De Morgan's law.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1001
|
||||||
|
- QF1001
|
||||||
|
# Convert untagged switch to tagged switch.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1002
|
||||||
|
- QF1002
|
||||||
|
# Convert if/else-if chain to tagged switch.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1003
|
||||||
|
- QF1003
|
||||||
|
# Use 'strings.ReplaceAll' instead of 'strings.Replace' with 'n == -1'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1004
|
||||||
|
- QF1004
|
||||||
|
# Expand call to 'math.Pow'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1005
|
||||||
|
- QF1005
|
||||||
|
# Lift 'if'+'break' into loop condition.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1006
|
||||||
|
- QF1006
|
||||||
|
# Merge conditional assignment into variable declaration.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1007
|
||||||
|
- QF1007
|
||||||
|
# Omit embedded fields from selector expression.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1008
|
||||||
|
- QF1008
|
||||||
|
# Use 'time.Time.Equal' instead of '==' operator.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1009
|
||||||
|
- QF1009
|
||||||
|
# Convert slice of bytes to string when printing it.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1010
|
||||||
|
- QF1010
|
||||||
|
# Omit redundant type from variable declaration.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1011
|
||||||
|
- QF1011
|
||||||
|
# Use 'fmt.Fprintf(x, ...)' instead of 'x.Write(fmt.Sprintf(...))'.
|
||||||
|
# https://staticcheck.dev/docs/checks/#QF1012
|
||||||
|
- QF1012
|
||||||
|
unused:
|
||||||
|
# Mark all struct fields that have been written to as used.
|
||||||
|
# Default: true
|
||||||
|
field-writes-are-uses: false
|
||||||
|
# Treat IncDec statement (e.g. `i++` or `i--`) as both read and write operation instead of just write.
|
||||||
|
# Default: false
|
||||||
|
post-statements-are-reads: true
|
||||||
|
# Mark all exported fields as used.
|
||||||
|
# Default: true
|
||||||
|
exported-fields-are-used: false
|
||||||
|
# Mark all function parameters as used.
|
||||||
|
# Default: true
|
||||||
|
parameters-are-used: true
|
||||||
|
# Mark all local variables as used.
|
||||||
|
# Default: true
|
||||||
|
local-variables-are-used: false
|
||||||
|
# Mark all identifiers inside generated files as used.
|
||||||
|
# Default: true
|
||||||
|
generated-is-used: false
|
||||||
|
|
||||||
|
formatters:
|
||||||
|
enable:
|
||||||
|
- gofmt
|
||||||
|
settings:
|
||||||
|
gofmt:
|
||||||
|
# Simplify code: gofmt with `-s` option.
|
||||||
|
# Default: true
|
||||||
|
simplify: false
|
||||||
|
# Apply the rewrite rules to the source before reformatting.
|
||||||
|
# https://pkg.go.dev/cmd/gofmt
|
||||||
|
# Default: []
|
||||||
|
rewrite-rules:
|
||||||
|
- pattern: 'interface{}'
|
||||||
|
replacement: 'any'
|
||||||
|
- pattern: 'a[b:len(a)]'
|
||||||
|
replacement: 'a[b:]'
|
||||||
24
backend/Dockerfile
Normal file
24
backend/Dockerfile
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
# Build stage: compile the Go binary with the full toolchain.
FROM golang:1.25.5-alpine AS builder

WORKDIR /app

# git is required for fetching module dependencies.
RUN apk add --no-cache git

# Copy go.mod and go.sum first so the dependency download layer is
# cached independently of source-code changes.
COPY go.mod go.sum ./

# Download dependencies
RUN go mod download

# Copy the source code
COPY . .

# Build a statically linked binary so it runs on a minimal base image.
RUN CGO_ENABLED=0 go build -o main cmd/server/main.go

# Runtime stage: only the binary and CA certificates, no toolchain.
FROM alpine:3.21

WORKDIR /app

# CA certificates are needed for outbound HTTPS calls.
RUN apk add --no-cache ca-certificates

COPY --from=builder /app/main .

# Expose the API port
EXPOSE 8080

# Run the application
CMD ["./main"]
|
||||||
17
backend/Makefile
Normal file
17
backend/Makefile
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# Build and test entry points for the Go backend.
# Targets are phony: none of them produce a file named after the target.
.PHONY: build test test-unit test-integration test-e2e

# Compile the server binary into bin/.
build:
	go build -o bin/server ./cmd/server

# Run the full test suite, then the linter.
test:
	go test ./...
	golangci-lint run ./...

# Run only tests guarded by the `unit` build tag.
test-unit:
	go test -tags=unit ./...

# Run only tests guarded by the `integration` build tag.
test-integration:
	go test -tags=integration ./...

# Run only tests guarded by the `e2e` build tag.
test-e2e:
	go test -tags=e2e ./...
|
||||||
57
backend/cmd/jwtgen/main.go
Normal file
57
backend/cmd/jwtgen/main.go
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
email := flag.String("email", "", "Admin email to issue a JWT for (defaults to first active admin)")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
cfg, err := config.Load()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
client, sqlDB, err := repository.InitEnt(cfg)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to init db: %v", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := client.Close(); err != nil {
|
||||||
|
log.Printf("failed to close db: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
userRepo := repository.NewUserRepository(client, sqlDB)
|
||||||
|
authService := service.NewAuthService(userRepo, cfg, nil, nil, nil, nil, nil)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var user *service.User
|
||||||
|
if *email != "" {
|
||||||
|
user, err = userRepo.GetByEmail(ctx, *email)
|
||||||
|
} else {
|
||||||
|
user, err = userRepo.GetFirstAdmin(ctx)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to resolve admin user: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
token, err := authService.GenerateToken(user)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to generate token: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf("ADMIN_EMAIL=%s\nADMIN_USER_ID=%d\nJWT=%s\n", user.Email, user.ID, token)
|
||||||
|
}
|
||||||
1
backend/cmd/server/VERSION
Normal file
1
backend/cmd/server/VERSION
Normal file
@@ -0,0 +1 @@
|
|||||||
|
0.1.46
|
||||||
155
backend/cmd/server/main.go
Normal file
155
backend/cmd/server/main.go
Normal file
@@ -0,0 +1,155 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
//go:generate go run github.com/google/wire/cmd/wire
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
_ "embed"
|
||||||
|
"errors"
|
||||||
|
"flag"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/setup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/web"
|
||||||
|
|
||||||
|
"github.com/gin-gonic/gin"
|
||||||
|
)
|
||||||
|
|
||||||
|
//go:embed VERSION
// embeddedVersion holds the raw contents of the VERSION file next to
// this package, embedded at compile time; init() trims and applies it.
var embeddedVersion string

// Build-time variables (can be set by ldflags)
var (
	Version = "" // resolved in init() from embeddedVersion; ldflags value is overwritten there
	Commit = "unknown" // git commit hash, injected via -ldflags
	Date = "unknown" // build timestamp, injected via -ldflags
	BuildType = "source" // "source" for manual builds, "release" for CI builds (set by ldflags)
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Read version from embedded VERSION file
|
||||||
|
Version = strings.TrimSpace(embeddedVersion)
|
||||||
|
if Version == "" {
|
||||||
|
Version = "0.0.0-dev"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
// Parse command line flags
|
||||||
|
setupMode := flag.Bool("setup", false, "Run setup wizard in CLI mode")
|
||||||
|
showVersion := flag.Bool("version", false, "Show version information")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
if *showVersion {
|
||||||
|
log.Printf("Sub2API %s (commit: %s, built: %s)\n", Version, Commit, Date)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// CLI setup mode
|
||||||
|
if *setupMode {
|
||||||
|
if err := setup.RunCLI(); err != nil {
|
||||||
|
log.Fatalf("Setup failed: %v", err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if setup is needed
|
||||||
|
if setup.NeedsSetup() {
|
||||||
|
// Check if auto-setup is enabled (for Docker deployment)
|
||||||
|
if setup.AutoSetupEnabled() {
|
||||||
|
log.Println("Auto setup mode enabled...")
|
||||||
|
if err := setup.AutoSetupFromEnv(); err != nil {
|
||||||
|
log.Fatalf("Auto setup failed: %v", err)
|
||||||
|
}
|
||||||
|
// Continue to main server after auto-setup
|
||||||
|
} else {
|
||||||
|
log.Println("First run detected, starting setup wizard...")
|
||||||
|
runSetupServer()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normal server mode
|
||||||
|
runMainServer()
|
||||||
|
}
|
||||||
|
|
||||||
|
func runSetupServer() {
|
||||||
|
r := gin.New()
|
||||||
|
r.Use(middleware.Recovery())
|
||||||
|
r.Use(middleware.CORS(config.CORSConfig{}))
|
||||||
|
r.Use(middleware.SecurityHeaders(config.CSPConfig{Enabled: true, Policy: config.DefaultCSPPolicy}))
|
||||||
|
|
||||||
|
// Register setup routes
|
||||||
|
setup.RegisterRoutes(r)
|
||||||
|
|
||||||
|
// Serve embedded frontend if available
|
||||||
|
if web.HasEmbeddedFrontend() {
|
||||||
|
r.Use(web.ServeEmbeddedFrontend())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get server address from config.yaml or environment variables (SERVER_HOST, SERVER_PORT)
|
||||||
|
// This allows users to run setup on a different address if needed
|
||||||
|
addr := config.GetServerAddress()
|
||||||
|
log.Printf("Setup wizard available at http://%s", addr)
|
||||||
|
log.Println("Complete the setup wizard to configure Sub2API")
|
||||||
|
|
||||||
|
if err := r.Run(addr); err != nil {
|
||||||
|
log.Fatalf("Failed to start setup server: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func runMainServer() {
|
||||||
|
cfg, err := config.Load()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
if cfg.RunMode == config.RunModeSimple {
|
||||||
|
log.Println("⚠️ WARNING: Running in SIMPLE mode - billing and quota checks are DISABLED")
|
||||||
|
}
|
||||||
|
|
||||||
|
buildInfo := handler.BuildInfo{
|
||||||
|
Version: Version,
|
||||||
|
BuildType: BuildType,
|
||||||
|
}
|
||||||
|
|
||||||
|
app, err := initializeApplication(buildInfo)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("Failed to initialize application: %v", err)
|
||||||
|
}
|
||||||
|
defer app.Cleanup()
|
||||||
|
|
||||||
|
// 启动服务器
|
||||||
|
go func() {
|
||||||
|
if err := app.Server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
|
||||||
|
log.Fatalf("Failed to start server: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
log.Printf("Server started on %s", app.Server.Addr)
|
||||||
|
|
||||||
|
// 等待中断信号
|
||||||
|
quit := make(chan os.Signal, 1)
|
||||||
|
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||||
|
<-quit
|
||||||
|
|
||||||
|
log.Println("Shutting down server...")
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err := app.Server.Shutdown(ctx); err != nil {
|
||||||
|
log.Fatalf("Server forced to shutdown: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Println("Server exited")
|
||||||
|
}
|
||||||
187
backend/cmd/server/wire.go
Normal file
187
backend/cmd/server/wire.go
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
//go:build wireinject
|
||||||
|
// +build wireinject
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
|
"github.com/google/wire"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Application bundles the fully wired HTTP server with the cleanup
// function that releases its background services and connections.
type Application struct {
	Server *http.Server // configured server, ready for ListenAndServe
	Cleanup func() // stops background services; intended to be deferred by the caller
}
|
||||||
|
|
||||||
|
// initializeApplication declares the Wire dependency graph for the whole
// application. This body is never executed: the wireinject build tag on
// this file means `wire` reads the wire.Build call and generates the real
// implementation into wire_gen.go.
func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
	wire.Build(
		// Infrastructure layer ProviderSets
		config.ProviderSet,

		// Business layer ProviderSets
		repository.ProviderSet,
		service.ProviderSet,
		middleware.ProviderSet,
		handler.ProviderSet,

		// Server layer ProviderSet
		server.ProviderSet,

		// BuildInfo provider
		provideServiceBuildInfo,

		// Cleanup function provider
		provideCleanup,

		// Application struct
		wire.Struct(new(Application), "Server", "Cleanup"),
	)
	// Placeholder return; wire replaces this function in generated code.
	return nil, nil
}
|
||||||
|
|
||||||
|
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
||||||
|
return service.BuildInfo{
|
||||||
|
Version: buildInfo.Version,
|
||||||
|
BuildType: buildInfo.BuildType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func provideCleanup(
|
||||||
|
entClient *ent.Client,
|
||||||
|
rdb *redis.Client,
|
||||||
|
opsMetricsCollector *service.OpsMetricsCollector,
|
||||||
|
opsAggregation *service.OpsAggregationService,
|
||||||
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
|
opsCleanup *service.OpsCleanupService,
|
||||||
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
|
tokenRefresh *service.TokenRefreshService,
|
||||||
|
accountExpiry *service.AccountExpiryService,
|
||||||
|
pricing *service.PricingService,
|
||||||
|
emailQueue *service.EmailQueueService,
|
||||||
|
billingCache *service.BillingCacheService,
|
||||||
|
oauth *service.OAuthService,
|
||||||
|
openaiOAuth *service.OpenAIOAuthService,
|
||||||
|
geminiOAuth *service.GeminiOAuthService,
|
||||||
|
antigravityOAuth *service.AntigravityOAuthService,
|
||||||
|
) func() {
|
||||||
|
return func() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
// Cleanup steps in reverse dependency order
|
||||||
|
cleanupSteps := []struct {
|
||||||
|
name string
|
||||||
|
fn func() error
|
||||||
|
}{
|
||||||
|
{"OpsScheduledReportService", func() error {
|
||||||
|
if opsScheduledReport != nil {
|
||||||
|
opsScheduledReport.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsCleanupService", func() error {
|
||||||
|
if opsCleanup != nil {
|
||||||
|
opsCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
|
if opsAlertEvaluator != nil {
|
||||||
|
opsAlertEvaluator.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAggregationService", func() error {
|
||||||
|
if opsAggregation != nil {
|
||||||
|
opsAggregation.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsMetricsCollector", func() error {
|
||||||
|
if opsMetricsCollector != nil {
|
||||||
|
opsMetricsCollector.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SchedulerSnapshotService", func() error {
|
||||||
|
if schedulerSnapshot != nil {
|
||||||
|
schedulerSnapshot.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"TokenRefreshService", func() error {
|
||||||
|
tokenRefresh.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"AccountExpiryService", func() error {
|
||||||
|
accountExpiry.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"PricingService", func() error {
|
||||||
|
pricing.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"EmailQueueService", func() error {
|
||||||
|
emailQueue.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BillingCacheService", func() error {
|
||||||
|
billingCache.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OAuthService", func() error {
|
||||||
|
oauth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpenAIOAuthService", func() error {
|
||||||
|
openaiOAuth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"GeminiOAuthService", func() error {
|
||||||
|
geminiOAuth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"AntigravityOAuthService", func() error {
|
||||||
|
antigravityOAuth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"Redis", func() error {
|
||||||
|
return rdb.Close()
|
||||||
|
}},
|
||||||
|
{"Ent", func() error {
|
||||||
|
return entClient.Close()
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, step := range cleanupSteps {
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
// Continue with remaining cleanup steps even if one fails
|
||||||
|
} else {
|
||||||
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if context timed out
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
||||||
|
default:
|
||||||
|
log.Printf("[Cleanup] All cleanup steps completed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
318
backend/cmd/server/wire_gen.go
Normal file
318
backend/cmd/server/wire_gen.go
Normal file
@@ -0,0 +1,318 @@
|
|||||||
|
// Code generated by Wire. DO NOT EDIT.
|
||||||
|
|
||||||
|
//go:generate go run -mod=mod github.com/google/wire/cmd/wire
|
||||||
|
//go:build !wireinject
|
||||||
|
// +build !wireinject
|
||||||
|
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/config"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/handler/admin"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/repository"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
import (
|
||||||
|
_ "embed"
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Injectors from wire.go:
|
||||||
|
|
||||||
|
// initializeApplication wires the complete dependency graph — configuration,
// repositories, services, handlers, middleware, the HTTP server and the
// background workers — and returns the assembled Application together with
// its aggregate cleanup function. Generated by Wire; regenerate instead of
// editing by hand.
func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
	// Configuration and storage infrastructure (Ent client, raw SQL DB, Redis).
	configConfig, err := config.ProvideConfig()
	if err != nil {
		return nil, err
	}
	client, err := repository.ProvideEnt(configConfig)
	if err != nil {
		return nil, err
	}
	db, err := repository.ProvideSQLDB(client)
	if err != nil {
		return nil, err
	}
	userRepository := repository.NewUserRepository(client, db)
	settingRepository := repository.NewSettingRepository(client)
	settingService := service.NewSettingService(settingRepository, configConfig)
	redisClient := repository.ProvideRedis(configConfig)
	emailCache := repository.NewEmailCache(redisClient)
	emailService := service.NewEmailService(settingRepository, emailCache)
	turnstileVerifier := repository.NewTurnstileVerifier()
	turnstileService := service.NewTurnstileService(settingService, turnstileVerifier)
	emailQueueService := service.ProvideEmailQueueService(emailService)
	// Billing, API-key, auth and user services plus their public handlers.
	promoCodeRepository := repository.NewPromoCodeRepository(client)
	billingCache := repository.NewBillingCache(redisClient)
	userSubscriptionRepository := repository.NewUserSubscriptionRepository(client)
	billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig)
	apiKeyRepository := repository.NewAPIKeyRepository(client)
	groupRepository := repository.NewGroupRepository(client, db)
	apiKeyCache := repository.NewAPIKeyCache(redisClient)
	apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig)
	apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService)
	promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator)
	authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService)
	userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator)
	authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService)
	userHandler := handler.NewUserHandler(userService)
	apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
	usageLogRepository := repository.NewUsageLogRepository(client, db)
	usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
	usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
	redeemCodeRepository := repository.NewRedeemCodeRepository(client)
	subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService)
	redeemCache := repository.NewRedeemCache(redisClient)
	redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
	redeemHandler := handler.NewRedeemHandler(redeemService)
	subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService)
	// Admin dashboard, account management and per-platform OAuth services.
	dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
	dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig)
	dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
	timingWheelService := service.ProvideTimingWheelService()
	dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig)
	dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
	accountRepository := repository.NewAccountRepository(client, db)
	proxyRepository := repository.NewProxyRepository(client, db)
	proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
	proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
	adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator)
	adminUserHandler := admin.NewUserHandler(adminService)
	groupHandler := admin.NewGroupHandler(adminService)
	claudeOAuthClient := repository.NewClaudeOAuthClient()
	oAuthService := service.NewOAuthService(proxyRepository, claudeOAuthClient)
	openAIOAuthClient := repository.NewOpenAIOAuthClient()
	openAIOAuthService := service.NewOpenAIOAuthService(proxyRepository, openAIOAuthClient)
	geminiOAuthClient := repository.NewGeminiOAuthClient(configConfig)
	geminiCliCodeAssistClient := repository.NewGeminiCliCodeAssistClient()
	geminiOAuthService := service.NewGeminiOAuthService(proxyRepository, geminiOAuthClient, geminiCliCodeAssistClient, configConfig)
	antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository)
	geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
	tempUnschedCache := repository.NewTempUnschedCache(redisClient)
	timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
	geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
	tokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache)
	rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, tokenCacheInvalidator)
	claudeUsageFetcher := repository.NewClaudeUsageFetcher()
	antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository)
	usageCache := service.NewUsageCache()
	accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache)
	geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService)
	gatewayCache := repository.NewGatewayCache(redisClient)
	antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService)
	httpUpstream := repository.NewHTTPUpstream(configConfig)
	antigravityGatewayService := service.NewAntigravityGatewayService(accountRepository, gatewayCache, antigravityTokenProvider, rateLimitService, httpUpstream, settingService)
	accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
	concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
	concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
	crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
	accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService)
	oAuthHandler := admin.NewOAuthHandler(oAuthService)
	openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService)
	geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService)
	antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService)
	proxyHandler := admin.NewProxyHandler(adminService)
	adminRedeemHandler := admin.NewRedeemHandler(adminService)
	promoHandler := admin.NewPromoHandler(promoService)
	// Ops, scheduler, pricing and the gateway services behind the proxy API.
	opsRepository := repository.NewOpsRepository(db)
	schedulerCache := repository.NewSchedulerCache(redisClient)
	schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
	schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
	pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
	pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
	if err != nil {
		return nil, err
	}
	billingService := service.NewBillingService(configConfig, pricingService)
	identityCache := repository.NewIdentityCache(redisClient)
	identityService := service.NewIdentityService(identityCache)
	deferredService := service.ProvideDeferredService(accountRepository, timingWheelService)
	gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService)
	openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService)
	geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig)
	opsService := service.NewOpsService(opsRepository, settingRepository, configConfig, accountRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService)
	settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService)
	opsHandler := admin.NewOpsHandler(opsService)
	updateCache := repository.NewUpdateCache(redisClient)
	gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig)
	serviceBuildInfo := provideServiceBuildInfo(buildInfo)
	updateService := service.ProvideUpdateService(updateCache, gitHubReleaseClient, serviceBuildInfo)
	systemHandler := handler.ProvideSystemHandler(updateService)
	adminSubscriptionHandler := admin.NewSubscriptionHandler(subscriptionService)
	adminUsageHandler := admin.NewUsageHandler(usageService, apiKeyService, adminService)
	userAttributeDefinitionRepository := repository.NewUserAttributeDefinitionRepository(client)
	userAttributeValueRepository := repository.NewUserAttributeValueRepository(client)
	userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository)
	userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService)
	// Handler aggregation, middleware, router and HTTP server.
	adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler)
	gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig)
	openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig)
	handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo)
	handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler)
	jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService)
	adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService)
	apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig)
	engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService, opsService, settingService, redisClient)
	httpServer := server.ProvideHTTPServer(configConfig, engine)
	// Background workers and the aggregate cleanup closure handed to the
	// Application; provideCleanup stops these in reverse dependency order.
	opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig)
	opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig)
	opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
	opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
	opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
	tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, tokenCacheInvalidator, configConfig)
	accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
	v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService)
	application := &Application{
		Server:  httpServer,
		Cleanup: v,
	}
	return application, nil
}
|
||||||
|
|
||||||
|
// wire.go:
|
||||||
|
|
||||||
|
// Application is the root object produced by initializeApplication: the
// fully-wired HTTP server together with the function that tears it down.
type Application struct {
	// Server is the configured HTTP server, ready to be started.
	Server *http.Server
	// Cleanup stops the background services and closes the shared Redis and
	// Ent clients; call it exactly once when shutting the application down.
	Cleanup func()
}
|
||||||
|
|
||||||
|
func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo {
|
||||||
|
return service.BuildInfo{
|
||||||
|
Version: buildInfo.Version,
|
||||||
|
BuildType: buildInfo.BuildType,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func provideCleanup(
|
||||||
|
entClient *ent.Client,
|
||||||
|
rdb *redis.Client,
|
||||||
|
opsMetricsCollector *service.OpsMetricsCollector,
|
||||||
|
opsAggregation *service.OpsAggregationService,
|
||||||
|
opsAlertEvaluator *service.OpsAlertEvaluatorService,
|
||||||
|
opsCleanup *service.OpsCleanupService,
|
||||||
|
opsScheduledReport *service.OpsScheduledReportService,
|
||||||
|
schedulerSnapshot *service.SchedulerSnapshotService,
|
||||||
|
tokenRefresh *service.TokenRefreshService,
|
||||||
|
accountExpiry *service.AccountExpiryService,
|
||||||
|
pricing *service.PricingService,
|
||||||
|
emailQueue *service.EmailQueueService,
|
||||||
|
billingCache *service.BillingCacheService,
|
||||||
|
oauth *service.OAuthService,
|
||||||
|
openaiOAuth *service.OpenAIOAuthService,
|
||||||
|
geminiOAuth *service.GeminiOAuthService,
|
||||||
|
antigravityOAuth *service.AntigravityOAuthService,
|
||||||
|
) func() {
|
||||||
|
return func() {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
cleanupSteps := []struct {
|
||||||
|
name string
|
||||||
|
fn func() error
|
||||||
|
}{
|
||||||
|
{"OpsScheduledReportService", func() error {
|
||||||
|
if opsScheduledReport != nil {
|
||||||
|
opsScheduledReport.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsCleanupService", func() error {
|
||||||
|
if opsCleanup != nil {
|
||||||
|
opsCleanup.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAlertEvaluatorService", func() error {
|
||||||
|
if opsAlertEvaluator != nil {
|
||||||
|
opsAlertEvaluator.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsAggregationService", func() error {
|
||||||
|
if opsAggregation != nil {
|
||||||
|
opsAggregation.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpsMetricsCollector", func() error {
|
||||||
|
if opsMetricsCollector != nil {
|
||||||
|
opsMetricsCollector.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"SchedulerSnapshotService", func() error {
|
||||||
|
if schedulerSnapshot != nil {
|
||||||
|
schedulerSnapshot.Stop()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"TokenRefreshService", func() error {
|
||||||
|
tokenRefresh.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"AccountExpiryService", func() error {
|
||||||
|
accountExpiry.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"PricingService", func() error {
|
||||||
|
pricing.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"EmailQueueService", func() error {
|
||||||
|
emailQueue.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"BillingCacheService", func() error {
|
||||||
|
billingCache.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OAuthService", func() error {
|
||||||
|
oauth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"OpenAIOAuthService", func() error {
|
||||||
|
openaiOAuth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"GeminiOAuthService", func() error {
|
||||||
|
geminiOAuth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"AntigravityOAuthService", func() error {
|
||||||
|
antigravityOAuth.Stop()
|
||||||
|
return nil
|
||||||
|
}},
|
||||||
|
{"Redis", func() error {
|
||||||
|
return rdb.Close()
|
||||||
|
}},
|
||||||
|
{"Ent", func() error {
|
||||||
|
return entClient.Close()
|
||||||
|
}},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, step := range cleanupSteps {
|
||||||
|
if err := step.fn(); err != nil {
|
||||||
|
log.Printf("[Cleanup] %s failed: %v", step.name, err)
|
||||||
|
|
||||||
|
} else {
|
||||||
|
log.Printf("[Cleanup] %s succeeded", step.name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds")
|
||||||
|
default:
|
||||||
|
log.Printf("[Cleanup] All cleanup steps completed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
494
backend/ent/account.go
Normal file
494
backend/ent/account.go
Normal file
@@ -0,0 +1,494 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Account is the model entity for the Account schema.
//
// Pointer-typed fields correspond to nullable columns: they remain nil when
// the database value is NULL (see assignValues, which only allocates them
// for valid scanned values).
type Account struct {
	// config embeds ent's generated runtime configuration; the "-" tag
	// excludes it from JSON output.
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Notes holds the value of the "notes" field.
	Notes *string `json:"notes,omitempty"`
	// Platform holds the value of the "platform" field.
	Platform string `json:"platform,omitempty"`
	// Type holds the value of the "type" field.
	Type string `json:"type,omitempty"`
	// Credentials holds the value of the "credentials" field.
	Credentials map[string]interface{} `json:"credentials,omitempty"`
	// Extra holds the value of the "extra" field.
	Extra map[string]interface{} `json:"extra,omitempty"`
	// ProxyID holds the value of the "proxy_id" field.
	ProxyID *int64 `json:"proxy_id,omitempty"`
	// Concurrency holds the value of the "concurrency" field.
	Concurrency int `json:"concurrency,omitempty"`
	// Priority holds the value of the "priority" field.
	Priority int `json:"priority,omitempty"`
	// RateMultiplier holds the value of the "rate_multiplier" field.
	RateMultiplier float64 `json:"rate_multiplier,omitempty"`
	// Status holds the value of the "status" field.
	Status string `json:"status,omitempty"`
	// ErrorMessage holds the value of the "error_message" field.
	ErrorMessage *string `json:"error_message,omitempty"`
	// LastUsedAt holds the value of the "last_used_at" field.
	LastUsedAt *time.Time `json:"last_used_at,omitempty"`
	// Account expiration time (NULL means no expiration).
	ExpiresAt *time.Time `json:"expires_at,omitempty"`
	// Auto pause scheduling when account expires.
	AutoPauseOnExpired bool `json:"auto_pause_on_expired,omitempty"`
	// Schedulable holds the value of the "schedulable" field.
	Schedulable bool `json:"schedulable,omitempty"`
	// RateLimitedAt holds the value of the "rate_limited_at" field.
	RateLimitedAt *time.Time `json:"rate_limited_at,omitempty"`
	// RateLimitResetAt holds the value of the "rate_limit_reset_at" field.
	RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"`
	// OverloadUntil holds the value of the "overload_until" field.
	OverloadUntil *time.Time `json:"overload_until,omitempty"`
	// SessionWindowStart holds the value of the "session_window_start" field.
	SessionWindowStart *time.Time `json:"session_window_start,omitempty"`
	// SessionWindowEnd holds the value of the "session_window_end" field.
	SessionWindowEnd *time.Time `json:"session_window_end,omitempty"`
	// SessionWindowStatus holds the value of the "session_window_status" field.
	SessionWindowStatus *string `json:"session_window_status,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AccountQuery when eager-loading is set.
	Edges AccountEdges `json:"edges"`
	// selectValues holds values for columns outside the schema; NOTE(review):
	// populated by ent's generated query code for custom selections.
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// AccountEdges holds the relations/edges for other nodes in the graph.
type AccountEdges struct {
	// Groups holds the value of the groups edge.
	Groups []*Group `json:"groups,omitempty"`
	// Proxy holds the value of the proxy edge.
	Proxy *Proxy `json:"proxy,omitempty"`
	// UsageLogs holds the value of the usage_logs edge.
	UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
	// AccountGroups holds the value of the account_groups edge.
	AccountGroups []*AccountGroup `json:"account_groups,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index mapping: 0=groups, 1=proxy, 2=usage_logs, 3=account_groups
	// (see the corresponding *OrErr accessors below).
	loadedTypes [4]bool
}
|
||||||
|
|
||||||
|
// GroupsOrErr returns the Groups value or an error if the edge
// was not loaded in eager-loading.
func (e AccountEdges) GroupsOrErr() ([]*Group, error) {
	if e.loadedTypes[0] {
		// Edge was requested: a nil slice with a nil error means it was
		// loaded but empty.
		return e.Groups, nil
	}
	return nil, &NotLoadedError{edge: "groups"}
}
|
||||||
|
|
||||||
|
// ProxyOrErr returns the Proxy value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AccountEdges) ProxyOrErr() (*Proxy, error) {
	// A non-nil Proxy short-circuits without consulting loadedTypes.
	if e.Proxy != nil {
		return e.Proxy, nil
	} else if e.loadedTypes[1] {
		// Edge was requested but no related Proxy row exists.
		return nil, &NotFoundError{label: proxy.Label}
	}
	return nil, &NotLoadedError{edge: "proxy"}
}
|
||||||
|
|
||||||
|
// UsageLogsOrErr returns the UsageLogs value or an error if the edge
// was not loaded in eager-loading.
func (e AccountEdges) UsageLogsOrErr() ([]*UsageLog, error) {
	if e.loadedTypes[2] {
		// Edge was requested: a nil slice with a nil error means it was
		// loaded but empty.
		return e.UsageLogs, nil
	}
	return nil, &NotLoadedError{edge: "usage_logs"}
}
|
||||||
|
|
||||||
|
// AccountGroupsOrErr returns the AccountGroups value or an error if the edge
// was not loaded in eager-loading.
func (e AccountEdges) AccountGroupsOrErr() ([]*AccountGroup, error) {
	if e.loadedTypes[3] {
		// Edge was requested: a nil slice with a nil error means it was
		// loaded but empty.
		return e.AccountGroups, nil
	}
	return nil, &NotLoadedError{edge: "account_groups"}
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
// Each column is mapped to the nullable scanner matching its schema type;
// the scanned values are converted onto the Account by assignValues.
func (*Account) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case account.FieldCredentials, account.FieldExtra:
			// JSON columns are scanned as raw bytes.
			values[i] = new([]byte)
		case account.FieldAutoPauseOnExpired, account.FieldSchedulable:
			values[i] = new(sql.NullBool)
		case account.FieldRateMultiplier:
			values[i] = new(sql.NullFloat64)
		case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority:
			values[i] = new(sql.NullInt64)
		case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus:
			values[i] = new(sql.NullString)
		case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd:
			values[i] = new(sql.NullTime)
		default:
			// Columns not declared in the schema scan into sql.UnknownType.
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the Account fields.
|
||||||
|
func (_m *Account) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case account.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case account.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldDeletedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.DeletedAt = new(time.Time)
|
||||||
|
*_m.DeletedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Name = value.String
|
||||||
|
}
|
||||||
|
case account.FieldNotes:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field notes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Notes = new(string)
|
||||||
|
*_m.Notes = value.String
|
||||||
|
}
|
||||||
|
case account.FieldPlatform:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field platform", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Platform = value.String
|
||||||
|
}
|
||||||
|
case account.FieldType:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field type", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Type = value.String
|
||||||
|
}
|
||||||
|
case account.FieldCredentials:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field credentials", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.Credentials); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field credentials: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case account.FieldExtra:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field extra", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.Extra); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field extra: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case account.FieldProxyID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field proxy_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ProxyID = new(int64)
|
||||||
|
*_m.ProxyID = value.Int64
|
||||||
|
}
|
||||||
|
case account.FieldConcurrency:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field concurrency", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Concurrency = int(value.Int64)
|
||||||
|
}
|
||||||
|
case account.FieldPriority:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field priority", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Priority = int(value.Int64)
|
||||||
|
}
|
||||||
|
case account.FieldRateMultiplier:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateMultiplier = value.Float64
|
||||||
|
}
|
||||||
|
case account.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case account.FieldErrorMessage:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field error_message", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ErrorMessage = new(string)
|
||||||
|
*_m.ErrorMessage = value.String
|
||||||
|
}
|
||||||
|
case account.FieldLastUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field last_used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.LastUsedAt = new(time.Time)
|
||||||
|
*_m.LastUsedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldExpiresAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ExpiresAt = new(time.Time)
|
||||||
|
*_m.ExpiresAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldAutoPauseOnExpired:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field auto_pause_on_expired", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.AutoPauseOnExpired = value.Bool
|
||||||
|
}
|
||||||
|
case account.FieldSchedulable:
|
||||||
|
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field schedulable", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Schedulable = value.Bool
|
||||||
|
}
|
||||||
|
case account.FieldRateLimitedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limited_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimitedAt = new(time.Time)
|
||||||
|
*_m.RateLimitedAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldRateLimitResetAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field rate_limit_reset_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.RateLimitResetAt = new(time.Time)
|
||||||
|
*_m.RateLimitResetAt = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldOverloadUntil:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field overload_until", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.OverloadUntil = new(time.Time)
|
||||||
|
*_m.OverloadUntil = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldSessionWindowStart:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field session_window_start", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SessionWindowStart = new(time.Time)
|
||||||
|
*_m.SessionWindowStart = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldSessionWindowEnd:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field session_window_end", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SessionWindowEnd = new(time.Time)
|
||||||
|
*_m.SessionWindowEnd = value.Time
|
||||||
|
}
|
||||||
|
case account.FieldSessionWindowStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field session_window_status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.SessionWindowStatus = new(string)
|
||||||
|
*_m.SessionWindowStatus = value.String
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the Account.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *Account) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroups queries the "groups" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryGroups() *GroupQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryGroups(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryProxy queries the "proxy" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryProxy() *ProxyQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryProxy(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageLogs queries the "usage_logs" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryUsageLogs() *UsageLogQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryUsageLogs(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccountGroups queries the "account_groups" edge of the Account entity.
|
||||||
|
func (_m *Account) QueryAccountGroups() *AccountGroupQuery {
|
||||||
|
return NewAccountClient(_m.config).QueryAccountGroups(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this Account.
|
||||||
|
// Note that you need to call Account.Unwrap() before calling this method if this Account
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *Account) Update() *AccountUpdateOne {
|
||||||
|
return NewAccountClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the Account entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *Account) Unwrap() *Account {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: Account is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *Account) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("Account(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.DeletedAt; v != nil {
|
||||||
|
builder.WriteString("deleted_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("name=")
|
||||||
|
builder.WriteString(_m.Name)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Notes; v != nil {
|
||||||
|
builder.WriteString("notes=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("platform=")
|
||||||
|
builder.WriteString(_m.Platform)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("type=")
|
||||||
|
builder.WriteString(_m.Type)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("credentials=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Credentials))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("extra=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Extra))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ProxyID; v != nil {
|
||||||
|
builder.WriteString("proxy_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("concurrency=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Concurrency))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("priority=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Priority))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("rate_multiplier=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ErrorMessage; v != nil {
|
||||||
|
builder.WriteString("error_message=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.LastUsedAt; v != nil {
|
||||||
|
builder.WriteString("last_used_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ExpiresAt; v != nil {
|
||||||
|
builder.WriteString("expires_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("auto_pause_on_expired=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.AutoPauseOnExpired))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("schedulable=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Schedulable))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.RateLimitedAt; v != nil {
|
||||||
|
builder.WriteString("rate_limited_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.RateLimitResetAt; v != nil {
|
||||||
|
builder.WriteString("rate_limit_reset_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.OverloadUntil; v != nil {
|
||||||
|
builder.WriteString("overload_until=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SessionWindowStart; v != nil {
|
||||||
|
builder.WriteString("session_window_start=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SessionWindowEnd; v != nil {
|
||||||
|
builder.WriteString("session_window_end=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.SessionWindowStatus; v != nil {
|
||||||
|
builder.WriteString("session_window_status=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accounts is a parsable slice of Account.
|
||||||
|
type Accounts []*Account
|
||||||
392
backend/ent/account/account.go
Normal file
392
backend/ent/account/account.go
Normal file
@@ -0,0 +1,392 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package account
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the account type in the database.
|
||||||
|
Label = "account"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||||
|
FieldDeletedAt = "deleted_at"
|
||||||
|
// FieldName holds the string denoting the name field in the database.
|
||||||
|
FieldName = "name"
|
||||||
|
// FieldNotes holds the string denoting the notes field in the database.
|
||||||
|
FieldNotes = "notes"
|
||||||
|
// FieldPlatform holds the string denoting the platform field in the database.
|
||||||
|
FieldPlatform = "platform"
|
||||||
|
// FieldType holds the string denoting the type field in the database.
|
||||||
|
FieldType = "type"
|
||||||
|
// FieldCredentials holds the string denoting the credentials field in the database.
|
||||||
|
FieldCredentials = "credentials"
|
||||||
|
// FieldExtra holds the string denoting the extra field in the database.
|
||||||
|
FieldExtra = "extra"
|
||||||
|
// FieldProxyID holds the string denoting the proxy_id field in the database.
|
||||||
|
FieldProxyID = "proxy_id"
|
||||||
|
// FieldConcurrency holds the string denoting the concurrency field in the database.
|
||||||
|
FieldConcurrency = "concurrency"
|
||||||
|
// FieldPriority holds the string denoting the priority field in the database.
|
||||||
|
FieldPriority = "priority"
|
||||||
|
// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
|
||||||
|
FieldRateMultiplier = "rate_multiplier"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldErrorMessage holds the string denoting the error_message field in the database.
|
||||||
|
FieldErrorMessage = "error_message"
|
||||||
|
// FieldLastUsedAt holds the string denoting the last_used_at field in the database.
|
||||||
|
FieldLastUsedAt = "last_used_at"
|
||||||
|
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||||
|
FieldExpiresAt = "expires_at"
|
||||||
|
// FieldAutoPauseOnExpired holds the string denoting the auto_pause_on_expired field in the database.
|
||||||
|
FieldAutoPauseOnExpired = "auto_pause_on_expired"
|
||||||
|
// FieldSchedulable holds the string denoting the schedulable field in the database.
|
||||||
|
FieldSchedulable = "schedulable"
|
||||||
|
// FieldRateLimitedAt holds the string denoting the rate_limited_at field in the database.
|
||||||
|
FieldRateLimitedAt = "rate_limited_at"
|
||||||
|
// FieldRateLimitResetAt holds the string denoting the rate_limit_reset_at field in the database.
|
||||||
|
FieldRateLimitResetAt = "rate_limit_reset_at"
|
||||||
|
// FieldOverloadUntil holds the string denoting the overload_until field in the database.
|
||||||
|
FieldOverloadUntil = "overload_until"
|
||||||
|
// FieldSessionWindowStart holds the string denoting the session_window_start field in the database.
|
||||||
|
FieldSessionWindowStart = "session_window_start"
|
||||||
|
// FieldSessionWindowEnd holds the string denoting the session_window_end field in the database.
|
||||||
|
FieldSessionWindowEnd = "session_window_end"
|
||||||
|
// FieldSessionWindowStatus holds the string denoting the session_window_status field in the database.
|
||||||
|
FieldSessionWindowStatus = "session_window_status"
|
||||||
|
// EdgeGroups holds the string denoting the groups edge name in mutations.
|
||||||
|
EdgeGroups = "groups"
|
||||||
|
// EdgeProxy holds the string denoting the proxy edge name in mutations.
|
||||||
|
EdgeProxy = "proxy"
|
||||||
|
// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
|
||||||
|
EdgeUsageLogs = "usage_logs"
|
||||||
|
// EdgeAccountGroups holds the string denoting the account_groups edge name in mutations.
|
||||||
|
EdgeAccountGroups = "account_groups"
|
||||||
|
// Table holds the table name of the account in the database.
|
||||||
|
Table = "accounts"
|
||||||
|
// GroupsTable is the table that holds the groups relation/edge. The primary key declared below.
|
||||||
|
GroupsTable = "account_groups"
|
||||||
|
// GroupsInverseTable is the table name for the Group entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||||
|
GroupsInverseTable = "groups"
|
||||||
|
// ProxyTable is the table that holds the proxy relation/edge.
|
||||||
|
ProxyTable = "accounts"
|
||||||
|
// ProxyInverseTable is the table name for the Proxy entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "proxy" package.
|
||||||
|
ProxyInverseTable = "proxies"
|
||||||
|
// ProxyColumn is the table column denoting the proxy relation/edge.
|
||||||
|
ProxyColumn = "proxy_id"
|
||||||
|
// UsageLogsTable is the table that holds the usage_logs relation/edge.
|
||||||
|
UsageLogsTable = "usage_logs"
|
||||||
|
// UsageLogsInverseTable is the table name for the UsageLog entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "usagelog" package.
|
||||||
|
UsageLogsInverseTable = "usage_logs"
|
||||||
|
// UsageLogsColumn is the table column denoting the usage_logs relation/edge.
|
||||||
|
UsageLogsColumn = "account_id"
|
||||||
|
// AccountGroupsTable is the table that holds the account_groups relation/edge.
|
||||||
|
AccountGroupsTable = "account_groups"
|
||||||
|
// AccountGroupsInverseTable is the table name for the AccountGroup entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "accountgroup" package.
|
||||||
|
AccountGroupsInverseTable = "account_groups"
|
||||||
|
// AccountGroupsColumn is the table column denoting the account_groups relation/edge.
|
||||||
|
AccountGroupsColumn = "account_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for account fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldDeletedAt,
|
||||||
|
FieldName,
|
||||||
|
FieldNotes,
|
||||||
|
FieldPlatform,
|
||||||
|
FieldType,
|
||||||
|
FieldCredentials,
|
||||||
|
FieldExtra,
|
||||||
|
FieldProxyID,
|
||||||
|
FieldConcurrency,
|
||||||
|
FieldPriority,
|
||||||
|
FieldRateMultiplier,
|
||||||
|
FieldStatus,
|
||||||
|
FieldErrorMessage,
|
||||||
|
FieldLastUsedAt,
|
||||||
|
FieldExpiresAt,
|
||||||
|
FieldAutoPauseOnExpired,
|
||||||
|
FieldSchedulable,
|
||||||
|
FieldRateLimitedAt,
|
||||||
|
FieldRateLimitResetAt,
|
||||||
|
FieldOverloadUntil,
|
||||||
|
FieldSessionWindowStart,
|
||||||
|
FieldSessionWindowEnd,
|
||||||
|
FieldSessionWindowStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// GroupsPrimaryKey and GroupsColumn2 are the table columns denoting the
|
||||||
|
// primary key for the groups relation (M2M).
|
||||||
|
GroupsPrimaryKey = []string{"account_id", "group_id"}
|
||||||
|
)
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that the variables below are initialized by the runtime
|
||||||
|
// package on the initialization of the application. Therefore,
|
||||||
|
// it should be imported in the main as follows:
|
||||||
|
//
|
||||||
|
// import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
var (
|
||||||
|
Hooks [1]ent.Hook
|
||||||
|
Interceptors [1]ent.Interceptor
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
// NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
NameValidator func(string) error
|
||||||
|
// PlatformValidator is a validator for the "platform" field. It is called by the builders before save.
|
||||||
|
PlatformValidator func(string) error
|
||||||
|
// TypeValidator is a validator for the "type" field. It is called by the builders before save.
|
||||||
|
TypeValidator func(string) error
|
||||||
|
// DefaultCredentials holds the default value on creation for the "credentials" field.
|
||||||
|
DefaultCredentials func() map[string]interface{}
|
||||||
|
// DefaultExtra holds the default value on creation for the "extra" field.
|
||||||
|
DefaultExtra func() map[string]interface{}
|
||||||
|
// DefaultConcurrency holds the default value on creation for the "concurrency" field.
|
||||||
|
DefaultConcurrency int
|
||||||
|
// DefaultPriority holds the default value on creation for the "priority" field.
|
||||||
|
DefaultPriority int
|
||||||
|
// DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field.
|
||||||
|
DefaultRateMultiplier float64
|
||||||
|
// DefaultStatus holds the default value on creation for the "status" field.
|
||||||
|
DefaultStatus string
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
// DefaultAutoPauseOnExpired holds the default value on creation for the "auto_pause_on_expired" field.
|
||||||
|
DefaultAutoPauseOnExpired bool
|
||||||
|
// DefaultSchedulable holds the default value on creation for the "schedulable" field.
|
||||||
|
DefaultSchedulable bool
|
||||||
|
// SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
|
||||||
|
SessionWindowStatusValidator func(string) error
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the Account queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDeletedAt orders the results by the deleted_at field.
|
||||||
|
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByName orders the results by the name field.
|
||||||
|
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByNotes orders the results by the notes field.
|
||||||
|
func ByNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldNotes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPlatform orders the results by the platform field.
|
||||||
|
func ByPlatform(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPlatform, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByType orders the results by the type field.
|
||||||
|
func ByType(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldType, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProxyID orders the results by the proxy_id field.
|
||||||
|
func ByProxyID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProxyID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByConcurrency orders the results by the concurrency field.
|
||||||
|
func ByConcurrency(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldConcurrency, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPriority orders the results by the priority field.
|
||||||
|
func ByPriority(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPriority, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateMultiplier orders the results by the rate_multiplier field.
|
||||||
|
func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByErrorMessage orders the results by the error_message field.
|
||||||
|
func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldErrorMessage, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByLastUsedAt orders the results by the last_used_at field.
|
||||||
|
func ByLastUsedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldLastUsedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByExpiresAt orders the results by the expires_at field.
|
||||||
|
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAutoPauseOnExpired orders the results by the auto_pause_on_expired field.
|
||||||
|
func ByAutoPauseOnExpired(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldAutoPauseOnExpired, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySchedulable orders the results by the schedulable field.
|
||||||
|
func BySchedulable(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSchedulable, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimitedAt orders the results by the rate_limited_at field.
|
||||||
|
func ByRateLimitedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimitedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByRateLimitResetAt orders the results by the rate_limit_reset_at field.
|
||||||
|
func ByRateLimitResetAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldRateLimitResetAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByOverloadUntil orders the results by the overload_until field.
|
||||||
|
func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySessionWindowStart orders the results by the session_window_start field.
|
||||||
|
func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySessionWindowEnd orders the results by the session_window_end field.
|
||||||
|
func BySessionWindowEnd(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSessionWindowEnd, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// BySessionWindowStatus orders the results by the session_window_status field.
|
||||||
|
func BySessionWindowStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldSessionWindowStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupsCount orders the results by groups count.
|
||||||
|
func ByGroupsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newGroupsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroups orders the results by groups terms.
|
||||||
|
func ByGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newGroupsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProxyField orders the results by proxy field.
|
||||||
|
func ByProxyField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newProxyStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogsCount orders the results by usage_logs count.
|
||||||
|
func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogs orders the results by usage_logs terms.
|
||||||
|
func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAccountGroupsCount orders the results by account_groups count.
|
||||||
|
func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAccountGroups orders the results by account_groups terms.
|
||||||
|
func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newGroupsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(GroupsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2M, false, GroupsTable, GroupsPrimaryKey...),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newProxyStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(ProxyInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, ProxyTable, ProxyColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newUsageLogsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UsageLogsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newAccountGroupsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
1413
backend/ent/account/where.go
Normal file
1413
backend/ent/account/where.go
Normal file
File diff suppressed because it is too large
Load Diff
2296
backend/ent/account_create.go
Normal file
2296
backend/ent/account_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/account_delete.go
Normal file
88
backend/ent/account_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountDelete is the builder for deleting a Account entity.
|
||||||
|
type AccountDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountDelete builder.
|
||||||
|
func (_d *AccountDelete) Where(ps ...predicate.Account) *AccountDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AccountDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AccountDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(account.Table, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountDeleteOne is the builder for deleting a single Account entity.
|
||||||
|
type AccountDeleteOne struct {
|
||||||
|
_d *AccountDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountDelete builder.
|
||||||
|
func (_d *AccountDeleteOne) Where(ps ...predicate.Account) *AccountDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AccountDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{account.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
900
backend/ent/account_query.go
Normal file
900
backend/ent/account_query.go
Normal file
@@ -0,0 +1,900 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountQuery is the builder for querying Account entities.
|
||||||
|
type AccountQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []account.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.Account
|
||||||
|
withGroups *GroupQuery
|
||||||
|
withProxy *ProxyQuery
|
||||||
|
withUsageLogs *UsageLogQuery
|
||||||
|
withAccountGroups *AccountGroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AccountQuery builder.
|
||||||
|
func (_q *AccountQuery) Where(ps ...predicate.Account) *AccountQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AccountQuery) Limit(limit int) *AccountQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AccountQuery) Offset(offset int) *AccountQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AccountQuery) Unique(unique bool) *AccountQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AccountQuery) Order(o ...account.OrderOption) *AccountQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroups chains the current query on the "groups" edge.
|
||||||
|
func (_q *AccountQuery) QueryGroups() *GroupQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(account.Table, account.FieldID, selector),
|
||||||
|
sqlgraph.To(group.Table, group.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2M, false, account.GroupsTable, account.GroupsPrimaryKey...),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryProxy chains the current query on the "proxy" edge.
|
||||||
|
func (_q *AccountQuery) QueryProxy() *ProxyQuery {
|
||||||
|
query := (&ProxyClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(account.Table, account.FieldID, selector),
|
||||||
|
sqlgraph.To(proxy.Table, proxy.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, account.ProxyTable, account.ProxyColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageLogs chains the current query on the "usage_logs" edge.
|
||||||
|
func (_q *AccountQuery) QueryUsageLogs() *UsageLogQuery {
|
||||||
|
query := (&UsageLogClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(account.Table, account.FieldID, selector),
|
||||||
|
sqlgraph.To(usagelog.Table, usagelog.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, account.UsageLogsTable, account.UsageLogsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccountGroups chains the current query on the "account_groups" edge.
|
||||||
|
func (_q *AccountQuery) QueryAccountGroups() *AccountGroupQuery {
|
||||||
|
query := (&AccountGroupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(account.Table, account.FieldID, selector),
|
||||||
|
sqlgraph.To(accountgroup.Table, accountgroup.AccountColumn),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, true, account.AccountGroupsTable, account.AccountGroupsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first Account entity from the query.
|
||||||
|
// Returns a *NotFoundError when no Account was found.
|
||||||
|
func (_q *AccountQuery) First(ctx context.Context) (*Account, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{account.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) FirstX(ctx context.Context) *Account {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first Account ID from the query.
|
||||||
|
// Returns a *NotFoundError when no Account ID was found.
|
||||||
|
func (_q *AccountQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{account.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single Account entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one Account entity is found.
|
||||||
|
// Returns a *NotFoundError when no Account entities are found.
|
||||||
|
func (_q *AccountQuery) Only(ctx context.Context) (*Account, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{account.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{account.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) OnlyX(ctx context.Context) *Account {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only Account ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one Account ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *AccountQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{account.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{account.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of Accounts.
|
||||||
|
func (_q *AccountQuery) All(ctx context.Context) ([]*Account, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*Account, *AccountQuery]()
|
||||||
|
return withInterceptors[[]*Account](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) AllX(ctx context.Context) []*Account {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of Account IDs.
|
||||||
|
func (_q *AccountQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(account.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *AccountQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*AccountQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AccountQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AccountQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AccountQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *AccountQuery) Clone() *AccountQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &AccountQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]account.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.Account{}, _q.predicates...),
|
||||||
|
withGroups: _q.withGroups.Clone(),
|
||||||
|
withProxy: _q.withProxy.Clone(),
|
||||||
|
withUsageLogs: _q.withUsageLogs.Clone(),
|
||||||
|
withAccountGroups: _q.withAccountGroups.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGroups tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "groups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithGroups(opts ...func(*GroupQuery)) *AccountQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withGroups = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithProxy tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "proxy" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithProxy(opts ...func(*ProxyQuery)) *AccountQuery {
|
||||||
|
query := (&ProxyClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withProxy = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *AccountQuery {
|
||||||
|
query := (&UsageLogClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUsageLogs = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAccountGroups tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "account_groups" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountQuery) WithAccountGroups(opts ...func(*AccountGroupQuery)) *AccountQuery {
|
||||||
|
query := (&AccountGroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withAccountGroups = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.Account.Query().
|
||||||
|
// GroupBy(account.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AccountQuery) GroupBy(field string, fields ...string) *AccountGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AccountGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = account.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.Account.Query().
|
||||||
|
// Select(account.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AccountQuery) Select(fields ...string) *AccountSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AccountSelect{AccountQuery: _q}
|
||||||
|
sbuild.label = account.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AccountSelect configured with the given aggregations.
|
||||||
|
func (_q *AccountQuery) Aggregate(fns ...AggregateFunc) *AccountSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !account.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Account, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*Account{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [4]bool{
|
||||||
|
_q.withGroups != nil,
|
||||||
|
_q.withProxy != nil,
|
||||||
|
_q.withUsageLogs != nil,
|
||||||
|
_q.withAccountGroups != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*Account).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &Account{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withGroups; query != nil {
|
||||||
|
if err := _q.loadGroups(ctx, query, nodes,
|
||||||
|
func(n *Account) { n.Edges.Groups = []*Group{} },
|
||||||
|
func(n *Account, e *Group) { n.Edges.Groups = append(n.Edges.Groups, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withProxy; query != nil {
|
||||||
|
if err := _q.loadProxy(ctx, query, nodes, nil,
|
||||||
|
func(n *Account, e *Proxy) { n.Edges.Proxy = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withUsageLogs; query != nil {
|
||||||
|
if err := _q.loadUsageLogs(ctx, query, nodes,
|
||||||
|
func(n *Account) { n.Edges.UsageLogs = []*UsageLog{} },
|
||||||
|
func(n *Account, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withAccountGroups; query != nil {
|
||||||
|
if err := _q.loadAccountGroups(ctx, query, nodes,
|
||||||
|
func(n *Account) { n.Edges.AccountGroups = []*AccountGroup{} },
|
||||||
|
func(n *Account, e *AccountGroup) { n.Edges.AccountGroups = append(n.Edges.AccountGroups, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountQuery) loadGroups(ctx context.Context, query *GroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *Group)) error {
|
||||||
|
edgeIDs := make([]driver.Value, len(nodes))
|
||||||
|
byID := make(map[int64]*Account)
|
||||||
|
nids := make(map[int64]map[*Account]struct{})
|
||||||
|
for i, node := range nodes {
|
||||||
|
edgeIDs[i] = node.ID
|
||||||
|
byID[node.ID] = node
|
||||||
|
if init != nil {
|
||||||
|
init(node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
query.Where(func(s *sql.Selector) {
|
||||||
|
joinT := sql.Table(account.GroupsTable)
|
||||||
|
s.Join(joinT).On(s.C(group.FieldID), joinT.C(account.GroupsPrimaryKey[1]))
|
||||||
|
s.Where(sql.InValues(joinT.C(account.GroupsPrimaryKey[0]), edgeIDs...))
|
||||||
|
columns := s.SelectedColumns()
|
||||||
|
s.Select(joinT.C(account.GroupsPrimaryKey[0]))
|
||||||
|
s.AppendSelect(columns...)
|
||||||
|
s.SetDistinct(false)
|
||||||
|
})
|
||||||
|
if err := query.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||||
|
return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) {
|
||||||
|
assign := spec.Assign
|
||||||
|
values := spec.ScanValues
|
||||||
|
spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
values, err := values(columns[1:])
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return append([]any{new(sql.NullInt64)}, values...), nil
|
||||||
|
}
|
||||||
|
spec.Assign = func(columns []string, values []any) error {
|
||||||
|
outValue := values[0].(*sql.NullInt64).Int64
|
||||||
|
inValue := values[1].(*sql.NullInt64).Int64
|
||||||
|
if nids[inValue] == nil {
|
||||||
|
nids[inValue] = map[*Account]struct{}{byID[outValue]: {}}
|
||||||
|
return assign(columns[1:], values[1:])
|
||||||
|
}
|
||||||
|
nids[inValue][byID[outValue]] = struct{}{}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
neighbors, err := withInterceptors[[]*Group](ctx, query, qr, query.inters)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected "groups" node returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for kn := range nodes {
|
||||||
|
assign(kn, n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *AccountQuery) loadProxy(ctx context.Context, query *ProxyQuery, nodes []*Account, init func(*Account), assign func(*Account, *Proxy)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*Account)
|
||||||
|
for i := range nodes {
|
||||||
|
if nodes[i].ProxyID == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fk := *nodes[i].ProxyID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(proxy.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "proxy_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// loadUsageLogs eager-loads the O2M "usage_logs" edge: it fetches all UsageLog
// rows whose account_id matches one of the given accounts in a single query
// and assigns each row back to its parent account.
func (_q *AccountQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*Account, init func(*Account), assign func(*Account, *UsageLog)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*Account)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// When a column subset is selected, the FK column must still be fetched
	// so results can be matched back to their parent accounts.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(usagelog.FieldAccountID)
	}
	query.Where(predicate.UsageLog(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(account.UsageLogsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.AccountID
		node, ok := nodeids[fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "account_id" returned %v for node %v`, fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
// loadAccountGroups eager-loads the O2M edge to the account_groups join rows
// for the given accounts, batching them into a single query keyed by account_id.
func (_q *AccountQuery) loadAccountGroups(ctx context.Context, query *AccountGroupQuery, nodes []*Account, init func(*Account), assign func(*Account, *AccountGroup)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*Account)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// Keep the FK column in the selection even under a custom field subset.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(accountgroup.FieldAccountID)
	}
	query.Where(predicate.AccountGroup(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(account.AccountGroupsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.AccountID
		node, ok := nodeids[fk]
		if !ok {
			// AccountGroup has a composite key (no single ID field), so the
			// whole row is printed here rather than an ID.
			return fmt.Errorf(`unexpected referenced foreign-key "account_id" returned %v for node %v`, fk, n)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount counts the accounts matching the query, honoring custom modifiers
// and any selected field subset (with DISTINCT when Unique was requested).
func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||||
|
|
||||||
|
// querySpec translates the builder state (predicates, ordering, paging and
// selected fields) into a sqlgraph.QuerySpec ready for execution.
func (_q *AccountQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(account.Table, account.Columns, sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Queries reached through an edge traversal default to DISTINCT.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected first; avoid duplicating it.
		_spec.Node.Columns = append(_spec.Node.Columns, account.FieldID)
		for i := range fields {
			if fields[i] != account.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		if _q.withProxy != nil {
			// Eager-loading the proxy edge requires its FK column.
			_spec.Node.AddColumnOnce(account.FieldProxyID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for the query, applying modifiers,
// predicates, ordering, and limit/offset in that order.
func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(account.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = account.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// A selector carried over from an edge traversal replaces the fresh one.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *AccountQuery) ForUpdate(opts ...sql.LockOption) *AccountQuery {
	// PostgreSQL forbids SELECT DISTINCT ... FOR UPDATE, so uniqueness is dropped.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *AccountQuery) ForShare(opts ...sql.LockOption) *AccountQuery {
	// PostgreSQL forbids SELECT DISTINCT ... FOR SHARE, so uniqueness is dropped.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// AccountGroupBy is the group-by builder for Account entities.
type AccountGroupBy struct {
	// selector embeds the shared group-by/aggregation fields and helpers.
	selector
	// build is the underlying query whose results are grouped.
	build *AccountQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
// It returns the receiver to allow call chaining.
func (_g *AccountGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
// v is typically a pointer to a slice of structs matching the grouped columns.
func (_g *AccountGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AccountQuery, *AccountGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan executes the GROUP BY query and scans the rows into v.
// It selects the grouped columns plus any aggregations unless the caller
// already customized the selection.
func (_g *AccountGroupBy) sqlScan(ctx context.Context, root *AccountQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// AccountSelect is the builder for selecting fields of Account entities.
type AccountSelect struct {
	// Embeds the full query so Select can reuse its predicates and ordering.
	*AccountQuery
	// selector embeds the shared field-selection/aggregation helpers.
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
// It returns the receiver to allow call chaining.
func (_s *AccountSelect) Aggregate(fns ...AggregateFunc) *AccountSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
// v is typically a pointer to a slice of the selected field type(s).
func (_s *AccountSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*AccountQuery, *AccountSelect](ctx, _s.AccountQuery, _s, _s.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan executes the field-selection query and scans the rows into v.
// Aggregations either replace the selection (no fields chosen) or are
// appended after the chosen fields.
func (_s *AccountSelect) sqlScan(ctx context.Context, root *AccountQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
1735
backend/ent/account_update.go
Normal file
1735
backend/ent/account_update.go
Normal file
File diff suppressed because it is too large
Load Diff
176
backend/ent/accountgroup.go
Normal file
176
backend/ent/accountgroup.go
Normal file
@@ -0,0 +1,176 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroup is the model entity for the AccountGroup schema.
// It is the join row linking an Account to a Group with a priority;
// its primary key is the composite (account_id, group_id).
type AccountGroup struct {
	config `json:"-"`
	// AccountID holds the value of the "account_id" field.
	AccountID int64 `json:"account_id,omitempty"`
	// GroupID holds the value of the "group_id" field.
	GroupID int64 `json:"group_id,omitempty"`
	// Priority holds the value of the "priority" field.
	Priority int `json:"priority,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the AccountGroupQuery when eager-loading is set.
	Edges AccountGroupEdges `json:"edges"`
	// selectValues stores values of dynamically selected columns (modifiers, order terms).
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// AccountGroupEdges holds the relations/edges for other nodes in the graph.
type AccountGroupEdges struct {
	// Account holds the value of the account edge.
	Account *Account `json:"account,omitempty"`
	// Group holds the value of the group edge.
	Group *Group `json:"group,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 tracks the account edge, index 1 the group edge.
	loadedTypes [2]bool
}
|
||||||
|
|
||||||
|
// AccountOrErr returns the Account value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AccountGroupEdges) AccountOrErr() (*Account, error) {
	if e.Account != nil {
		return e.Account, nil
	} else if e.loadedTypes[0] {
		// Edge was requested but no matching row existed.
		return nil, &NotFoundError{label: account.Label}
	}
	return nil, &NotLoadedError{edge: "account"}
}
|
||||||
|
|
||||||
|
// GroupOrErr returns the Group value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e AccountGroupEdges) GroupOrErr() (*Group, error) {
	if e.Group != nil {
		return e.Group, nil
	} else if e.loadedTypes[1] {
		// Edge was requested but no matching row existed.
		return nil, &NotFoundError{label: group.Label}
	}
	return nil, &NotLoadedError{edge: "group"}
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
// Each returned element is a pointer matching the SQL type of the
// corresponding column; unknown columns get a sql.UnknownType holder.
func (*AccountGroup) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case accountgroup.FieldAccountID, accountgroup.FieldGroupID, accountgroup.FieldPriority:
			values[i] = new(sql.NullInt64)
		case accountgroup.FieldCreatedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the AccountGroup fields. Columns not known to the schema are stored in
// selectValues for retrieval via Value.
func (_m *AccountGroup) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case accountgroup.FieldAccountID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field account_id", values[i])
			} else if value.Valid {
				_m.AccountID = value.Int64
			}
		case accountgroup.FieldGroupID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field group_id", values[i])
			} else if value.Valid {
				_m.GroupID = value.Int64
			}
		case accountgroup.FieldPriority:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field priority", values[i])
			} else if value.Valid {
				// SQL integers arrive as int64; priority is declared as int.
				_m.Priority = int(value.Int64)
			}
		case accountgroup.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		default:
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the AccountGroup.
// This includes values selected through modifiers, order, etc.
func (_m *AccountGroup) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryAccount queries the "account" edge of the AccountGroup entity.
func (_m *AccountGroup) QueryAccount() *AccountQuery {
	return NewAccountGroupClient(_m.config).QueryAccount(_m)
}
|
||||||
|
|
||||||
|
// QueryGroup queries the "group" edge of the AccountGroup entity.
func (_m *AccountGroup) QueryGroup() *GroupQuery {
	return NewAccountGroupClient(_m.config).QueryGroup(_m)
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this AccountGroup.
// Note that you need to call AccountGroup.Unwrap() before calling this method if this AccountGroup
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *AccountGroup) Update() *AccountGroupUpdateOne {
	return NewAccountGroupClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the AccountGroup entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// It panics if the entity is not backed by a transactional driver.
func (_m *AccountGroup) Unwrap() *AccountGroup {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: AccountGroup is not a transactional entity")
	}
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
// It renders all schema fields in declaration order for debugging/logging.
func (_m *AccountGroup) String() string {
	var builder strings.Builder
	builder.WriteString("AccountGroup(")
	builder.WriteString("account_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.AccountID))
	builder.WriteString(", ")
	builder.WriteString("group_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.GroupID))
	builder.WriteString(", ")
	builder.WriteString("priority=")
	builder.WriteString(fmt.Sprintf("%v", _m.Priority))
	builder.WriteString(", ")
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// AccountGroups is a parsable slice of AccountGroup.
type AccountGroups []*AccountGroup
|
||||||
123
backend/ent/accountgroup/accountgroup.go
Normal file
123
backend/ent/accountgroup/accountgroup.go
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package accountgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schema metadata constants for the accountgroup package: field/edge names,
// table names, and join-column names used by generated queries and predicates.
const (
	// Label holds the string label denoting the accountgroup type in the database.
	Label = "account_group"
	// FieldAccountID holds the string denoting the account_id field in the database.
	FieldAccountID = "account_id"
	// FieldGroupID holds the string denoting the group_id field in the database.
	FieldGroupID = "group_id"
	// FieldPriority holds the string denoting the priority field in the database.
	FieldPriority = "priority"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// EdgeAccount holds the string denoting the account edge name in mutations.
	EdgeAccount = "account"
	// EdgeGroup holds the string denoting the group edge name in mutations.
	EdgeGroup = "group"
	// AccountFieldID holds the string denoting the ID field of the Account.
	AccountFieldID = "id"
	// GroupFieldID holds the string denoting the ID field of the Group.
	GroupFieldID = "id"
	// Table holds the table name of the accountgroup in the database.
	Table = "account_groups"
	// AccountTable is the table that holds the account relation/edge.
	AccountTable = "account_groups"
	// AccountInverseTable is the table name for the Account entity.
	// It exists in this package in order to avoid circular dependency with the "account" package.
	AccountInverseTable = "accounts"
	// AccountColumn is the table column denoting the account relation/edge.
	AccountColumn = "account_id"
	// GroupTable is the table that holds the group relation/edge.
	GroupTable = "account_groups"
	// GroupInverseTable is the table name for the Group entity.
	// It exists in this package in order to avoid circular dependency with the "group" package.
	GroupInverseTable = "groups"
	// GroupColumn is the table column denoting the group relation/edge.
	GroupColumn = "group_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for accountgroup fields.
// Order matches the schema declaration order.
var Columns = []string{
	FieldAccountID,
	FieldGroupID,
	FieldPriority,
	FieldCreatedAt,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default value hooks populated by the runtime package from the schema.
var (
	// DefaultPriority holds the default value on creation for the "priority" field.
	DefaultPriority int
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the AccountGroup queries.
type OrderOption func(*sql.Selector)

// ByAccountID orders the results by the account_id field.
func ByAccountID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldAccountID, opts...).ToFunc()
}

// ByGroupID orders the results by the group_id field.
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldGroupID, opts...).ToFunc()
}

// ByPriority orders the results by the priority field.
func ByPriority(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPriority, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByAccountField orders the results by account field.
func ByAccountField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAccountStep(), sql.OrderByField(field, opts...))
	}
}

// ByGroupField orders the results by group field.
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
	}
}

// newAccountStep builds the graph traversal step from the join table to Account.
func newAccountStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, AccountColumn),
		sqlgraph.To(AccountInverseTable, AccountFieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn),
	)
}

// newGroupStep builds the graph traversal step from the join table to Group.
func newGroupStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, GroupColumn),
		sqlgraph.To(GroupInverseTable, GroupFieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn),
	)
}
|
||||||
212
backend/ent/accountgroup/where.go
Normal file
212
backend/ent/accountgroup/where.go
Normal file
@@ -0,0 +1,212 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package accountgroup
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Shorthand equality predicates: each is identical to its FieldEQ variant.

// AccountID applies equality check predicate on the "account_id" field. It's identical to AccountIDEQ.
func AccountID(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v))
}

// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
func GroupID(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v))
}

// Priority applies equality check predicate on the "priority" field. It's identical to PriorityEQ.
func Priority(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// Predicates on the "account_id" field.

// AccountIDEQ applies the EQ predicate on the "account_id" field.
func AccountIDEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldAccountID, v))
}

// AccountIDNEQ applies the NEQ predicate on the "account_id" field.
func AccountIDNEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldAccountID, v))
}

// AccountIDIn applies the In predicate on the "account_id" field.
func AccountIDIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldAccountID, vs...))
}

// AccountIDNotIn applies the NotIn predicate on the "account_id" field.
func AccountIDNotIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldAccountID, vs...))
}
|
||||||
|
|
||||||
|
// Predicates on the "group_id" field.

// GroupIDEQ applies the EQ predicate on the "group_id" field.
func GroupIDEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldGroupID, v))
}

// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
func GroupIDNEQ(v int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldGroupID, v))
}

// GroupIDIn applies the In predicate on the "group_id" field.
func GroupIDIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldGroupID, vs...))
}

// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
func GroupIDNotIn(vs ...int64) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldGroupID, vs...))
}
|
||||||
|
|
||||||
|
// Predicates on the "priority" field (equality, membership, and range).

// PriorityEQ applies the EQ predicate on the "priority" field.
func PriorityEQ(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldPriority, v))
}

// PriorityNEQ applies the NEQ predicate on the "priority" field.
func PriorityNEQ(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldPriority, v))
}

// PriorityIn applies the In predicate on the "priority" field.
func PriorityIn(vs ...int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldPriority, vs...))
}

// PriorityNotIn applies the NotIn predicate on the "priority" field.
func PriorityNotIn(vs ...int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldPriority, vs...))
}

// PriorityGT applies the GT predicate on the "priority" field.
func PriorityGT(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGT(FieldPriority, v))
}

// PriorityGTE applies the GTE predicate on the "priority" field.
func PriorityGTE(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGTE(FieldPriority, v))
}

// PriorityLT applies the LT predicate on the "priority" field.
func PriorityLT(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLT(FieldPriority, v))
}

// PriorityLTE applies the LTE predicate on the "priority" field.
func PriorityLTE(v int) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLTE(FieldPriority, v))
}
|
||||||
|
|
||||||
|
// Predicates on the "created_at" field (equality, membership, and range).

// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldEQ(FieldCreatedAt, v))
}

// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
func CreatedAtNEQ(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNEQ(FieldCreatedAt, v))
}

// CreatedAtIn applies the In predicate on the "created_at" field.
func CreatedAtIn(vs ...time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldIn(FieldCreatedAt, vs...))
}

// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
func CreatedAtNotIn(vs ...time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldNotIn(FieldCreatedAt, vs...))
}

// CreatedAtGT applies the GT predicate on the "created_at" field.
func CreatedAtGT(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGT(FieldCreatedAt, v))
}

// CreatedAtGTE applies the GTE predicate on the "created_at" field.
func CreatedAtGTE(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldGTE(FieldCreatedAt, v))
}

// CreatedAtLT applies the LT predicate on the "created_at" field.
func CreatedAtLT(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLT(FieldCreatedAt, v))
}

// CreatedAtLTE applies the LTE predicate on the "created_at" field.
func CreatedAtLTE(v time.Time) predicate.AccountGroup {
	return predicate.AccountGroup(sql.FieldLTE(FieldCreatedAt, v))
}
|
||||||
|
|
||||||
|
// Edge-existence predicates for the "account" and "group" edges.

// HasAccount applies the HasEdge predicate on the "account" edge.
func HasAccount() predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, AccountColumn),
			sqlgraph.Edge(sqlgraph.M2O, false, AccountTable, AccountColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasAccountWith applies the HasEdge predicate on the "account" edge with a given conditions (other predicates).
func HasAccountWith(preds ...predicate.Account) predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := newAccountStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// All supplied predicates are AND-ed on the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}

// HasGroup applies the HasEdge predicate on the "group" edge.
func HasGroup() predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, GroupColumn),
			sqlgraph.Edge(sqlgraph.M2O, false, GroupTable, GroupColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}

// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.AccountGroup {
	return predicate.AccountGroup(func(s *sql.Selector) {
		step := newGroupStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			// All supplied predicates are AND-ed on the neighbor selector.
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.AccountGroup) predicate.AccountGroup {
|
||||||
|
return predicate.AccountGroup(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.AccountGroup) predicate.AccountGroup {
|
||||||
|
return predicate.AccountGroup(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.AccountGroup) predicate.AccountGroup {
|
||||||
|
return predicate.AccountGroup(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
653
backend/ent/accountgroup_create.go
Normal file
653
backend/ent/accountgroup_create.go
Normal file
@@ -0,0 +1,653 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupCreate is the builder for creating a AccountGroup entity.
|
||||||
|
type AccountGroupCreate struct {
|
||||||
|
config
|
||||||
|
mutation *AccountGroupMutation
|
||||||
|
hooks []Hook
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (_c *AccountGroupCreate) SetAccountID(v int64) *AccountGroupCreate {
|
||||||
|
_c.mutation.SetAccountID(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_c *AccountGroupCreate) SetGroupID(v int64) *AccountGroupCreate {
|
||||||
|
_c.mutation.SetGroupID(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (_c *AccountGroupCreate) SetPriority(v int) *AccountGroupCreate {
|
||||||
|
_c.mutation.SetPriority(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillablePriority sets the "priority" field if the given value is not nil.
|
||||||
|
func (_c *AccountGroupCreate) SetNillablePriority(v *int) *AccountGroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetPriority(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCreatedAt sets the "created_at" field.
|
||||||
|
func (_c *AccountGroupCreate) SetCreatedAt(v time.Time) *AccountGroupCreate {
|
||||||
|
_c.mutation.SetCreatedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||||
|
func (_c *AccountGroupCreate) SetNillableCreatedAt(v *time.Time) *AccountGroupCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetCreatedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccount sets the "account" edge to the Account entity.
|
||||||
|
func (_c *AccountGroupCreate) SetAccount(v *Account) *AccountGroupCreate {
|
||||||
|
return _c.SetAccountID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_c *AccountGroupCreate) SetGroup(v *Group) *AccountGroupCreate {
|
||||||
|
return _c.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AccountGroupMutation object of the builder.
|
||||||
|
func (_c *AccountGroupCreate) Mutation() *AccountGroupMutation {
|
||||||
|
return _c.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the AccountGroup in the database.
|
||||||
|
func (_c *AccountGroupCreate) Save(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
_c.defaults()
|
||||||
|
return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX calls Save and panics if Save returns an error.
|
||||||
|
func (_c *AccountGroupCreate) SaveX(ctx context.Context) *AccountGroup {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *AccountGroupCreate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *AccountGroupCreate) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_c *AccountGroupCreate) defaults() {
|
||||||
|
if _, ok := _c.mutation.Priority(); !ok {
|
||||||
|
v := accountgroup.DefaultPriority
|
||||||
|
_c.mutation.SetPriority(v)
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
|
v := accountgroup.DefaultCreatedAt()
|
||||||
|
_c.mutation.SetCreatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_c *AccountGroupCreate) check() error {
|
||||||
|
if _, ok := _c.mutation.AccountID(); !ok {
|
||||||
|
return &ValidationError{Name: "account_id", err: errors.New(`ent: missing required field "AccountGroup.account_id"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.GroupID(); !ok {
|
||||||
|
return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "AccountGroup.group_id"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.Priority(); !ok {
|
||||||
|
return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "AccountGroup.priority"`)}
|
||||||
|
}
|
||||||
|
if _, ok := _c.mutation.CreatedAt(); !ok {
|
||||||
|
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "AccountGroup.created_at"`)}
|
||||||
|
}
|
||||||
|
if len(_c.mutation.AccountIDs()) == 0 {
|
||||||
|
return &ValidationError{Name: "account", err: errors.New(`ent: missing required edge "AccountGroup.account"`)}
|
||||||
|
}
|
||||||
|
if len(_c.mutation.GroupIDs()) == 0 {
|
||||||
|
return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "AccountGroup.group"`)}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *AccountGroupCreate) sqlSave(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
if err := _c.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_node, _spec := _c.createSpec()
|
||||||
|
if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_c *AccountGroupCreate) createSpec() (*AccountGroup, *sqlgraph.CreateSpec) {
|
||||||
|
var (
|
||||||
|
_node = &AccountGroup{config: _c.config}
|
||||||
|
_spec = sqlgraph.NewCreateSpec(accountgroup.Table, nil)
|
||||||
|
)
|
||||||
|
_spec.OnConflict = _c.conflict
|
||||||
|
if value, ok := _c.mutation.Priority(); ok {
|
||||||
|
_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
|
||||||
|
_node.Priority = value
|
||||||
|
}
|
||||||
|
if value, ok := _c.mutation.CreatedAt(); ok {
|
||||||
|
_spec.SetField(accountgroup.FieldCreatedAt, field.TypeTime, value)
|
||||||
|
_node.CreatedAt = value
|
||||||
|
}
|
||||||
|
if nodes := _c.mutation.AccountIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.AccountTable,
|
||||||
|
Columns: []string{accountgroup.AccountColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_node.AccountID = nodes[0]
|
||||||
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
|
}
|
||||||
|
if nodes := _c.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.GroupTable,
|
||||||
|
Columns: []string{accountgroup.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_node.GroupID = nodes[0]
|
||||||
|
_spec.Edges = append(_spec.Edges, edge)
|
||||||
|
}
|
||||||
|
return _node, _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// SetAccountID(v).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.AccountGroupUpsert) {
|
||||||
|
// SetAccountID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreate) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertOne {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &AccountGroupUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreate) OnConflictColumns(columns ...string) *AccountGroupUpsertOne {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AccountGroupUpsertOne{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// AccountGroupUpsertOne is the builder for "upsert"-ing
|
||||||
|
// one AccountGroup node.
|
||||||
|
AccountGroupUpsertOne struct {
|
||||||
|
create *AccountGroupCreate
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupUpsert is the "OnConflict" setter.
|
||||||
|
AccountGroupUpsert struct {
|
||||||
|
*sql.UpdateSet
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (u *AccountGroupUpsert) SetAccountID(v int64) *AccountGroupUpsert {
|
||||||
|
u.Set(accountgroup.FieldAccountID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccountID sets the "account_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsert) UpdateAccountID() *AccountGroupUpsert {
|
||||||
|
u.SetExcluded(accountgroup.FieldAccountID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (u *AccountGroupUpsert) SetGroupID(v int64) *AccountGroupUpsert {
|
||||||
|
u.Set(accountgroup.FieldGroupID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateGroupID sets the "group_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsert) UpdateGroupID() *AccountGroupUpsert {
|
||||||
|
u.SetExcluded(accountgroup.FieldGroupID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (u *AccountGroupUpsert) SetPriority(v int) *AccountGroupUpsert {
|
||||||
|
u.Set(accountgroup.FieldPriority, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriority sets the "priority" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsert) UpdatePriority() *AccountGroupUpsert {
|
||||||
|
u.SetExcluded(accountgroup.FieldPriority)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds v to the "priority" field.
|
||||||
|
func (u *AccountGroupUpsert) AddPriority(v int) *AccountGroupUpsert {
|
||||||
|
u.Add(accountgroup.FieldPriority, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertOne) UpdateNewValues() *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
if _, exists := u.create.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(accountgroup.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertOne) Ignore() *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *AccountGroupUpsertOne) DoNothing() *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AccountGroupCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AccountGroupUpsertOne) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AccountGroupUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (u *AccountGroupUpsertOne) SetAccountID(v int64) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetAccountID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccountID sets the "account_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertOne) UpdateAccountID() *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateAccountID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (u *AccountGroupUpsertOne) SetGroupID(v int64) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetGroupID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateGroupID sets the "group_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertOne) UpdateGroupID() *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateGroupID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertOne) SetPriority(v int) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds v to the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertOne) AddPriority(v int) *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.AddPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriority sets the "priority" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertOne) UpdatePriority() *AccountGroupUpsertOne {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdatePriority()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *AccountGroupUpsertOne) Exec(ctx context.Context) error {
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for AccountGroupCreate.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *AccountGroupUpsertOne) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupCreateBulk is the builder for creating many AccountGroup entities in bulk.
|
||||||
|
type AccountGroupCreateBulk struct {
|
||||||
|
config
|
||||||
|
err error
|
||||||
|
builders []*AccountGroupCreate
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the AccountGroup entities in the database.
|
||||||
|
func (_c *AccountGroupCreateBulk) Save(ctx context.Context) ([]*AccountGroup, error) {
|
||||||
|
if _c.err != nil {
|
||||||
|
return nil, _c.err
|
||||||
|
}
|
||||||
|
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||||
|
nodes := make([]*AccountGroup, len(_c.builders))
|
||||||
|
mutators := make([]Mutator, len(_c.builders))
|
||||||
|
for i := range _c.builders {
|
||||||
|
func(i int, root context.Context) {
|
||||||
|
builder := _c.builders[i]
|
||||||
|
builder.defaults()
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutation, ok := m.(*AccountGroupMutation)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
if err := builder.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builder.mutation = mutation
|
||||||
|
var err error
|
||||||
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
|
if i < len(mutators)-1 {
|
||||||
|
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||||
|
} else {
|
||||||
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
|
spec.OnConflict = _c.conflict
|
||||||
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
|
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mutation.done = true
|
||||||
|
return nodes[i], nil
|
||||||
|
})
|
||||||
|
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||||
|
mut = builder.hooks[i](mut)
|
||||||
|
}
|
||||||
|
mutators[i] = mut
|
||||||
|
}(i, ctx)
|
||||||
|
}
|
||||||
|
if len(mutators) > 0 {
|
||||||
|
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *AccountGroupCreateBulk) SaveX(ctx context.Context) []*AccountGroup {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *AccountGroupCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *AccountGroupCreateBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.CreateBulk(builders...).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.AccountGroupUpsert) {
|
||||||
|
// SetAccountID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreateBulk) OnConflict(opts ...sql.ConflictOption) *AccountGroupUpsertBulk {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &AccountGroupUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *AccountGroupCreateBulk) OnConflictColumns(columns ...string) *AccountGroupUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &AccountGroupUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupUpsertBulk is the builder for "upsert"-ing
|
||||||
|
// a bulk of AccountGroup nodes.
|
||||||
|
type AccountGroupUpsertBulk struct {
|
||||||
|
create *AccountGroupCreateBulk
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
|
||||||
|
// were set on create. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdateNewValues() *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
|
||||||
|
for _, b := range u.create.builders {
|
||||||
|
if _, exists := b.mutation.CreatedAt(); exists {
|
||||||
|
s.SetIgnore(accountgroup.FieldCreatedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *AccountGroupUpsertBulk) Ignore() *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *AccountGroupUpsertBulk) DoNothing() *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the AccountGroupCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *AccountGroupUpsertBulk) Update(set func(*AccountGroupUpsert)) *AccountGroupUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&AccountGroupUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) SetAccountID(v int64) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetAccountID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateAccountID sets the "account_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdateAccountID() *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateAccountID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) SetGroupID(v int64) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetGroupID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateGroupID sets the "group_id" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdateGroupID() *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdateGroupID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) SetPriority(v int) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.SetPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds v to the "priority" field.
|
||||||
|
func (u *AccountGroupUpsertBulk) AddPriority(v int) *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.AddPriority(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePriority sets the "priority" field to the value that was provided on create.
|
||||||
|
func (u *AccountGroupUpsertBulk) UpdatePriority() *AccountGroupUpsertBulk {
|
||||||
|
return u.Update(func(s *AccountGroupUpsert) {
|
||||||
|
s.UpdatePriority()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *AccountGroupUpsertBulk) Exec(ctx context.Context) error {
|
||||||
|
if u.create.err != nil {
|
||||||
|
return u.create.err
|
||||||
|
}
|
||||||
|
for i, b := range u.create.builders {
|
||||||
|
if len(b.conflict) != 0 {
|
||||||
|
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AccountGroupCreateBulk instead", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for AccountGroupCreateBulk.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *AccountGroupUpsertBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
87
backend/ent/accountgroup_delete.go
Normal file
87
backend/ent/accountgroup_delete.go
Normal file
@@ -0,0 +1,87 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupDelete is the builder for deleting a AccountGroup entity.
|
||||||
|
type AccountGroupDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountGroupMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupDelete builder.
|
||||||
|
func (_d *AccountGroupDelete) Where(ps ...predicate.AccountGroup) *AccountGroupDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *AccountGroupDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountGroupDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *AccountGroupDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(accountgroup.Table, nil)
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupDeleteOne is the builder for deleting a single AccountGroup entity.
|
||||||
|
type AccountGroupDeleteOne struct {
|
||||||
|
_d *AccountGroupDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupDelete builder.
|
||||||
|
func (_d *AccountGroupDeleteOne) Where(ps ...predicate.AccountGroup) *AccountGroupDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *AccountGroupDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{accountgroup.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *AccountGroupDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
640
backend/ent/accountgroup_query.go
Normal file
640
backend/ent/accountgroup_query.go
Normal file
@@ -0,0 +1,640 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupQuery is the builder for querying AccountGroup entities.
|
||||||
|
type AccountGroupQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []accountgroup.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.AccountGroup
|
||||||
|
withAccount *AccountQuery
|
||||||
|
withGroup *GroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the AccountGroupQuery builder.
|
||||||
|
func (_q *AccountGroupQuery) Where(ps ...predicate.AccountGroup) *AccountGroupQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *AccountGroupQuery) Limit(limit int) *AccountGroupQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *AccountGroupQuery) Offset(offset int) *AccountGroupQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *AccountGroupQuery) Unique(unique bool) *AccountGroupQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *AccountGroupQuery) Order(o ...accountgroup.OrderOption) *AccountGroupQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccount chains the current query on the "account" edge.
|
||||||
|
func (_q *AccountGroupQuery) QueryAccount() *AccountQuery {
|
||||||
|
query := (&AccountClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(accountgroup.Table, accountgroup.AccountColumn, selector),
|
||||||
|
sqlgraph.To(account.Table, account.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.AccountTable, accountgroup.AccountColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup chains the current query on the "group" edge.
|
||||||
|
func (_q *AccountGroupQuery) QueryGroup() *GroupQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(accountgroup.Table, accountgroup.GroupColumn, selector),
|
||||||
|
sqlgraph.To(group.Table, group.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, false, accountgroup.GroupTable, accountgroup.GroupColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first AccountGroup entity from the query.
|
||||||
|
// Returns a *NotFoundError when no AccountGroup was found.
|
||||||
|
func (_q *AccountGroupQuery) First(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{accountgroup.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *AccountGroupQuery) FirstX(ctx context.Context) *AccountGroup {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single AccountGroup entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one AccountGroup entity is found.
|
||||||
|
// Returns a *NotFoundError when no AccountGroup entities are found.
|
||||||
|
func (_q *AccountGroupQuery) Only(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{accountgroup.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{accountgroup.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *AccountGroupQuery) OnlyX(ctx context.Context) *AccountGroup {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of AccountGroups.
|
||||||
|
func (_q *AccountGroupQuery) All(ctx context.Context) ([]*AccountGroup, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*AccountGroup, *AccountGroupQuery]()
|
||||||
|
return withInterceptors[[]*AccountGroup](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *AccountGroupQuery) AllX(ctx context.Context) []*AccountGroup {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *AccountGroupQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*AccountGroupQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *AccountGroupQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *AccountGroupQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.First(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *AccountGroupQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the AccountGroupQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *AccountGroupQuery) Clone() *AccountGroupQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &AccountGroupQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]accountgroup.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.AccountGroup{}, _q.predicates...),
|
||||||
|
withAccount: _q.withAccount.Clone(),
|
||||||
|
withGroup: _q.withGroup.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithAccount tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "account" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountGroupQuery) WithAccount(opts ...func(*AccountQuery)) *AccountGroupQuery {
|
||||||
|
query := (&AccountClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withAccount = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *AccountGroupQuery) WithGroup(opts ...func(*GroupQuery)) *AccountGroupQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withGroup = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// AccountID int64 `json:"account_id,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Query().
|
||||||
|
// GroupBy(accountgroup.FieldAccountID).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AccountGroupQuery) GroupBy(field string, fields ...string) *AccountGroupGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &AccountGroupGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = accountgroup.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// AccountID int64 `json:"account_id,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.AccountGroup.Query().
|
||||||
|
// Select(accountgroup.FieldAccountID).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *AccountGroupQuery) Select(fields ...string) *AccountGroupSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &AccountGroupSelect{AccountGroupQuery: _q}
|
||||||
|
sbuild.label = accountgroup.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a AccountGroupSelect configured with the given aggregations.
|
||||||
|
func (_q *AccountGroupQuery) Aggregate(fns ...AggregateFunc) *AccountGroupSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountGroupQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !accountgroup.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*AccountGroup, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*AccountGroup{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [2]bool{
|
||||||
|
_q.withAccount != nil,
|
||||||
|
_q.withGroup != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*AccountGroup).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &AccountGroup{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withAccount; query != nil {
|
||||||
|
if err := _q.loadAccount(ctx, query, nodes, nil,
|
||||||
|
func(n *AccountGroup, e *Account) { n.Edges.Account = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withGroup; query != nil {
|
||||||
|
if err := _q.loadGroup(ctx, query, nodes, nil,
|
||||||
|
func(n *AccountGroup, e *Group) { n.Edges.Group = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountGroupQuery) loadAccount(ctx context.Context, query *AccountQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Account)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*AccountGroup)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].AccountID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(account.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "account_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *AccountGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*AccountGroup, init func(*AccountGroup), assign func(*AccountGroup, *Group)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*AccountGroup)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].GroupID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(group.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Unique = false
|
||||||
|
_spec.Node.Columns = nil
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountGroupQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(accountgroup.Table, accountgroup.Columns, nil)
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
for i := range fields {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
if _q.withAccount != nil {
|
||||||
|
_spec.Node.AddColumnOnce(accountgroup.FieldAccountID)
|
||||||
|
}
|
||||||
|
if _q.withGroup != nil {
|
||||||
|
_spec.Node.AddColumnOnce(accountgroup.FieldGroupID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(accountgroup.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = accountgroup.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *AccountGroupQuery) ForUpdate(opts ...sql.LockOption) *AccountGroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *AccountGroupQuery) ForShare(opts ...sql.LockOption) *AccountGroupQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupGroupBy is the group-by builder for AccountGroup entities.
|
||||||
|
type AccountGroupGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *AccountGroupQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *AccountGroupGroupBy) Aggregate(fns ...AggregateFunc) *AccountGroupGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *AccountGroupGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AccountGroupQuery, *AccountGroupGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *AccountGroupGroupBy) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupSelect is the builder for selecting fields of AccountGroup entities.
|
||||||
|
type AccountGroupSelect struct {
|
||||||
|
*AccountGroupQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *AccountGroupSelect) Aggregate(fns ...AggregateFunc) *AccountGroupSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *AccountGroupSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*AccountGroupQuery, *AccountGroupSelect](ctx, _s.AccountGroupQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *AccountGroupSelect) sqlScan(ctx context.Context, root *AccountGroupQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
477
backend/ent/accountgroup_update.go
Normal file
477
backend/ent/accountgroup_update.go
Normal file
@@ -0,0 +1,477 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroupUpdate is the builder for updating AccountGroup entities.
|
||||||
|
type AccountGroupUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountGroupMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupUpdate builder.
|
||||||
|
func (_u *AccountGroupUpdate) Where(ps ...predicate.AccountGroup) *AccountGroupUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (_u *AccountGroupUpdate) SetAccountID(v int64) *AccountGroupUpdate {
|
||||||
|
_u.mutation.SetAccountID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAccountID sets the "account_id" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdate) SetNillableAccountID(v *int64) *AccountGroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAccountID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *AccountGroupUpdate) SetGroupID(v int64) *AccountGroupUpdate {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdate) SetNillableGroupID(v *int64) *AccountGroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (_u *AccountGroupUpdate) SetPriority(v int) *AccountGroupUpdate {
|
||||||
|
_u.mutation.ResetPriority()
|
||||||
|
_u.mutation.SetPriority(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillablePriority sets the "priority" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdate) SetNillablePriority(v *int) *AccountGroupUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetPriority(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds value to the "priority" field.
|
||||||
|
func (_u *AccountGroupUpdate) AddPriority(v int) *AccountGroupUpdate {
|
||||||
|
_u.mutation.AddPriority(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccount sets the "account" edge to the Account entity.
|
||||||
|
func (_u *AccountGroupUpdate) SetAccount(v *Account) *AccountGroupUpdate {
|
||||||
|
return _u.SetAccountID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *AccountGroupUpdate) SetGroup(v *Group) *AccountGroupUpdate {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AccountGroupMutation object of the builder.
|
||||||
|
func (_u *AccountGroupUpdate) Mutation() *AccountGroupMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAccount clears the "account" edge to the Account entity.
|
||||||
|
func (_u *AccountGroupUpdate) ClearAccount() *AccountGroupUpdate {
|
||||||
|
_u.mutation.ClearAccount()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *AccountGroupUpdate) ClearGroup() *AccountGroupUpdate {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *AccountGroupUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AccountGroupUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *AccountGroupUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AccountGroupUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *AccountGroupUpdate) check() error {
|
||||||
|
if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *AccountGroupUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Priority(); ok {
|
||||||
|
_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedPriority(); ok {
|
||||||
|
_spec.AddField(accountgroup.FieldPriority, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.AccountCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.AccountTable,
|
||||||
|
Columns: []string{accountgroup.AccountColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.AccountTable,
|
||||||
|
Columns: []string{accountgroup.AccountColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.GroupTable,
|
||||||
|
Columns: []string{accountgroup.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.GroupTable,
|
||||||
|
Columns: []string{accountgroup.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{accountgroup.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupUpdateOne is the builder for updating a single AccountGroup entity.
|
||||||
|
type AccountGroupUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *AccountGroupMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccountID sets the "account_id" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetAccountID(v int64) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.SetAccountID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableAccountID sets the "account_id" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetNillableAccountID(v *int64) *AccountGroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetAccountID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetGroupID(v int64) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetNillableGroupID(v *int64) *AccountGroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPriority sets the "priority" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetPriority(v int) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.ResetPriority()
|
||||||
|
_u.mutation.SetPriority(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillablePriority sets the "priority" field if the given value is not nil.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetNillablePriority(v *int) *AccountGroupUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetPriority(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPriority adds value to the "priority" field.
|
||||||
|
func (_u *AccountGroupUpdateOne) AddPriority(v int) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.AddPriority(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetAccount sets the "account" edge to the Account entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetAccount(v *Account) *AccountGroupUpdateOne {
|
||||||
|
return _u.SetAccountID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) SetGroup(v *Group) *AccountGroupUpdateOne {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the AccountGroupMutation object of the builder.
|
||||||
|
func (_u *AccountGroupUpdateOne) Mutation() *AccountGroupMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAccount clears the "account" edge to the Account entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) ClearAccount() *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.ClearAccount()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) ClearGroup() *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the AccountGroupUpdate builder.
|
||||||
|
func (_u *AccountGroupUpdateOne) Where(ps ...predicate.AccountGroup) *AccountGroupUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *AccountGroupUpdateOne) Select(field string, fields ...string) *AccountGroupUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated AccountGroup entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) Save(ctx context.Context) (*AccountGroup, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *AccountGroupUpdateOne) SaveX(ctx context.Context) *AccountGroup {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *AccountGroupUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *AccountGroupUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *AccountGroupUpdateOne) check() error {
|
||||||
|
if _u.mutation.AccountCleared() && len(_u.mutation.AccountIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AccountGroup.account"`)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() && len(_u.mutation.GroupIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "AccountGroup.group"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *AccountGroupUpdateOne) sqlSave(ctx context.Context) (_node *AccountGroup, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(accountgroup.Table, accountgroup.Columns, sqlgraph.NewFieldSpec(accountgroup.FieldAccountID, field.TypeInt64), sqlgraph.NewFieldSpec(accountgroup.FieldGroupID, field.TypeInt64))
|
||||||
|
if id, ok := _u.mutation.AccountID(); !ok {
|
||||||
|
return nil, &ValidationError{Name: "account_id", err: errors.New(`ent: missing "AccountGroup.account_id" for update`)}
|
||||||
|
} else {
|
||||||
|
_spec.Node.CompositeID[0].Value = id
|
||||||
|
}
|
||||||
|
if id, ok := _u.mutation.GroupID(); !ok {
|
||||||
|
return nil, &ValidationError{Name: "group_id", err: errors.New(`ent: missing "AccountGroup.group_id" for update`)}
|
||||||
|
} else {
|
||||||
|
_spec.Node.CompositeID[1].Value = id
|
||||||
|
}
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, len(fields))
|
||||||
|
for i, f := range fields {
|
||||||
|
if !accountgroup.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
_spec.Node.Columns[i] = f
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Priority(); ok {
|
||||||
|
_spec.SetField(accountgroup.FieldPriority, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedPriority(); ok {
|
||||||
|
_spec.AddField(accountgroup.FieldPriority, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.AccountCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.AccountTable,
|
||||||
|
Columns: []string{accountgroup.AccountColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.AccountIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.AccountTable,
|
||||||
|
Columns: []string{accountgroup.AccountColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.GroupTable,
|
||||||
|
Columns: []string{accountgroup.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: false,
|
||||||
|
Table: accountgroup.GroupTable,
|
||||||
|
Columns: []string{accountgroup.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &AccountGroup{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{accountgroup.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
282
backend/ent/apikey.go
Normal file
282
backend/ent/apikey.go
Normal file
@@ -0,0 +1,282 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKey is the model entity for the APIKey schema.
|
||||||
|
type APIKey struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// DeletedAt holds the value of the "deleted_at" field.
|
||||||
|
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||||
|
// UserID holds the value of the "user_id" field.
|
||||||
|
UserID int64 `json:"user_id,omitempty"`
|
||||||
|
// Key holds the value of the "key" field.
|
||||||
|
Key string `json:"key,omitempty"`
|
||||||
|
// Name holds the value of the "name" field.
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
// GroupID holds the value of the "group_id" field.
|
||||||
|
GroupID *int64 `json:"group_id,omitempty"`
|
||||||
|
// Status holds the value of the "status" field.
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]
|
||||||
|
IPWhitelist []string `json:"ip_whitelist,omitempty"`
|
||||||
|
// Blocked IPs/CIDRs
|
||||||
|
IPBlacklist []string `json:"ip_blacklist,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the APIKeyQuery when eager-loading is set.
|
||||||
|
Edges APIKeyEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type APIKeyEdges struct {
|
||||||
|
// User holds the value of the user edge.
|
||||||
|
User *User `json:"user,omitempty"`
|
||||||
|
// Group holds the value of the group edge.
|
||||||
|
Group *Group `json:"group,omitempty"`
|
||||||
|
// UsageLogs holds the value of the usage_logs edge.
|
||||||
|
UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [3]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserOrErr returns the User value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e APIKeyEdges) UserOrErr() (*User, error) {
|
||||||
|
if e.User != nil {
|
||||||
|
return e.User, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: user.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupOrErr returns the Group value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e APIKeyEdges) GroupOrErr() (*Group, error) {
|
||||||
|
if e.Group != nil {
|
||||||
|
return e.Group, nil
|
||||||
|
} else if e.loadedTypes[1] {
|
||||||
|
return nil, &NotFoundError{label: group.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "group"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsageLogsOrErr returns the UsageLogs value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e APIKeyEdges) UsageLogsOrErr() ([]*UsageLog, error) {
|
||||||
|
if e.loadedTypes[2] {
|
||||||
|
return e.UsageLogs, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "usage_logs"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*APIKey) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist:
|
||||||
|
values[i] = new([]byte)
|
||||||
|
case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case apikey.FieldCreatedAt, apikey.FieldUpdatedAt, apikey.FieldDeletedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the APIKey fields.
|
||||||
|
func (_m *APIKey) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case apikey.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case apikey.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldDeletedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.DeletedAt = new(time.Time)
|
||||||
|
*_m.DeletedAt = value.Time
|
||||||
|
}
|
||||||
|
case apikey.FieldUserID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field user_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UserID = value.Int64
|
||||||
|
}
|
||||||
|
case apikey.FieldKey:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field key", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Key = value.String
|
||||||
|
}
|
||||||
|
case apikey.FieldName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Name = value.String
|
||||||
|
}
|
||||||
|
case apikey.FieldGroupID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field group_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.GroupID = new(int64)
|
||||||
|
*_m.GroupID = value.Int64
|
||||||
|
}
|
||||||
|
case apikey.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case apikey.FieldIPWhitelist:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_whitelist", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.IPWhitelist); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field ip_whitelist: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case apikey.FieldIPBlacklist:
|
||||||
|
if value, ok := values[i].(*[]byte); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field ip_blacklist", values[i])
|
||||||
|
} else if value != nil && len(*value) > 0 {
|
||||||
|
if err := json.Unmarshal(*value, &_m.IPBlacklist); err != nil {
|
||||||
|
return fmt.Errorf("unmarshal field ip_blacklist: %w", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the APIKey.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *APIKey) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser queries the "user" edge of the APIKey entity.
|
||||||
|
func (_m *APIKey) QueryUser() *UserQuery {
|
||||||
|
return NewAPIKeyClient(_m.config).QueryUser(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup queries the "group" edge of the APIKey entity.
|
||||||
|
func (_m *APIKey) QueryGroup() *GroupQuery {
|
||||||
|
return NewAPIKeyClient(_m.config).QueryGroup(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageLogs queries the "usage_logs" edge of the APIKey entity.
|
||||||
|
func (_m *APIKey) QueryUsageLogs() *UsageLogQuery {
|
||||||
|
return NewAPIKeyClient(_m.config).QueryUsageLogs(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this APIKey.
|
||||||
|
// Note that you need to call APIKey.Unwrap() before calling this method if this APIKey
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *APIKey) Update() *APIKeyUpdateOne {
|
||||||
|
return NewAPIKeyClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the APIKey entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *APIKey) Unwrap() *APIKey {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: APIKey is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *APIKey) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("APIKey(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.DeletedAt; v != nil {
|
||||||
|
builder.WriteString("deleted_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("user_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.UserID))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("key=")
|
||||||
|
builder.WriteString(_m.Key)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("name=")
|
||||||
|
builder.WriteString(_m.Name)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.GroupID; v != nil {
|
||||||
|
builder.WriteString("group_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ip_whitelist=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("ip_blacklist=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.IPBlacklist))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeys is a parsable slice of APIKey.
|
||||||
|
type APIKeys []*APIKey
|
||||||
213
backend/ent/apikey/apikey.go
Normal file
213
backend/ent/apikey/apikey.go
Normal file
@@ -0,0 +1,213 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package apikey
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the apikey type in the database.
|
||||||
|
Label = "api_key"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||||
|
FieldDeletedAt = "deleted_at"
|
||||||
|
// FieldUserID holds the string denoting the user_id field in the database.
|
||||||
|
FieldUserID = "user_id"
|
||||||
|
// FieldKey holds the string denoting the key field in the database.
|
||||||
|
FieldKey = "key"
|
||||||
|
// FieldName holds the string denoting the name field in the database.
|
||||||
|
FieldName = "name"
|
||||||
|
// FieldGroupID holds the string denoting the group_id field in the database.
|
||||||
|
FieldGroupID = "group_id"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldIPWhitelist holds the string denoting the ip_whitelist field in the database.
|
||||||
|
FieldIPWhitelist = "ip_whitelist"
|
||||||
|
// FieldIPBlacklist holds the string denoting the ip_blacklist field in the database.
|
||||||
|
FieldIPBlacklist = "ip_blacklist"
|
||||||
|
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||||
|
EdgeUser = "user"
|
||||||
|
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||||
|
EdgeGroup = "group"
|
||||||
|
// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
|
||||||
|
EdgeUsageLogs = "usage_logs"
|
||||||
|
// Table holds the table name of the apikey in the database.
|
||||||
|
Table = "api_keys"
|
||||||
|
// UserTable is the table that holds the user relation/edge.
|
||||||
|
UserTable = "api_keys"
|
||||||
|
// UserInverseTable is the table name for the User entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||||
|
UserInverseTable = "users"
|
||||||
|
// UserColumn is the table column denoting the user relation/edge.
|
||||||
|
UserColumn = "user_id"
|
||||||
|
// GroupTable is the table that holds the group relation/edge.
|
||||||
|
GroupTable = "api_keys"
|
||||||
|
// GroupInverseTable is the table name for the Group entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||||
|
GroupInverseTable = "groups"
|
||||||
|
// GroupColumn is the table column denoting the group relation/edge.
|
||||||
|
GroupColumn = "group_id"
|
||||||
|
// UsageLogsTable is the table that holds the usage_logs relation/edge.
|
||||||
|
UsageLogsTable = "usage_logs"
|
||||||
|
// UsageLogsInverseTable is the table name for the UsageLog entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "usagelog" package.
|
||||||
|
UsageLogsInverseTable = "usage_logs"
|
||||||
|
// UsageLogsColumn is the table column denoting the usage_logs relation/edge.
|
||||||
|
UsageLogsColumn = "api_key_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for apikey fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldDeletedAt,
|
||||||
|
FieldUserID,
|
||||||
|
FieldKey,
|
||||||
|
FieldName,
|
||||||
|
FieldGroupID,
|
||||||
|
FieldStatus,
|
||||||
|
FieldIPWhitelist,
|
||||||
|
FieldIPBlacklist,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that the variables below are initialized by the runtime
|
||||||
|
// package on the initialization of the application. Therefore,
|
||||||
|
// it should be imported in the main as follows:
|
||||||
|
//
|
||||||
|
// import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
var (
|
||||||
|
Hooks [1]ent.Hook
|
||||||
|
Interceptors [1]ent.Interceptor
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
// KeyValidator is a validator for the "key" field. It is called by the builders before save.
|
||||||
|
KeyValidator func(string) error
|
||||||
|
// NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
NameValidator func(string) error
|
||||||
|
// DefaultStatus holds the default value on creation for the "status" field.
|
||||||
|
DefaultStatus string
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the APIKey queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDeletedAt orders the results by the deleted_at field.
|
||||||
|
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserID orders the results by the user_id field.
|
||||||
|
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUserID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByKey orders the results by the key field.
|
||||||
|
func ByKey(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldKey, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByName orders the results by the name field.
|
||||||
|
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupID orders the results by the group_id field.
|
||||||
|
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldGroupID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserField orders the results by user field.
|
||||||
|
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupField orders the results by group field.
|
||||||
|
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogsCount orders the results by usage_logs count.
|
||||||
|
func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageLogs orders the results by usage_logs terms.
|
||||||
|
func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newUserStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UserInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newGroupStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(GroupInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newUsageLogsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UsageLogsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
575
backend/ent/apikey/where.go
Normal file
575
backend/ent/apikey/where.go
Normal file
@@ -0,0 +1,575 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package apikey
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
|
||||||
|
func DeletedAt(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
|
||||||
|
func UserID(v int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Key applies equality check predicate on the "key" field. It's identical to KeyEQ.
|
||||||
|
func Key(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
|
||||||
|
func Name(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
|
||||||
|
func GroupID(v int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldGroupID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
|
||||||
|
func Status(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtNEQ(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtIn applies the In predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldDeletedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtNotIn(vs ...time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldDeletedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtGT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtGTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtLT(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtLTE(v time.Time) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldDeletedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldDeletedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDEQ applies the EQ predicate on the "user_id" field.
|
||||||
|
func UserIDEQ(v int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDNEQ applies the NEQ predicate on the "user_id" field.
|
||||||
|
func UserIDNEQ(v int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDIn applies the In predicate on the "user_id" field.
|
||||||
|
func UserIDIn(vs ...int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldUserID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDNotIn applies the NotIn predicate on the "user_id" field.
|
||||||
|
func UserIDNotIn(vs ...int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldUserID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyEQ applies the EQ predicate on the "key" field.
|
||||||
|
func KeyEQ(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyNEQ applies the NEQ predicate on the "key" field.
|
||||||
|
func KeyNEQ(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyIn applies the In predicate on the "key" field.
|
||||||
|
func KeyIn(vs ...string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyNotIn applies the NotIn predicate on the "key" field.
|
||||||
|
func KeyNotIn(vs ...string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldKey, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyGT applies the GT predicate on the "key" field.
|
||||||
|
func KeyGT(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyGTE applies the GTE predicate on the "key" field.
|
||||||
|
func KeyGTE(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyLT applies the LT predicate on the "key" field.
|
||||||
|
func KeyLT(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyLTE applies the LTE predicate on the "key" field.
|
||||||
|
func KeyLTE(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyContains applies the Contains predicate on the "key" field.
|
||||||
|
func KeyContains(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldContains(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyHasPrefix applies the HasPrefix predicate on the "key" field.
|
||||||
|
func KeyHasPrefix(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldHasPrefix(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyHasSuffix applies the HasSuffix predicate on the "key" field.
|
||||||
|
func KeyHasSuffix(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldHasSuffix(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyEqualFold applies the EqualFold predicate on the "key" field.
|
||||||
|
func KeyEqualFold(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEqualFold(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// KeyContainsFold applies the ContainsFold predicate on the "key" field.
|
||||||
|
func KeyContainsFold(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldContainsFold(FieldKey, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameEQ applies the EQ predicate on the "name" field.
|
||||||
|
func NameEQ(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||||
|
func NameNEQ(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameIn applies the In predicate on the "name" field.
|
||||||
|
func NameIn(vs ...string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldName, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||||
|
func NameNotIn(vs ...string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldName, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameGT applies the GT predicate on the "name" field.
|
||||||
|
func NameGT(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameGTE applies the GTE predicate on the "name" field.
|
||||||
|
func NameGTE(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameLT applies the LT predicate on the "name" field.
|
||||||
|
func NameLT(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameLTE applies the LTE predicate on the "name" field.
|
||||||
|
func NameLTE(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameContains applies the Contains predicate on the "name" field.
|
||||||
|
func NameContains(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldContains(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||||
|
func NameHasPrefix(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldHasPrefix(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||||
|
func NameHasSuffix(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldHasSuffix(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||||
|
func NameEqualFold(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEqualFold(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||||
|
func NameContainsFold(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldContainsFold(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDEQ applies the EQ predicate on the "group_id" field.
|
||||||
|
func GroupIDEQ(v int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldGroupID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
|
||||||
|
func GroupIDNEQ(v int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldGroupID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDIn applies the In predicate on the "group_id" field.
|
||||||
|
func GroupIDIn(vs ...int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldGroupID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
|
||||||
|
func GroupIDNotIn(vs ...int64) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldGroupID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDIsNil applies the IsNil predicate on the "group_id" field.
|
||||||
|
func GroupIDIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldGroupID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDNotNil applies the NotNil predicate on the "group_id" field.
|
||||||
|
func GroupIDNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldGroupID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPWhitelistIsNil applies the IsNil predicate on the "ip_whitelist" field.
|
||||||
|
func IPWhitelistIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldIPWhitelist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPWhitelistNotNil applies the NotNil predicate on the "ip_whitelist" field.
|
||||||
|
func IPWhitelistNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldIPWhitelist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPBlacklistIsNil applies the IsNil predicate on the "ip_blacklist" field.
|
||||||
|
func IPBlacklistIsNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldIsNull(FieldIPBlacklist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IPBlacklistNotNil applies the NotNil predicate on the "ip_blacklist" field.
|
||||||
|
func IPBlacklistNotNil() predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.FieldNotNull(FieldIPBlacklist))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||||
|
func HasUser() predicate.APIKey {
|
||||||
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
|
||||||
|
func HasUserWith(preds ...predicate.User) predicate.APIKey {
|
||||||
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
step := newUserStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasGroup applies the HasEdge predicate on the "group" edge.
|
||||||
|
func HasGroup() predicate.APIKey {
|
||||||
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||||
|
func HasGroupWith(preds ...predicate.Group) predicate.APIKey {
|
||||||
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
step := newGroupStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUsageLogs applies the HasEdge predicate on the "usage_logs" edge.
|
||||||
|
func HasUsageLogs() predicate.APIKey {
|
||||||
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUsageLogsWith applies the HasEdge predicate on the "usage_logs" edge with a given conditions (other predicates).
|
||||||
|
func HasUsageLogsWith(preds ...predicate.UsageLog) predicate.APIKey {
|
||||||
|
return predicate.APIKey(func(s *sql.Selector) {
|
||||||
|
step := newUsageLogsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.APIKey) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.APIKey) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.APIKey) predicate.APIKey {
|
||||||
|
return predicate.APIKey(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1127
backend/ent/apikey_create.go
Normal file
1127
backend/ent/apikey_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/apikey_delete.go
Normal file
88
backend/ent/apikey_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKeyDelete is the builder for deleting a APIKey entity.
|
||||||
|
type APIKeyDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *APIKeyMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the APIKeyDelete builder.
|
||||||
|
func (_d *APIKeyDelete) Where(ps ...predicate.APIKey) *APIKeyDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *APIKeyDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *APIKeyDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *APIKeyDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(apikey.Table, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyDeleteOne is the builder for deleting a single APIKey entity.
|
||||||
|
type APIKeyDeleteOne struct {
|
||||||
|
_d *APIKeyDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the APIKeyDelete builder.
|
||||||
|
func (_d *APIKeyDeleteOne) Where(ps ...predicate.APIKey) *APIKeyDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *APIKeyDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{apikey.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *APIKeyDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
796
backend/ent/apikey_query.go
Normal file
796
backend/ent/apikey_query.go
Normal file
@@ -0,0 +1,796 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKeyQuery is the builder for querying APIKey entities.
|
||||||
|
type APIKeyQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []apikey.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.APIKey
|
||||||
|
withUser *UserQuery
|
||||||
|
withGroup *GroupQuery
|
||||||
|
withUsageLogs *UsageLogQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the APIKeyQuery builder.
|
||||||
|
func (_q *APIKeyQuery) Where(ps ...predicate.APIKey) *APIKeyQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *APIKeyQuery) Limit(limit int) *APIKeyQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *APIKeyQuery) Offset(offset int) *APIKeyQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *APIKeyQuery) Unique(unique bool) *APIKeyQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *APIKeyQuery) Order(o ...apikey.OrderOption) *APIKeyQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
|
||||||
|
func (_q *APIKeyQuery) QueryUser() *UserQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(apikey.Table, apikey.FieldID, selector),
|
||||||
|
sqlgraph.To(user.Table, user.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, apikey.UserTable, apikey.UserColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup chains the current query on the "group" edge.
|
||||||
|
func (_q *APIKeyQuery) QueryGroup() *GroupQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(apikey.Table, apikey.FieldID, selector),
|
||||||
|
sqlgraph.To(group.Table, group.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, apikey.GroupTable, apikey.GroupColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageLogs chains the current query on the "usage_logs" edge.
|
||||||
|
func (_q *APIKeyQuery) QueryUsageLogs() *UsageLogQuery {
|
||||||
|
query := (&UsageLogClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(apikey.Table, apikey.FieldID, selector),
|
||||||
|
sqlgraph.To(usagelog.Table, usagelog.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, apikey.UsageLogsTable, apikey.UsageLogsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first APIKey entity from the query.
|
||||||
|
// Returns a *NotFoundError when no APIKey was found.
|
||||||
|
func (_q *APIKeyQuery) First(ctx context.Context) (*APIKey, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{apikey.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) FirstX(ctx context.Context) *APIKey {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first APIKey ID from the query.
|
||||||
|
// Returns a *NotFoundError when no APIKey ID was found.
|
||||||
|
func (_q *APIKeyQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{apikey.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single APIKey entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one APIKey entity is found.
|
||||||
|
// Returns a *NotFoundError when no APIKey entities are found.
|
||||||
|
func (_q *APIKeyQuery) Only(ctx context.Context) (*APIKey, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{apikey.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{apikey.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) OnlyX(ctx context.Context) *APIKey {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only APIKey ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one APIKey ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *APIKeyQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{apikey.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{apikey.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of APIKeys.
|
||||||
|
func (_q *APIKeyQuery) All(ctx context.Context) ([]*APIKey, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*APIKey, *APIKeyQuery]()
|
||||||
|
return withInterceptors[[]*APIKey](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) AllX(ctx context.Context) []*APIKey {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of APIKey IDs.
|
||||||
|
func (_q *APIKeyQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(apikey.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *APIKeyQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*APIKeyQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *APIKeyQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *APIKeyQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the APIKeyQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *APIKeyQuery) Clone() *APIKeyQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &APIKeyQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]apikey.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.APIKey{}, _q.predicates...),
|
||||||
|
withUser: _q.withUser.Clone(),
|
||||||
|
withGroup: _q.withGroup.Clone(),
|
||||||
|
withUsageLogs: _q.withUsageLogs.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *APIKeyQuery) WithUser(opts ...func(*UserQuery)) *APIKeyQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUser = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *APIKeyQuery) WithGroup(opts ...func(*GroupQuery)) *APIKeyQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withGroup = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUsageLogs tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "usage_logs" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *APIKeyQuery) WithUsageLogs(opts ...func(*UsageLogQuery)) *APIKeyQuery {
|
||||||
|
query := (&UsageLogClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUsageLogs = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.APIKey.Query().
|
||||||
|
// GroupBy(apikey.FieldCreatedAt).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *APIKeyQuery) GroupBy(field string, fields ...string) *APIKeyGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &APIKeyGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = apikey.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.APIKey.Query().
|
||||||
|
// Select(apikey.FieldCreatedAt).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *APIKeyQuery) Select(fields ...string) *APIKeySelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &APIKeySelect{APIKeyQuery: _q}
|
||||||
|
sbuild.label = apikey.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a APIKeySelect configured with the given aggregations.
|
||||||
|
func (_q *APIKeyQuery) Aggregate(fns ...AggregateFunc) *APIKeySelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !apikey.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIKey, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*APIKey{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [3]bool{
|
||||||
|
_q.withUser != nil,
|
||||||
|
_q.withGroup != nil,
|
||||||
|
_q.withUsageLogs != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*APIKey).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &APIKey{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withUser; query != nil {
|
||||||
|
if err := _q.loadUser(ctx, query, nodes, nil,
|
||||||
|
func(n *APIKey, e *User) { n.Edges.User = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withGroup; query != nil {
|
||||||
|
if err := _q.loadGroup(ctx, query, nodes, nil,
|
||||||
|
func(n *APIKey, e *Group) { n.Edges.Group = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withUsageLogs; query != nil {
|
||||||
|
if err := _q.loadUsageLogs(ctx, query, nodes,
|
||||||
|
func(n *APIKey) { n.Edges.UsageLogs = []*UsageLog{} },
|
||||||
|
func(n *APIKey, e *UsageLog) { n.Edges.UsageLogs = append(n.Edges.UsageLogs, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *User)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*APIKey)
|
||||||
|
for i := range nodes {
|
||||||
|
fk := nodes[i].UserID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(user.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *APIKeyQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *Group)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*APIKey)
|
||||||
|
for i := range nodes {
|
||||||
|
if nodes[i].GroupID == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fk := *nodes[i].GroupID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(group.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *APIKeyQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, nodes []*APIKey, init func(*APIKey), assign func(*APIKey, *UsageLog)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*APIKey)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(usagelog.FieldAPIKeyID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.UsageLog(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(apikey.UsageLogsColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.APIKeyID
|
||||||
|
node, ok := nodeids[fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "api_key_id" returned %v for node %v`, fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != apikey.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.withUser != nil {
|
||||||
|
_spec.Node.AddColumnOnce(apikey.FieldUserID)
|
||||||
|
}
|
||||||
|
if _q.withGroup != nil {
|
||||||
|
_spec.Node.AddColumnOnce(apikey.FieldGroupID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(apikey.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = apikey.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *APIKeyQuery) ForUpdate(opts ...sql.LockOption) *APIKeyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *APIKeyQuery) ForShare(opts ...sql.LockOption) *APIKeyQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyGroupBy is the group-by builder for APIKey entities.
|
||||||
|
type APIKeyGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *APIKeyQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *APIKeyGroupBy) Aggregate(fns ...AggregateFunc) *APIKeyGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *APIKeyGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*APIKeyQuery, *APIKeyGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *APIKeyGroupBy) sqlScan(ctx context.Context, root *APIKeyQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeySelect is the builder for selecting fields of APIKey entities.
|
||||||
|
type APIKeySelect struct {
|
||||||
|
*APIKeyQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *APIKeySelect) Aggregate(fns ...AggregateFunc) *APIKeySelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *APIKeySelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*APIKeyQuery, *APIKeySelect](ctx, _s.APIKeyQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *APIKeySelect) sqlScan(ctx context.Context, root *APIKeyQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
940
backend/ent/apikey_update.go
Normal file
940
backend/ent/apikey_update.go
Normal file
@@ -0,0 +1,940 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/dialect/sql/sqljson"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKeyUpdate is the builder for updating APIKey entities.
|
||||||
|
type APIKeyUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *APIKeyMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the APIKeyUpdate builder.
|
||||||
|
func (_u *APIKeyUpdate) Where(ps ...predicate.APIKey) *APIKeyUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *APIKeyUpdate) SetUpdatedAt(v time.Time) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDeletedAt sets the "deleted_at" field.
|
||||||
|
func (_u *APIKeyUpdate) SetDeletedAt(v time.Time) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetDeletedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableDeletedAt(v *time.Time) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetDeletedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearDeletedAt() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearDeletedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (_u *APIKeyUpdate) SetUserID(v int64) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetUserID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user_id" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableUserID(v *int64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUserID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (_u *APIKeyUpdate) SetKey(v string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetKey(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableKey sets the "key" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableKey(v *string) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetKey(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetName sets the "name" field.
|
||||||
|
func (_u *APIKeyUpdate) SetName(v string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetName(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableName sets the "name" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableName(v *string) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetName(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *APIKeyUpdate) SetGroupID(v int64) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableGroupID(v *int64) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroupID clears the value of the "group_id" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearGroupID() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearGroupID()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *APIKeyUpdate) SetStatus(v string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdate) SetNillableStatus(v *string) *APIKeyUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdate) SetIPWhitelist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPWhitelist appends value to the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdate) AppendIPWhitelist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.AppendIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearIPWhitelist() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearIPWhitelist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdate) SetIPBlacklist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.SetIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPBlacklist appends value to the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdate) AppendIPBlacklist(v []string) *APIKeyUpdate {
|
||||||
|
_u.mutation.AppendIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdate) ClearIPBlacklist() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearIPBlacklist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate {
|
||||||
|
return _u.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *APIKeyUpdate) SetGroup(v *Group) *APIKeyUpdate {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs.
|
||||||
|
func (_u *APIKeyUpdate) AddUsageLogIDs(ids ...int64) *APIKeyUpdate {
|
||||||
|
_u.mutation.AddUsageLogIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity.
|
||||||
|
func (_u *APIKeyUpdate) AddUsageLogs(v ...*UsageLog) *APIKeyUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddUsageLogIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the APIKeyMutation object of the builder.
|
||||||
|
func (_u *APIKeyUpdate) Mutation() *APIKeyMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *APIKeyUpdate) ClearUser() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *APIKeyUpdate) ClearGroup() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity.
|
||||||
|
func (_u *APIKeyUpdate) ClearUsageLogs() *APIKeyUpdate {
|
||||||
|
_u.mutation.ClearUsageLogs()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs.
|
||||||
|
func (_u *APIKeyUpdate) RemoveUsageLogIDs(ids ...int64) *APIKeyUpdate {
|
||||||
|
_u.mutation.RemoveUsageLogIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities.
|
||||||
|
func (_u *APIKeyUpdate) RemoveUsageLogs(v ...*UsageLog) *APIKeyUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveUsageLogIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *APIKeyUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
if err := _u.defaults(); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *APIKeyUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *APIKeyUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *APIKeyUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *APIKeyUpdate) defaults() error {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
if apikey.UpdateDefaultUpdatedAt == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized apikey.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
v := apikey.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *APIKeyUpdate) check() error {
|
||||||
|
if v, ok := _u.mutation.Key(); ok {
|
||||||
|
if err := apikey.KeyValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "APIKey.key": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Name(); ok {
|
||||||
|
if err := apikey.NameValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "APIKey.name": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := apikey.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "APIKey.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "APIKey.user"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.DeletedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.DeletedAtCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldDeletedAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Key(); ok {
|
||||||
|
_spec.SetField(apikey.FieldKey, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Name(); ok {
|
||||||
|
_spec.SetField(apikey.FieldName, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IPWhitelist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPWhitelist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPWhitelist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPWhitelistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IPBlacklist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPBlacklist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPBlacklist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPBlacklistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.UserTable,
|
||||||
|
Columns: []string{apikey.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.UserTable,
|
||||||
|
Columns: []string{apikey.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.GroupTable,
|
||||||
|
Columns: []string{apikey.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.GroupTable,
|
||||||
|
Columns: []string{apikey.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsageLogsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: apikey.UsageLogsTable,
|
||||||
|
Columns: []string{apikey.UsageLogsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: apikey.UsageLogsTable,
|
||||||
|
Columns: []string{apikey.UsageLogsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: apikey.UsageLogsTable,
|
||||||
|
Columns: []string{apikey.UsageLogsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{apikey.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// APIKeyUpdateOne is the builder for updating a single APIKey entity.
|
||||||
|
type APIKeyUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *APIKeyMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetUpdatedAt(v time.Time) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDeletedAt sets the "deleted_at" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetDeletedAt(v time.Time) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetDeletedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableDeletedAt(v *time.Time) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetDeletedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearDeletedAt() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearDeletedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetUserID(v int64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetUserID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user_id" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableUserID(v *int64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUserID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetKey sets the "key" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetKey(v string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetKey(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableKey sets the "key" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableKey(v *string) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetKey(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetName sets the "name" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetName(v string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetName(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableName sets the "name" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableName(v *string) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetName(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetGroupID(v int64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableGroupID(v *int64) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroupID clears the value of the "group_id" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearGroupID() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearGroupID()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetStatus(v string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *APIKeyUpdateOne) SetNillableStatus(v *string) *APIKeyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPWhitelist sets the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetIPWhitelist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPWhitelist appends value to the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AppendIPWhitelist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AppendIPWhitelist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPWhitelist clears the value of the "ip_whitelist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearIPWhitelist() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearIPWhitelist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetIPBlacklist sets the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) SetIPBlacklist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.SetIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AppendIPBlacklist appends value to the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) AppendIPBlacklist(v []string) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AppendIPBlacklist(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearIPBlacklist clears the value of the "ip_blacklist" field.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearIPBlacklist() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearIPBlacklist()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne {
|
||||||
|
return _u.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *APIKeyUpdateOne) SetGroup(v *Group) *APIKeyUpdateOne {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageLogIDs adds the "usage_logs" edge to the UsageLog entity by IDs.
|
||||||
|
func (_u *APIKeyUpdateOne) AddUsageLogIDs(ids ...int64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.AddUsageLogIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageLogs adds the "usage_logs" edges to the UsageLog entity.
|
||||||
|
func (_u *APIKeyUpdateOne) AddUsageLogs(v ...*UsageLog) *APIKeyUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddUsageLogIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the APIKeyMutation object of the builder.
|
||||||
|
func (_u *APIKeyUpdateOne) Mutation() *APIKeyMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearUser() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearGroup() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsageLogs clears all "usage_logs" edges to the UsageLog entity.
|
||||||
|
func (_u *APIKeyUpdateOne) ClearUsageLogs() *APIKeyUpdateOne {
|
||||||
|
_u.mutation.ClearUsageLogs()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageLogIDs removes the "usage_logs" edge to UsageLog entities by IDs.
|
||||||
|
func (_u *APIKeyUpdateOne) RemoveUsageLogIDs(ids ...int64) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.RemoveUsageLogIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageLogs removes "usage_logs" edges to UsageLog entities.
|
||||||
|
func (_u *APIKeyUpdateOne) RemoveUsageLogs(v ...*UsageLog) *APIKeyUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveUsageLogIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the APIKeyUpdate builder.
|
||||||
|
func (_u *APIKeyUpdateOne) Where(ps ...predicate.APIKey) *APIKeyUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *APIKeyUpdateOne) Select(field string, fields ...string) *APIKeyUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated APIKey entity.
|
||||||
|
func (_u *APIKeyUpdateOne) Save(ctx context.Context) (*APIKey, error) {
|
||||||
|
if err := _u.defaults(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *APIKeyUpdateOne) SaveX(ctx context.Context) *APIKey {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *APIKeyUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *APIKeyUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *APIKeyUpdateOne) defaults() error {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
if apikey.UpdateDefaultUpdatedAt == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized apikey.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
v := apikey.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *APIKeyUpdateOne) check() error {
|
||||||
|
if v, ok := _u.mutation.Key(); ok {
|
||||||
|
if err := apikey.KeyValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "key", err: fmt.Errorf(`ent: validator failed for field "APIKey.key": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Name(); ok {
|
||||||
|
if err := apikey.NameValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "APIKey.name": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := apikey.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "APIKey.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "APIKey.user"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(apikey.Table, apikey.Columns, sqlgraph.NewFieldSpec(apikey.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "APIKey.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, apikey.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !apikey.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != apikey.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.DeletedAt(); ok {
|
||||||
|
_spec.SetField(apikey.FieldDeletedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.DeletedAtCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldDeletedAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Key(); ok {
|
||||||
|
_spec.SetField(apikey.FieldKey, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Name(); ok {
|
||||||
|
_spec.SetField(apikey.FieldName, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(apikey.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IPWhitelist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPWhitelist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPWhitelist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPWhitelistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.IPBlacklist(); ok {
|
||||||
|
_spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AppendedIPBlacklist(); ok {
|
||||||
|
_spec.AddModifier(func(u *sql.UpdateBuilder) {
|
||||||
|
sqljson.Append(u, apikey.FieldIPBlacklist, value)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
if _u.mutation.IPBlacklistCleared() {
|
||||||
|
_spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.UserTable,
|
||||||
|
Columns: []string{apikey.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.UserTable,
|
||||||
|
Columns: []string{apikey.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.GroupTable,
|
||||||
|
Columns: []string{apikey.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: apikey.GroupTable,
|
||||||
|
Columns: []string{apikey.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsageLogsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: apikey.UsageLogsTable,
|
||||||
|
Columns: []string{apikey.UsageLogsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedUsageLogsIDs(); len(nodes) > 0 && !_u.mutation.UsageLogsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: apikey.UsageLogsTable,
|
||||||
|
Columns: []string{apikey.UsageLogsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UsageLogsIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: apikey.UsageLogsTable,
|
||||||
|
Columns: []string{apikey.UsageLogsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(usagelog.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &APIKey{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{apikey.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
3009
backend/ent/client.go
Normal file
3009
backend/ent/client.go
Normal file
File diff suppressed because it is too large
Load Diff
8
backend/ent/driver_access.go
Normal file
8
backend/ent/driver_access.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
package ent
|
||||||
|
|
||||||
|
import "entgo.io/ent/dialect"
|
||||||
|
|
||||||
|
// Driver 暴露底层 driver,供需要 raw SQL 的集成层使用。
|
||||||
|
func (c *Client) Driver() dialect.Driver {
|
||||||
|
return c.driver
|
||||||
|
}
|
||||||
636
backend/ent/ent.go
Normal file
636
backend/ent/ent.go
Normal file
@@ -0,0 +1,636 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userallowedgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usersubscription"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ent aliases to avoid import conflicts in user's code.
|
||||||
|
type (
|
||||||
|
Op = ent.Op
|
||||||
|
Hook = ent.Hook
|
||||||
|
Value = ent.Value
|
||||||
|
Query = ent.Query
|
||||||
|
QueryContext = ent.QueryContext
|
||||||
|
Querier = ent.Querier
|
||||||
|
QuerierFunc = ent.QuerierFunc
|
||||||
|
Interceptor = ent.Interceptor
|
||||||
|
InterceptFunc = ent.InterceptFunc
|
||||||
|
Traverser = ent.Traverser
|
||||||
|
TraverseFunc = ent.TraverseFunc
|
||||||
|
Policy = ent.Policy
|
||||||
|
Mutator = ent.Mutator
|
||||||
|
Mutation = ent.Mutation
|
||||||
|
MutateFunc = ent.MutateFunc
|
||||||
|
)
|
||||||
|
|
||||||
|
type clientCtxKey struct{}
|
||||||
|
|
||||||
|
// FromContext returns a Client stored inside a context, or nil if there isn't one.
|
||||||
|
func FromContext(ctx context.Context) *Client {
|
||||||
|
c, _ := ctx.Value(clientCtxKey{}).(*Client)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewContext returns a new context with the given Client attached.
|
||||||
|
func NewContext(parent context.Context, c *Client) context.Context {
|
||||||
|
return context.WithValue(parent, clientCtxKey{}, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
type txCtxKey struct{}
|
||||||
|
|
||||||
|
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
|
||||||
|
func TxFromContext(ctx context.Context) *Tx {
|
||||||
|
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
|
||||||
|
return tx
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTxContext returns a new context with the given Tx attached.
|
||||||
|
func NewTxContext(parent context.Context, tx *Tx) context.Context {
|
||||||
|
return context.WithValue(parent, txCtxKey{}, tx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OrderFunc applies an ordering on the sql selector.
|
||||||
|
// Deprecated: Use Asc/Desc functions or the package builders instead.
|
||||||
|
type OrderFunc func(*sql.Selector)
|
||||||
|
|
||||||
|
var (
|
||||||
|
initCheck sync.Once
|
||||||
|
columnCheck sql.ColumnCheck
|
||||||
|
)
|
||||||
|
|
||||||
|
// checkColumn checks if the column exists in the given table.
|
||||||
|
func checkColumn(t, c string) error {
|
||||||
|
initCheck.Do(func() {
|
||||||
|
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||||
|
apikey.Table: apikey.ValidColumn,
|
||||||
|
account.Table: account.ValidColumn,
|
||||||
|
accountgroup.Table: accountgroup.ValidColumn,
|
||||||
|
group.Table: group.ValidColumn,
|
||||||
|
promocode.Table: promocode.ValidColumn,
|
||||||
|
promocodeusage.Table: promocodeusage.ValidColumn,
|
||||||
|
proxy.Table: proxy.ValidColumn,
|
||||||
|
redeemcode.Table: redeemcode.ValidColumn,
|
||||||
|
setting.Table: setting.ValidColumn,
|
||||||
|
usagelog.Table: usagelog.ValidColumn,
|
||||||
|
user.Table: user.ValidColumn,
|
||||||
|
userallowedgroup.Table: userallowedgroup.ValidColumn,
|
||||||
|
userattributedefinition.Table: userattributedefinition.ValidColumn,
|
||||||
|
userattributevalue.Table: userattributevalue.ValidColumn,
|
||||||
|
usersubscription.Table: usersubscription.ValidColumn,
|
||||||
|
})
|
||||||
|
})
|
||||||
|
return columnCheck(t, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Asc applies the given fields in ASC order.
|
||||||
|
func Asc(fields ...string) func(*sql.Selector) {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
for _, f := range fields {
|
||||||
|
if err := checkColumn(s.TableName(), f); err != nil {
|
||||||
|
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||||
|
}
|
||||||
|
s.OrderBy(sql.Asc(s.C(f)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Desc applies the given fields in DESC order.
|
||||||
|
func Desc(fields ...string) func(*sql.Selector) {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
for _, f := range fields {
|
||||||
|
if err := checkColumn(s.TableName(), f); err != nil {
|
||||||
|
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||||
|
}
|
||||||
|
s.OrderBy(sql.Desc(s.C(f)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AggregateFunc applies an aggregation step on the group-by traversal/selector.
|
||||||
|
type AggregateFunc func(*sql.Selector) string
|
||||||
|
|
||||||
|
// As is a pseudo aggregation function for renaming another other functions with custom names. For example:
|
||||||
|
//
|
||||||
|
// GroupBy(field1, field2).
|
||||||
|
// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func As(fn AggregateFunc, end string) AggregateFunc {
|
||||||
|
return func(s *sql.Selector) string {
|
||||||
|
return sql.As(fn(s), end)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count applies the "count" aggregation function on each group.
|
||||||
|
func Count() AggregateFunc {
|
||||||
|
return func(s *sql.Selector) string {
|
||||||
|
return sql.Count("*")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max applies the "max" aggregation function on the given field of each group.
|
||||||
|
func Max(field string) AggregateFunc {
|
||||||
|
return func(s *sql.Selector) string {
|
||||||
|
if err := checkColumn(s.TableName(), field); err != nil {
|
||||||
|
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return sql.Max(s.C(field))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mean applies the "mean" aggregation function on the given field of each group.
|
||||||
|
func Mean(field string) AggregateFunc {
|
||||||
|
return func(s *sql.Selector) string {
|
||||||
|
if err := checkColumn(s.TableName(), field); err != nil {
|
||||||
|
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return sql.Avg(s.C(field))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Min applies the "min" aggregation function on the given field of each group.
|
||||||
|
func Min(field string) AggregateFunc {
|
||||||
|
return func(s *sql.Selector) string {
|
||||||
|
if err := checkColumn(s.TableName(), field); err != nil {
|
||||||
|
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return sql.Min(s.C(field))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sum applies the "sum" aggregation function on the given field of each group.
|
||||||
|
func Sum(field string) AggregateFunc {
|
||||||
|
return func(s *sql.Selector) string {
|
||||||
|
if err := checkColumn(s.TableName(), field); err != nil {
|
||||||
|
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return sql.Sum(s.C(field))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidationError returns when validating a field or edge fails.
|
||||||
|
type ValidationError struct {
|
||||||
|
Name string // Field or edge name.
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (e *ValidationError) Error() string {
|
||||||
|
return e.err.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap implements the errors.Wrapper interface.
|
||||||
|
func (e *ValidationError) Unwrap() error {
|
||||||
|
return e.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsValidationError returns a boolean indicating whether the error is a validation error.
|
||||||
|
func IsValidationError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var e *ValidationError
|
||||||
|
return errors.As(err, &e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotFoundError returns when trying to fetch a specific entity and it was not found in the database.
|
||||||
|
type NotFoundError struct {
|
||||||
|
label string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (e *NotFoundError) Error() string {
|
||||||
|
return "ent: " + e.label + " not found"
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNotFound returns a boolean indicating whether the error is a not found error.
|
||||||
|
func IsNotFound(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var e *NotFoundError
|
||||||
|
return errors.As(err, &e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaskNotFound masks not found error.
|
||||||
|
func MaskNotFound(err error) error {
|
||||||
|
if IsNotFound(err) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database.
|
||||||
|
type NotSingularError struct {
|
||||||
|
label string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (e *NotSingularError) Error() string {
|
||||||
|
return "ent: " + e.label + " not singular"
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNotSingular returns a boolean indicating whether the error is a not singular error.
|
||||||
|
func IsNotSingular(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var e *NotSingularError
|
||||||
|
return errors.As(err, &e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotLoadedError returns when trying to get a node that was not loaded by the query.
|
||||||
|
type NotLoadedError struct {
|
||||||
|
edge string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (e *NotLoadedError) Error() string {
|
||||||
|
return "ent: " + e.edge + " edge was not loaded"
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNotLoaded returns a boolean indicating whether the error is a not loaded error.
|
||||||
|
func IsNotLoaded(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var e *NotLoadedError
|
||||||
|
return errors.As(err, &e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConstraintError returns when trying to create/update one or more entities and
|
||||||
|
// one or more of their constraints failed. For example, violation of edge or
|
||||||
|
// field uniqueness.
|
||||||
|
type ConstraintError struct {
|
||||||
|
msg string
|
||||||
|
wrap error
|
||||||
|
}
|
||||||
|
|
||||||
|
// Error implements the error interface.
|
||||||
|
func (e ConstraintError) Error() string {
|
||||||
|
return "ent: constraint failed: " + e.msg
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap implements the errors.Wrapper interface.
|
||||||
|
func (e *ConstraintError) Unwrap() error {
|
||||||
|
return e.wrap
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsConstraintError returns a boolean indicating whether the error is a constraint failure.
|
||||||
|
func IsConstraintError(err error) bool {
|
||||||
|
if err == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
var e *ConstraintError
|
||||||
|
return errors.As(err, &e)
|
||||||
|
}
|
||||||
|
|
||||||
|
// selector embedded by the different Select/GroupBy builders.
|
||||||
|
type selector struct {
|
||||||
|
label string
|
||||||
|
flds *[]string
|
||||||
|
fns []AggregateFunc
|
||||||
|
scan func(context.Context, any) error
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScanX is like Scan, but panics if an error occurs.
|
||||||
|
func (s *selector) ScanX(ctx context.Context, v any) {
|
||||||
|
if err := s.scan(ctx, v); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strings returns list of strings from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Strings(ctx context.Context) ([]string, error) {
|
||||||
|
if len(*s.flds) > 1 {
|
||||||
|
return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field")
|
||||||
|
}
|
||||||
|
var v []string
|
||||||
|
if err := s.scan(ctx, &v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringsX is like Strings, but panics if an error occurs.
|
||||||
|
func (s *selector) StringsX(ctx context.Context) []string {
|
||||||
|
v, err := s.Strings(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// String returns a single string from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) String(ctx context.Context) (_ string, err error) {
|
||||||
|
var v []string
|
||||||
|
if v, err = s.Strings(ctx); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(v) {
|
||||||
|
case 1:
|
||||||
|
return v[0], nil
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{s.label}
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// StringX is like String, but panics if an error occurs.
|
||||||
|
func (s *selector) StringX(ctx context.Context) string {
|
||||||
|
v, err := s.String(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ints returns list of ints from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Ints(ctx context.Context) ([]int, error) {
|
||||||
|
if len(*s.flds) > 1 {
|
||||||
|
return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field")
|
||||||
|
}
|
||||||
|
var v []int
|
||||||
|
if err := s.scan(ctx, &v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntsX is like Ints, but panics if an error occurs.
|
||||||
|
func (s *selector) IntsX(ctx context.Context) []int {
|
||||||
|
v, err := s.Ints(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int returns a single int from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Int(ctx context.Context) (_ int, err error) {
|
||||||
|
var v []int
|
||||||
|
if v, err = s.Ints(ctx); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(v) {
|
||||||
|
case 1:
|
||||||
|
return v[0], nil
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{s.label}
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// IntX is like Int, but panics if an error occurs.
|
||||||
|
func (s *selector) IntX(ctx context.Context) int {
|
||||||
|
v, err := s.Int(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64s returns list of float64s from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Float64s(ctx context.Context) ([]float64, error) {
|
||||||
|
if len(*s.flds) > 1 {
|
||||||
|
return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field")
|
||||||
|
}
|
||||||
|
var v []float64
|
||||||
|
if err := s.scan(ctx, &v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64sX is like Float64s, but panics if an error occurs.
|
||||||
|
func (s *selector) Float64sX(ctx context.Context) []float64 {
|
||||||
|
v, err := s.Float64s(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64 returns a single float64 from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Float64(ctx context.Context) (_ float64, err error) {
|
||||||
|
var v []float64
|
||||||
|
if v, err = s.Float64s(ctx); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(v) {
|
||||||
|
case 1:
|
||||||
|
return v[0], nil
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{s.label}
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Float64X is like Float64, but panics if an error occurs.
|
||||||
|
func (s *selector) Float64X(ctx context.Context) float64 {
|
||||||
|
v, err := s.Float64(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bools returns list of bools from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Bools(ctx context.Context) ([]bool, error) {
|
||||||
|
if len(*s.flds) > 1 {
|
||||||
|
return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field")
|
||||||
|
}
|
||||||
|
var v []bool
|
||||||
|
if err := s.scan(ctx, &v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolsX is like Bools, but panics if an error occurs.
|
||||||
|
func (s *selector) BoolsX(ctx context.Context) []bool {
|
||||||
|
v, err := s.Bools(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Bool returns a single bool from a selector. It is only allowed when selecting one field.
|
||||||
|
func (s *selector) Bool(ctx context.Context) (_ bool, err error) {
|
||||||
|
var v []bool
|
||||||
|
if v, err = s.Bools(ctx); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(v) {
|
||||||
|
case 1:
|
||||||
|
return v[0], nil
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{s.label}
|
||||||
|
default:
|
||||||
|
err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v))
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// BoolX is like Bool, but panics if an error occurs.
|
||||||
|
func (s *selector) BoolX(ctx context.Context) bool {
|
||||||
|
v, err := s.Bool(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// withHooks invokes the builder operation with the given hooks, if any.
|
||||||
|
func withHooks[V Value, M any, PM interface {
|
||||||
|
*M
|
||||||
|
Mutation
|
||||||
|
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
|
||||||
|
if len(hooks) == 0 {
|
||||||
|
return exec(ctx)
|
||||||
|
}
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutationT, ok := any(m).(PM)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
// Set the mutation to the builder.
|
||||||
|
*mutation = *mutationT
|
||||||
|
return exec(ctx)
|
||||||
|
})
|
||||||
|
for i := len(hooks) - 1; i >= 0; i-- {
|
||||||
|
if hooks[i] == nil {
|
||||||
|
return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
mut = hooks[i](mut)
|
||||||
|
}
|
||||||
|
v, err := mut.Mutate(ctx, mutation)
|
||||||
|
if err != nil {
|
||||||
|
return value, err
|
||||||
|
}
|
||||||
|
nv, ok := v.(V)
|
||||||
|
if !ok {
|
||||||
|
return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
|
||||||
|
}
|
||||||
|
return nv, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
|
||||||
|
func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context {
|
||||||
|
if ent.QueryFromContext(ctx) == nil {
|
||||||
|
qc.Op = op
|
||||||
|
ctx = ent.NewQueryContext(ctx, qc)
|
||||||
|
}
|
||||||
|
return ctx
|
||||||
|
}
|
||||||
|
|
||||||
|
func querierAll[V Value, Q interface {
|
||||||
|
sqlAll(context.Context, ...queryHook) (V, error)
|
||||||
|
}]() Querier {
|
||||||
|
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||||
|
query, ok := q.(Q)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||||
|
}
|
||||||
|
return query.sqlAll(ctx)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func querierCount[Q interface {
|
||||||
|
sqlCount(context.Context) (int, error)
|
||||||
|
}]() Querier {
|
||||||
|
return QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||||
|
query, ok := q.(Q)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||||
|
}
|
||||||
|
return query.sqlCount(ctx)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) {
|
||||||
|
for i := len(inters) - 1; i >= 0; i-- {
|
||||||
|
qr = inters[i].Intercept(qr)
|
||||||
|
}
|
||||||
|
rv, err := qr.Query(ctx, q)
|
||||||
|
if err != nil {
|
||||||
|
return v, err
|
||||||
|
}
|
||||||
|
vt, ok := rv.(V)
|
||||||
|
if !ok {
|
||||||
|
return v, fmt.Errorf("unexpected type %T returned from %T. expected type: %T", vt, q, v)
|
||||||
|
}
|
||||||
|
return vt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func scanWithInterceptors[Q1 ent.Query, Q2 interface {
|
||||||
|
sqlScan(context.Context, Q1, any) error
|
||||||
|
}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) {
|
||||||
|
query, ok := q.(Q1)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T", q)
|
||||||
|
}
|
||||||
|
if err := selectOrGroup.sqlScan(ctx, query, v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() {
|
||||||
|
return rv.Elem().Interface(), nil
|
||||||
|
}
|
||||||
|
return v, nil
|
||||||
|
})
|
||||||
|
for i := len(inters) - 1; i >= 0; i-- {
|
||||||
|
qr = inters[i].Intercept(qr)
|
||||||
|
}
|
||||||
|
vv, err := qr.Query(ctx, rootQuery)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch rv2 := reflect.ValueOf(vv); {
|
||||||
|
case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer:
|
||||||
|
case rv.Type() == rv2.Type():
|
||||||
|
rv.Elem().Set(rv2.Elem())
|
||||||
|
case rv.Elem().Type() == rv2.Type():
|
||||||
|
rv.Elem().Set(rv2)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// queryHook describes an internal hook for the different sqlAll methods.
|
||||||
|
type queryHook func(context.Context, *sqlgraph.QuerySpec)
|
||||||
84
backend/ent/enttest/enttest.go
Normal file
84
backend/ent/enttest/enttest.go
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package enttest
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
// required by schema hooks.
|
||||||
|
_ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql/schema"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/migrate"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
	// TestingT is the interface that is shared between
	// testing.T and testing.B and used by enttest.
	TestingT interface {
		FailNow()
		Error(...any)
	}

	// Option configures client creation.
	Option func(*options)

	// options aggregates the settings collected from the Option setters.
	options struct {
		// opts are forwarded to ent.Open / ent.NewClient.
		opts []ent.Option
		// migrateOpts are forwarded to the auto-migration step.
		migrateOpts []schema.MigrateOption
	}
)
|
||||||
|
|
||||||
|
// WithOptions forwards options to client creation.
|
||||||
|
func WithOptions(opts ...ent.Option) Option {
|
||||||
|
return func(o *options) {
|
||||||
|
o.opts = append(o.opts, opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMigrateOptions forwards options to auto migration.
|
||||||
|
func WithMigrateOptions(opts ...schema.MigrateOption) Option {
|
||||||
|
return func(o *options) {
|
||||||
|
o.migrateOpts = append(o.migrateOpts, opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newOptions(opts []Option) *options {
|
||||||
|
o := &options{}
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(o)
|
||||||
|
}
|
||||||
|
return o
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open calls ent.Open and auto-run migration.
// Any open or migration error fails the test via t.Error + t.FailNow.
func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c, err := ent.Open(driverName, dataSourceName, o.opts...)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	migrateSchema(t, c, o)
	return c
}
|
||||||
|
|
||||||
|
// NewClient calls ent.NewClient and auto-run migration.
// Unlike Open, this uses an already-configured driver passed via opts.
func NewClient(t TestingT, opts ...Option) *ent.Client {
	o := newOptions(opts)
	c := ent.NewClient(o.opts...)
	migrateSchema(t, c, o)
	return c
}
|
||||||
|
// migrateSchema copies the generated migration tables and runs auto-migration
// on the client's schema, failing the test on any error.
func migrateSchema(t TestingT, c *ent.Client, o *options) {
	// Copy so callers' migrate options cannot mutate the shared table slice.
	tables, err := schema.CopyTables(migrate.Tables)
	if err != nil {
		t.Error(err)
		t.FailNow()
	}
	if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil {
		t.Error(err)
		t.FailNow()
	}
}
|
||||||
6
backend/ent/generate.go
Normal file
6
backend/ent/generate.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
// Package ent provides the generated ORM code for database entities.
package ent

// Enable sql/execquery to generate pass-through ExecContext/QueryContext methods,
// making it easy to run raw SQL inside transactions.
// Enable sql/lock to support FOR UPDATE row locks.
//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept,sql/execquery,sql/lock --idtype int64 ./schema
|
||||||
473
backend/ent/group.go
Normal file
473
backend/ent/group.go
Normal file
@@ -0,0 +1,473 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Group is the model entity for the Group schema.
type Group struct {
	config `json:"-"`
	// ID of the ent.
	ID int64 `json:"id,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// DeletedAt holds the value of the "deleted_at" field. Nil means not deleted.
	DeletedAt *time.Time `json:"deleted_at,omitempty"`
	// Name holds the value of the "name" field.
	Name string `json:"name,omitempty"`
	// Description holds the value of the "description" field.
	Description *string `json:"description,omitempty"`
	// RateMultiplier holds the value of the "rate_multiplier" field.
	RateMultiplier float64 `json:"rate_multiplier,omitempty"`
	// IsExclusive holds the value of the "is_exclusive" field.
	IsExclusive bool `json:"is_exclusive,omitempty"`
	// Status holds the value of the "status" field.
	Status string `json:"status,omitempty"`
	// Platform holds the value of the "platform" field.
	Platform string `json:"platform,omitempty"`
	// SubscriptionType holds the value of the "subscription_type" field.
	SubscriptionType string `json:"subscription_type,omitempty"`
	// DailyLimitUsd holds the value of the "daily_limit_usd" field.
	DailyLimitUsd *float64 `json:"daily_limit_usd,omitempty"`
	// WeeklyLimitUsd holds the value of the "weekly_limit_usd" field.
	WeeklyLimitUsd *float64 `json:"weekly_limit_usd,omitempty"`
	// MonthlyLimitUsd holds the value of the "monthly_limit_usd" field.
	MonthlyLimitUsd *float64 `json:"monthly_limit_usd,omitempty"`
	// DefaultValidityDays holds the value of the "default_validity_days" field.
	DefaultValidityDays int `json:"default_validity_days,omitempty"`
	// ImagePrice1k holds the value of the "image_price_1k" field.
	ImagePrice1k *float64 `json:"image_price_1k,omitempty"`
	// ImagePrice2k holds the value of the "image_price_2k" field.
	ImagePrice2k *float64 `json:"image_price_2k,omitempty"`
	// ImagePrice4k holds the value of the "image_price_4k" field.
	ImagePrice4k *float64 `json:"image_price_4k,omitempty"`
	// Whether only Claude Code clients are allowed to use this group.
	ClaudeCodeOnly bool `json:"claude_code_only,omitempty"`
	// Group ID to fall back to for non-Claude-Code requests.
	FallbackGroupID *int64 `json:"fallback_group_id,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the GroupQuery when eager-loading is set.
	Edges GroupEdges `json:"edges"`
	// selectValues stores extra values selected via modifiers, order, etc.
	selectValues sql.SelectValues
}
|
||||||
|
|
||||||
|
// GroupEdges holds the relations/edges for other nodes in the graph.
type GroupEdges struct {
	// APIKeys holds the value of the api_keys edge.
	APIKeys []*APIKey `json:"api_keys,omitempty"`
	// RedeemCodes holds the value of the redeem_codes edge.
	RedeemCodes []*RedeemCode `json:"redeem_codes,omitempty"`
	// Subscriptions holds the value of the subscriptions edge.
	Subscriptions []*UserSubscription `json:"subscriptions,omitempty"`
	// UsageLogs holds the value of the usage_logs edge.
	UsageLogs []*UsageLog `json:"usage_logs,omitempty"`
	// Accounts holds the value of the accounts edge.
	Accounts []*Account `json:"accounts,omitempty"`
	// AllowedUsers holds the value of the allowed_users edge.
	AllowedUsers []*User `json:"allowed_users,omitempty"`
	// AccountGroups holds the value of the account_groups edge.
	AccountGroups []*AccountGroup `json:"account_groups,omitempty"`
	// UserAllowedGroups holds the value of the user_allowed_groups edge.
	UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index order matches the field order above.
	loadedTypes [8]bool
}
|
||||||
|
|
||||||
|
// APIKeysOrErr returns the APIKeys value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) APIKeysOrErr() ([]*APIKey, error) {
|
||||||
|
if e.loadedTypes[0] {
|
||||||
|
return e.APIKeys, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "api_keys"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodesOrErr returns the RedeemCodes value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) RedeemCodesOrErr() ([]*RedeemCode, error) {
|
||||||
|
if e.loadedTypes[1] {
|
||||||
|
return e.RedeemCodes, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "redeem_codes"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubscriptionsOrErr returns the Subscriptions value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) SubscriptionsOrErr() ([]*UserSubscription, error) {
|
||||||
|
if e.loadedTypes[2] {
|
||||||
|
return e.Subscriptions, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "subscriptions"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsageLogsOrErr returns the UsageLogs value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) UsageLogsOrErr() ([]*UsageLog, error) {
|
||||||
|
if e.loadedTypes[3] {
|
||||||
|
return e.UsageLogs, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "usage_logs"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountsOrErr returns the Accounts value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) AccountsOrErr() ([]*Account, error) {
|
||||||
|
if e.loadedTypes[4] {
|
||||||
|
return e.Accounts, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "accounts"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllowedUsersOrErr returns the AllowedUsers value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) AllowedUsersOrErr() ([]*User, error) {
|
||||||
|
if e.loadedTypes[5] {
|
||||||
|
return e.AllowedUsers, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "allowed_users"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountGroupsOrErr returns the AccountGroups value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) AccountGroupsOrErr() ([]*AccountGroup, error) {
|
||||||
|
if e.loadedTypes[6] {
|
||||||
|
return e.AccountGroups, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "account_groups"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e GroupEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) {
|
||||||
|
if e.loadedTypes[7] {
|
||||||
|
return e.UserAllowedGroups, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user_allowed_groups"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
// Each column maps to a Null* scanner matching its schema type; unknown
// columns fall back to sql.UnknownType and end up in selectValues.
func (*Group) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		case group.FieldIsExclusive, group.FieldClaudeCodeOnly:
			values[i] = new(sql.NullBool)
		case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k:
			values[i] = new(sql.NullFloat64)
		case group.FieldID, group.FieldDefaultValidityDays, group.FieldFallbackGroupID:
			values[i] = new(sql.NullInt64)
		case group.FieldName, group.FieldDescription, group.FieldStatus, group.FieldPlatform, group.FieldSubscriptionType:
			values[i] = new(sql.NullString)
		case group.FieldCreatedAt, group.FieldUpdatedAt, group.FieldDeletedAt:
			values[i] = new(sql.NullTime)
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Group fields. The values slice must be at least as long as columns.
// Nullable fields are only assigned when the scanned value is valid; pointer
// fields get freshly allocated storage.
func (_m *Group) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case group.FieldID:
			value, ok := values[i].(*sql.NullInt64)
			if !ok {
				return fmt.Errorf("unexpected type %T for field id", value)
			}
			_m.ID = int64(value.Int64)
		case group.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				_m.CreatedAt = value.Time
			}
		case group.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				_m.UpdatedAt = value.Time
			}
		case group.FieldDeletedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
			} else if value.Valid {
				_m.DeletedAt = new(time.Time)
				*_m.DeletedAt = value.Time
			}
		case group.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				_m.Name = value.String
			}
		case group.FieldDescription:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field description", values[i])
			} else if value.Valid {
				_m.Description = new(string)
				*_m.Description = value.String
			}
		case group.FieldRateMultiplier:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i])
			} else if value.Valid {
				_m.RateMultiplier = value.Float64
			}
		case group.FieldIsExclusive:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field is_exclusive", values[i])
			} else if value.Valid {
				_m.IsExclusive = value.Bool
			}
		case group.FieldStatus:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field status", values[i])
			} else if value.Valid {
				_m.Status = value.String
			}
		case group.FieldPlatform:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field platform", values[i])
			} else if value.Valid {
				_m.Platform = value.String
			}
		case group.FieldSubscriptionType:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field subscription_type", values[i])
			} else if value.Valid {
				_m.SubscriptionType = value.String
			}
		case group.FieldDailyLimitUsd:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field daily_limit_usd", values[i])
			} else if value.Valid {
				_m.DailyLimitUsd = new(float64)
				*_m.DailyLimitUsd = value.Float64
			}
		case group.FieldWeeklyLimitUsd:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field weekly_limit_usd", values[i])
			} else if value.Valid {
				_m.WeeklyLimitUsd = new(float64)
				*_m.WeeklyLimitUsd = value.Float64
			}
		case group.FieldMonthlyLimitUsd:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field monthly_limit_usd", values[i])
			} else if value.Valid {
				_m.MonthlyLimitUsd = new(float64)
				*_m.MonthlyLimitUsd = value.Float64
			}
		case group.FieldDefaultValidityDays:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field default_validity_days", values[i])
			} else if value.Valid {
				_m.DefaultValidityDays = int(value.Int64)
			}
		case group.FieldImagePrice1k:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field image_price_1k", values[i])
			} else if value.Valid {
				_m.ImagePrice1k = new(float64)
				*_m.ImagePrice1k = value.Float64
			}
		case group.FieldImagePrice2k:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field image_price_2k", values[i])
			} else if value.Valid {
				_m.ImagePrice2k = new(float64)
				*_m.ImagePrice2k = value.Float64
			}
		case group.FieldImagePrice4k:
			if value, ok := values[i].(*sql.NullFloat64); !ok {
				return fmt.Errorf("unexpected type %T for field image_price_4k", values[i])
			} else if value.Valid {
				_m.ImagePrice4k = new(float64)
				*_m.ImagePrice4k = value.Float64
			}
		case group.FieldClaudeCodeOnly:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field claude_code_only", values[i])
			} else if value.Valid {
				_m.ClaudeCodeOnly = value.Bool
			}
		case group.FieldFallbackGroupID:
			if value, ok := values[i].(*sql.NullInt64); !ok {
				return fmt.Errorf("unexpected type %T for field fallback_group_id", values[i])
			} else if value.Valid {
				_m.FallbackGroupID = new(int64)
				*_m.FallbackGroupID = value.Int64
			}
		default:
			// Unknown columns (modifiers, custom selects) go to selectValues.
			_m.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the Group.
// This includes values selected through modifiers, order, etc.
func (_m *Group) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryAPIKeys queries the "api_keys" edge of the Group entity.
func (_m *Group) QueryAPIKeys() *APIKeyQuery {
	return NewGroupClient(_m.config).QueryAPIKeys(_m)
}

// QueryRedeemCodes queries the "redeem_codes" edge of the Group entity.
func (_m *Group) QueryRedeemCodes() *RedeemCodeQuery {
	return NewGroupClient(_m.config).QueryRedeemCodes(_m)
}

// QuerySubscriptions queries the "subscriptions" edge of the Group entity.
func (_m *Group) QuerySubscriptions() *UserSubscriptionQuery {
	return NewGroupClient(_m.config).QuerySubscriptions(_m)
}

// QueryUsageLogs queries the "usage_logs" edge of the Group entity.
func (_m *Group) QueryUsageLogs() *UsageLogQuery {
	return NewGroupClient(_m.config).QueryUsageLogs(_m)
}

// QueryAccounts queries the "accounts" edge of the Group entity.
func (_m *Group) QueryAccounts() *AccountQuery {
	return NewGroupClient(_m.config).QueryAccounts(_m)
}

// QueryAllowedUsers queries the "allowed_users" edge of the Group entity.
func (_m *Group) QueryAllowedUsers() *UserQuery {
	return NewGroupClient(_m.config).QueryAllowedUsers(_m)
}

// QueryAccountGroups queries the "account_groups" edge of the Group entity.
func (_m *Group) QueryAccountGroups() *AccountGroupQuery {
	return NewGroupClient(_m.config).QueryAccountGroups(_m)
}

// QueryUserAllowedGroups queries the "user_allowed_groups" edge of the Group entity.
func (_m *Group) QueryUserAllowedGroups() *UserAllowedGroupQuery {
	return NewGroupClient(_m.config).QueryUserAllowedGroups(_m)
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this Group.
// Note that you need to call Group.Unwrap() before calling this method if this Group
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *Group) Update() *GroupUpdateOne {
	return NewGroupClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the Group entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
// Panics if the entity was not obtained from a transaction.
func (_m *Group) Unwrap() *Group {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		panic("ent: Group is not a transactional entity")
	}
	// Swap the tx driver for the underlying driver it wraps.
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
// Optional (pointer) fields are printed only when non-nil, but the field
// separator is written unconditionally, matching ent's generated format.
func (_m *Group) String() string {
	var builder strings.Builder
	builder.WriteString("Group(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("created_at=")
	builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	if v := _m.DeletedAt; v != nil {
		builder.WriteString("deleted_at=")
		builder.WriteString(v.Format(time.ANSIC))
	}
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(_m.Name)
	builder.WriteString(", ")
	if v := _m.Description; v != nil {
		builder.WriteString("description=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("rate_multiplier=")
	builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier))
	builder.WriteString(", ")
	builder.WriteString("is_exclusive=")
	builder.WriteString(fmt.Sprintf("%v", _m.IsExclusive))
	builder.WriteString(", ")
	builder.WriteString("status=")
	builder.WriteString(_m.Status)
	builder.WriteString(", ")
	builder.WriteString("platform=")
	builder.WriteString(_m.Platform)
	builder.WriteString(", ")
	builder.WriteString("subscription_type=")
	builder.WriteString(_m.SubscriptionType)
	builder.WriteString(", ")
	if v := _m.DailyLimitUsd; v != nil {
		builder.WriteString("daily_limit_usd=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.WeeklyLimitUsd; v != nil {
		builder.WriteString("weekly_limit_usd=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.MonthlyLimitUsd; v != nil {
		builder.WriteString("monthly_limit_usd=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("default_validity_days=")
	builder.WriteString(fmt.Sprintf("%v", _m.DefaultValidityDays))
	builder.WriteString(", ")
	if v := _m.ImagePrice1k; v != nil {
		builder.WriteString("image_price_1k=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.ImagePrice2k; v != nil {
		builder.WriteString("image_price_2k=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	if v := _m.ImagePrice4k; v != nil {
		builder.WriteString("image_price_4k=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteString(", ")
	builder.WriteString("claude_code_only=")
	builder.WriteString(fmt.Sprintf("%v", _m.ClaudeCodeOnly))
	builder.WriteString(", ")
	if v := _m.FallbackGroupID; v != nil {
		builder.WriteString("fallback_group_id=")
		builder.WriteString(fmt.Sprintf("%v", *v))
	}
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// Groups is a parsable slice of Group.
type Groups []*Group
|
||||||
478
backend/ent/group/group.go
Normal file
478
backend/ent/group/group.go
Normal file
@@ -0,0 +1,478 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package group
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the group type in the database.
	Label = "group"

	// Column names for the group fields.

	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldDeletedAt holds the string denoting the deleted_at field in the database.
	FieldDeletedAt = "deleted_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldDescription holds the string denoting the description field in the database.
	FieldDescription = "description"
	// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
	FieldRateMultiplier = "rate_multiplier"
	// FieldIsExclusive holds the string denoting the is_exclusive field in the database.
	FieldIsExclusive = "is_exclusive"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// FieldPlatform holds the string denoting the platform field in the database.
	FieldPlatform = "platform"
	// FieldSubscriptionType holds the string denoting the subscription_type field in the database.
	FieldSubscriptionType = "subscription_type"
	// FieldDailyLimitUsd holds the string denoting the daily_limit_usd field in the database.
	FieldDailyLimitUsd = "daily_limit_usd"
	// FieldWeeklyLimitUsd holds the string denoting the weekly_limit_usd field in the database.
	FieldWeeklyLimitUsd = "weekly_limit_usd"
	// FieldMonthlyLimitUsd holds the string denoting the monthly_limit_usd field in the database.
	FieldMonthlyLimitUsd = "monthly_limit_usd"
	// FieldDefaultValidityDays holds the string denoting the default_validity_days field in the database.
	FieldDefaultValidityDays = "default_validity_days"
	// FieldImagePrice1k holds the string denoting the image_price_1k field in the database.
	FieldImagePrice1k = "image_price_1k"
	// FieldImagePrice2k holds the string denoting the image_price_2k field in the database.
	FieldImagePrice2k = "image_price_2k"
	// FieldImagePrice4k holds the string denoting the image_price_4k field in the database.
	FieldImagePrice4k = "image_price_4k"
	// FieldClaudeCodeOnly holds the string denoting the claude_code_only field in the database.
	FieldClaudeCodeOnly = "claude_code_only"
	// FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database.
	FieldFallbackGroupID = "fallback_group_id"

	// Edge names used in mutations.

	// EdgeAPIKeys holds the string denoting the api_keys edge name in mutations.
	EdgeAPIKeys = "api_keys"
	// EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations.
	EdgeRedeemCodes = "redeem_codes"
	// EdgeSubscriptions holds the string denoting the subscriptions edge name in mutations.
	EdgeSubscriptions = "subscriptions"
	// EdgeUsageLogs holds the string denoting the usage_logs edge name in mutations.
	EdgeUsageLogs = "usage_logs"
	// EdgeAccounts holds the string denoting the accounts edge name in mutations.
	EdgeAccounts = "accounts"
	// EdgeAllowedUsers holds the string denoting the allowed_users edge name in mutations.
	EdgeAllowedUsers = "allowed_users"
	// EdgeAccountGroups holds the string denoting the account_groups edge name in mutations.
	EdgeAccountGroups = "account_groups"
	// EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations.
	EdgeUserAllowedGroups = "user_allowed_groups"

	// Table and relation/edge metadata.

	// Table holds the table name of the group in the database.
	Table = "groups"
	// APIKeysTable is the table that holds the api_keys relation/edge.
	APIKeysTable = "api_keys"
	// APIKeysInverseTable is the table name for the APIKey entity.
	// It exists in this package in order to avoid circular dependency with the "apikey" package.
	APIKeysInverseTable = "api_keys"
	// APIKeysColumn is the table column denoting the api_keys relation/edge.
	APIKeysColumn = "group_id"
	// RedeemCodesTable is the table that holds the redeem_codes relation/edge.
	RedeemCodesTable = "redeem_codes"
	// RedeemCodesInverseTable is the table name for the RedeemCode entity.
	// It exists in this package in order to avoid circular dependency with the "redeemcode" package.
	RedeemCodesInverseTable = "redeem_codes"
	// RedeemCodesColumn is the table column denoting the redeem_codes relation/edge.
	RedeemCodesColumn = "group_id"
	// SubscriptionsTable is the table that holds the subscriptions relation/edge.
	SubscriptionsTable = "user_subscriptions"
	// SubscriptionsInverseTable is the table name for the UserSubscription entity.
	// It exists in this package in order to avoid circular dependency with the "usersubscription" package.
	SubscriptionsInverseTable = "user_subscriptions"
	// SubscriptionsColumn is the table column denoting the subscriptions relation/edge.
	SubscriptionsColumn = "group_id"
	// UsageLogsTable is the table that holds the usage_logs relation/edge.
	UsageLogsTable = "usage_logs"
	// UsageLogsInverseTable is the table name for the UsageLog entity.
	// It exists in this package in order to avoid circular dependency with the "usagelog" package.
	UsageLogsInverseTable = "usage_logs"
	// UsageLogsColumn is the table column denoting the usage_logs relation/edge.
	UsageLogsColumn = "group_id"
	// AccountsTable is the table that holds the accounts relation/edge. The primary key declared below.
	AccountsTable = "account_groups"
	// AccountsInverseTable is the table name for the Account entity.
	// It exists in this package in order to avoid circular dependency with the "account" package.
	AccountsInverseTable = "accounts"
	// AllowedUsersTable is the table that holds the allowed_users relation/edge. The primary key declared below.
	AllowedUsersTable = "user_allowed_groups"
	// AllowedUsersInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	AllowedUsersInverseTable = "users"
	// AccountGroupsTable is the table that holds the account_groups relation/edge.
	AccountGroupsTable = "account_groups"
	// AccountGroupsInverseTable is the table name for the AccountGroup entity.
	// It exists in this package in order to avoid circular dependency with the "accountgroup" package.
	AccountGroupsInverseTable = "account_groups"
	// AccountGroupsColumn is the table column denoting the account_groups relation/edge.
	AccountGroupsColumn = "group_id"
	// UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge.
	UserAllowedGroupsTable = "user_allowed_groups"
	// UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity.
	// It exists in this package in order to avoid circular dependency with the "userallowedgroup" package.
	UserAllowedGroupsInverseTable = "user_allowed_groups"
	// UserAllowedGroupsColumn is the table column denoting the user_allowed_groups relation/edge.
	UserAllowedGroupsColumn = "group_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for group fields.
// It is consulted by the query/mutation builders via ValidColumn below.
var Columns = []string{
	FieldID,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldDeletedAt,
	FieldName,
	FieldDescription,
	FieldRateMultiplier,
	FieldIsExclusive,
	FieldStatus,
	FieldPlatform,
	FieldSubscriptionType,
	FieldDailyLimitUsd,
	FieldWeeklyLimitUsd,
	FieldMonthlyLimitUsd,
	FieldDefaultValidityDays,
	FieldImagePrice1k,
	FieldImagePrice2k,
	FieldImagePrice4k,
	FieldClaudeCodeOnly,
	FieldFallbackGroupID,
}
|
||||||
|
|
||||||
|
var (
	// AccountsPrimaryKey and AccountsColumn2 are the table columns denoting the
	// primary key for the accounts relation (M2M).
	// Column order is {account_id, group_id} in the "account_groups" join table.
	AccountsPrimaryKey = []string{"account_id", "group_id"}
	// AllowedUsersPrimaryKey and AllowedUsersColumn2 are the table columns denoting the
	// primary key for the allowed_users relation (M2M).
	// Column order is {user_id, group_id} in the "user_allowed_groups" join table.
	AllowedUsersPrimaryKey = []string{"user_id", "group_id"}
)
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that the variables below are initialized by the runtime
// package on the initialization of the application. Therefore,
// it should be imported in the main as follows:
//
//	import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
var (
	// Hooks holds the schema hooks for the Group entity (populated by the runtime package).
	Hooks [1]ent.Hook
	// Interceptors holds the schema interceptors for the Group entity (populated by the runtime package).
	Interceptors [1]ent.Interceptor
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field.
	DefaultRateMultiplier float64
	// DefaultIsExclusive holds the default value on creation for the "is_exclusive" field.
	DefaultIsExclusive bool
	// DefaultStatus holds the default value on creation for the "status" field.
	DefaultStatus string
	// StatusValidator is a validator for the "status" field. It is called by the builders before save.
	StatusValidator func(string) error
	// DefaultPlatform holds the default value on creation for the "platform" field.
	DefaultPlatform string
	// PlatformValidator is a validator for the "platform" field. It is called by the builders before save.
	PlatformValidator func(string) error
	// DefaultSubscriptionType holds the default value on creation for the "subscription_type" field.
	DefaultSubscriptionType string
	// SubscriptionTypeValidator is a validator for the "subscription_type" field. It is called by the builders before save.
	SubscriptionTypeValidator func(string) error
	// DefaultDefaultValidityDays holds the default value on creation for the "default_validity_days" field.
	DefaultDefaultValidityDays int
	// DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field.
	DefaultClaudeCodeOnly bool
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the Group queries.
// Each option mutates the SQL selector to append an ORDER BY term.
type OrderOption func(*sql.Selector)
||||||
|
|
||||||
|
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByDeletedAt orders the results by the deleted_at field.
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByDescription orders the results by the description field.
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDescription, opts...).ToFunc()
}

// ByRateMultiplier orders the results by the rate_multiplier field.
func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc()
}

// ByIsExclusive orders the results by the is_exclusive field.
func ByIsExclusive(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIsExclusive, opts...).ToFunc()
}

// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldStatus, opts...).ToFunc()
}

// ByPlatform orders the results by the platform field.
func ByPlatform(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPlatform, opts...).ToFunc()
}

// BySubscriptionType orders the results by the subscription_type field.
func BySubscriptionType(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldSubscriptionType, opts...).ToFunc()
}

// ByDailyLimitUsd orders the results by the daily_limit_usd field.
func ByDailyLimitUsd(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDailyLimitUsd, opts...).ToFunc()
}

// ByWeeklyLimitUsd orders the results by the weekly_limit_usd field.
func ByWeeklyLimitUsd(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldWeeklyLimitUsd, opts...).ToFunc()
}

// ByMonthlyLimitUsd orders the results by the monthly_limit_usd field.
func ByMonthlyLimitUsd(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldMonthlyLimitUsd, opts...).ToFunc()
}

// ByDefaultValidityDays orders the results by the default_validity_days field.
func ByDefaultValidityDays(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDefaultValidityDays, opts...).ToFunc()
}

// ByImagePrice1k orders the results by the image_price_1k field.
func ByImagePrice1k(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldImagePrice1k, opts...).ToFunc()
}

// ByImagePrice2k orders the results by the image_price_2k field.
func ByImagePrice2k(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldImagePrice2k, opts...).ToFunc()
}

// ByImagePrice4k orders the results by the image_price_4k field.
func ByImagePrice4k(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldImagePrice4k, opts...).ToFunc()
}

// ByClaudeCodeOnly orders the results by the claude_code_only field.
func ByClaudeCodeOnly(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldClaudeCodeOnly, opts...).ToFunc()
}

// ByFallbackGroupID orders the results by the fallback_group_id field.
func ByFallbackGroupID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldFallbackGroupID, opts...).ToFunc()
}

// The functions below order by edge (relation) neighbors. The *Count
// variants order by the number of related rows; the term variants order
// by columns of the related table.

// ByAPIKeysCount orders the results by api_keys count.
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newAPIKeysStep(), opts...)
	}
}

// ByAPIKeys orders the results by api_keys terms.
func ByAPIKeys(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAPIKeysStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByRedeemCodesCount orders the results by redeem_codes count.
func ByRedeemCodesCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newRedeemCodesStep(), opts...)
	}
}

// ByRedeemCodes orders the results by redeem_codes terms.
func ByRedeemCodes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newRedeemCodesStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// BySubscriptionsCount orders the results by subscriptions count.
func BySubscriptionsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newSubscriptionsStep(), opts...)
	}
}

// BySubscriptions orders the results by subscriptions terms.
func BySubscriptions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newSubscriptionsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByUsageLogsCount orders the results by usage_logs count.
func ByUsageLogsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newUsageLogsStep(), opts...)
	}
}

// ByUsageLogs orders the results by usage_logs terms.
func ByUsageLogs(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUsageLogsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByAccountsCount orders the results by accounts count.
func ByAccountsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newAccountsStep(), opts...)
	}
}

// ByAccounts orders the results by accounts terms.
func ByAccounts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAccountsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByAllowedUsersCount orders the results by allowed_users count.
func ByAllowedUsersCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newAllowedUsersStep(), opts...)
	}
}

// ByAllowedUsers orders the results by allowed_users terms.
func ByAllowedUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAllowedUsersStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByAccountGroupsCount orders the results by account_groups count.
func ByAccountGroupsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newAccountGroupsStep(), opts...)
	}
}

// ByAccountGroups orders the results by account_groups terms.
func ByAccountGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newAccountGroupsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}

// ByUserAllowedGroupsCount orders the results by user_allowed_groups count.
func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborsCount(s, newUserAllowedGroupsStep(), opts...)
	}
}

// ByUserAllowedGroups orders the results by user_allowed_groups terms.
func ByUserAllowedGroups(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserAllowedGroupsStep(), append([]sql.OrderTerm{term}, terms...)...)
	}
}
|
||||||
|
// newAPIKeysStep builds the graph-traversal step for the api_keys edge (O2M).
func newAPIKeysStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(APIKeysInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, APIKeysTable, APIKeysColumn),
	)
}

// newRedeemCodesStep builds the graph-traversal step for the redeem_codes edge (O2M).
func newRedeemCodesStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(RedeemCodesInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, RedeemCodesTable, RedeemCodesColumn),
	)
}

// newSubscriptionsStep builds the graph-traversal step for the subscriptions edge (O2M).
func newSubscriptionsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(SubscriptionsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, SubscriptionsTable, SubscriptionsColumn),
	)
}

// newUsageLogsStep builds the graph-traversal step for the usage_logs edge (O2M).
func newUsageLogsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UsageLogsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.O2M, false, UsageLogsTable, UsageLogsColumn),
	)
}

// newAccountsStep builds the graph-traversal step for the accounts edge
// (M2M through the account_groups join table, inverse side).
func newAccountsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(AccountsInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2M, true, AccountsTable, AccountsPrimaryKey...),
	)
}

// newAllowedUsersStep builds the graph-traversal step for the allowed_users edge
// (M2M through the user_allowed_groups join table, inverse side).
func newAllowedUsersStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(AllowedUsersInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2M, true, AllowedUsersTable, AllowedUsersPrimaryKey...),
	)
}

// newAccountGroupsStep builds the graph-traversal step for the account_groups edge (O2M, inverse).
// NOTE(review): unlike the steps above, To targets AccountGroupsColumn rather than
// FieldID — presumably because account_groups is an edge-schema (through) table;
// confirm against the generated output on regeneration.
func newAccountGroupsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(AccountGroupsInverseTable, AccountGroupsColumn),
		sqlgraph.Edge(sqlgraph.O2M, true, AccountGroupsTable, AccountGroupsColumn),
	)
}

// newUserAllowedGroupsStep builds the graph-traversal step for the user_allowed_groups edge (O2M, inverse).
// NOTE(review): To targets UserAllowedGroupsColumn rather than FieldID — same
// edge-schema pattern as newAccountGroupsStep; confirm on regeneration.
func newUserAllowedGroupsStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserAllowedGroupsInverseTable, UserAllowedGroupsColumn),
		sqlgraph.Edge(sqlgraph.O2M, true, UserAllowedGroupsTable, UserAllowedGroupsColumn),
	)
}
|
||||||
1265
backend/ent/group/where.go
Normal file
1265
backend/ent/group/where.go
Normal file
File diff suppressed because it is too large
Load Diff
2129
backend/ent/group_create.go
Normal file
2129
backend/ent/group_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/group_delete.go
Normal file
88
backend/ent/group_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GroupDelete is the builder for deleting a Group entity.
type GroupDelete struct {
	config
	hooks    []Hook
	mutation *GroupMutation
}

// Where appends a list predicates to the GroupDelete builder.
func (_d *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
	_d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (_d *GroupDelete) Exec(ctx context.Context) (int, error) {
	// withHooks runs the registered mutation hooks around sqlExec.
	return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *GroupDelete) ExecX(ctx context.Context) int {
	n, err := _d.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds and executes the SQL DELETE statement for the builder's
// predicates, translating constraint violations into *ConstraintError.
func (_d *GroupDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(group.Table, sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64))
	if ps := _d.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	_d.mutation.done = true
	return affected, err
}
|
||||||
|
|
||||||
|
// GroupDeleteOne is the builder for deleting a single Group entity.
// It wraps a GroupDelete and requires exactly one row to be affected.
type GroupDeleteOne struct {
	_d *GroupDelete
}

// Where appends a list predicates to the GroupDelete builder.
func (_d *GroupDeleteOne) Where(ps ...predicate.Group) *GroupDeleteOne {
	_d._d.mutation.Where(ps...)
	return _d
}

// Exec executes the deletion query.
// It returns *NotFoundError when no row matched the predicates.
func (_d *GroupDeleteOne) Exec(ctx context.Context) error {
	n, err := _d._d.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{group.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (_d *GroupDeleteOne) ExecX(ctx context.Context) {
	if err := _d.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
1232
backend/ent/group_query.go
Normal file
1232
backend/ent/group_query.go
Normal file
File diff suppressed because it is too large
Load Diff
2226
backend/ent/group_update.go
Normal file
2226
backend/ent/group_update.go
Normal file
File diff suppressed because it is too large
Load Diff
367
backend/ent/hook/hook.go
Normal file
367
backend/ent/hook/hook.go
Normal file
@@ -0,0 +1,367 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package hook
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The adapter types below let a plain function be used as an ent mutator
// for one concrete mutation type. Each Mutate type-asserts the generic
// ent.Mutation and fails with a descriptive error on a mismatch.

// The APIKeyFunc type is an adapter to allow the use of ordinary
// function as APIKey mutator.
type APIKeyFunc func(context.Context, *ent.APIKeyMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f APIKeyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.APIKeyMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.APIKeyMutation", m)
}

// The AccountFunc type is an adapter to allow the use of ordinary
// function as Account mutator.
type AccountFunc func(context.Context, *ent.AccountMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f AccountFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.AccountMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountMutation", m)
}

// The AccountGroupFunc type is an adapter to allow the use of ordinary
// function as AccountGroup mutator.
type AccountGroupFunc func(context.Context, *ent.AccountGroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f AccountGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.AccountGroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.AccountGroupMutation", m)
}

// The GroupFunc type is an adapter to allow the use of ordinary
// function as Group mutator.
type GroupFunc func(context.Context, *ent.GroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.GroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m)
}

// The PromoCodeFunc type is an adapter to allow the use of ordinary
// function as PromoCode mutator.
type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f PromoCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.PromoCodeMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeMutation", m)
}

// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary
// function as PromoCodeUsage mutator.
type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f PromoCodeUsageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.PromoCodeUsageMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeUsageMutation", m)
}

// The ProxyFunc type is an adapter to allow the use of ordinary
// function as Proxy mutator.
type ProxyFunc func(context.Context, *ent.ProxyMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f ProxyFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.ProxyMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ProxyMutation", m)
}

// The RedeemCodeFunc type is an adapter to allow the use of ordinary
// function as RedeemCode mutator.
type RedeemCodeFunc func(context.Context, *ent.RedeemCodeMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f RedeemCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.RedeemCodeMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.RedeemCodeMutation", m)
}

// The SettingFunc type is an adapter to allow the use of ordinary
// function as Setting mutator.
type SettingFunc func(context.Context, *ent.SettingMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f SettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.SettingMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.SettingMutation", m)
}

// The UsageLogFunc type is an adapter to allow the use of ordinary
// function as UsageLog mutator.
type UsageLogFunc func(context.Context, *ent.UsageLogMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UsageLogFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UsageLogMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UsageLogMutation", m)
}

// The UserFunc type is an adapter to allow the use of ordinary
// function as User mutator.
type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m)
}

// The UserAllowedGroupFunc type is an adapter to allow the use of ordinary
// function as UserAllowedGroup mutator.
type UserAllowedGroupFunc func(context.Context, *ent.UserAllowedGroupMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserAllowedGroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserAllowedGroupMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAllowedGroupMutation", m)
}

// The UserAttributeDefinitionFunc type is an adapter to allow the use of ordinary
// function as UserAttributeDefinition mutator.
type UserAttributeDefinitionFunc func(context.Context, *ent.UserAttributeDefinitionMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserAttributeDefinitionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserAttributeDefinitionMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAttributeDefinitionMutation", m)
}

// The UserAttributeValueFunc type is an adapter to allow the use of ordinary
// function as UserAttributeValue mutator.
type UserAttributeValueFunc func(context.Context, *ent.UserAttributeValueMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserAttributeValueFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserAttributeValueMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserAttributeValueMutation", m)
}

// The UserSubscriptionFunc type is an adapter to allow the use of ordinary
// function as UserSubscription mutator.
type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f UserSubscriptionFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
	if mv, ok := m.(*ent.UserSubscriptionMutation); ok {
		return f(ctx, mv)
	}
	return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserSubscriptionMutation", m)
}
|
||||||
|
|
||||||
|
// Condition is a hook condition function.
// It reports whether a hook should run for the given mutation.
type Condition func(context.Context, ent.Mutation) bool

// And groups conditions with the AND operator.
// Evaluation short-circuits on the first condition that returns false.
func And(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		if !first(ctx, m) || !second(ctx, m) {
			return false
		}
		for _, cond := range rest {
			if !cond(ctx, m) {
				return false
			}
		}
		return true
	}
}

// Or groups conditions with the OR operator.
// Evaluation short-circuits on the first condition that returns true.
func Or(first, second Condition, rest ...Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		if first(ctx, m) || second(ctx, m) {
			return true
		}
		for _, cond := range rest {
			if cond(ctx, m) {
				return true
			}
		}
		return false
	}
}

// Not negates a given condition.
func Not(cond Condition) Condition {
	return func(ctx context.Context, m ent.Mutation) bool {
		return !cond(ctx, m)
	}
}

// HasOp is a condition testing mutation operation.
func HasOp(op ent.Op) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		return m.Op().Is(op)
	}
}

// HasAddedFields is a condition validating `.AddedField` on fields.
// It returns true only if every named field was added by the mutation.
func HasAddedFields(field string, fields ...string) Condition {
	return func(_ context.Context, m ent.Mutation) bool {
		if _, exists := m.AddedField(field); !exists {
			return false
		}
		// Intentionally shadows the `field` parameter while iterating the rest.
		for _, field := range fields {
			if _, exists := m.AddedField(field); !exists {
				return false
			}
		}
		return true
	}
}
|
||||||
|
|
||||||
|
// HasClearedFields is a condition validating `.FieldCleared` on fields.
|
||||||
|
func HasClearedFields(field string, fields ...string) Condition {
|
||||||
|
return func(_ context.Context, m ent.Mutation) bool {
|
||||||
|
if exists := m.FieldCleared(field); !exists {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, field := range fields {
|
||||||
|
if exists := m.FieldCleared(field); !exists {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasFields is a condition validating `.Field` on fields.
|
||||||
|
func HasFields(field string, fields ...string) Condition {
|
||||||
|
return func(_ context.Context, m ent.Mutation) bool {
|
||||||
|
if _, exists := m.Field(field); !exists {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for _, field := range fields {
|
||||||
|
if _, exists := m.Field(field); !exists {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If executes the given hook under condition.
|
||||||
|
//
|
||||||
|
// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...)))
|
||||||
|
func If(hk ent.Hook, cond Condition) ent.Hook {
|
||||||
|
return func(next ent.Mutator) ent.Mutator {
|
||||||
|
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
|
||||||
|
if cond(ctx, m) {
|
||||||
|
return hk(next).Mutate(ctx, m)
|
||||||
|
}
|
||||||
|
return next.Mutate(ctx, m)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// On executes the given hook only for the given operation.
|
||||||
|
//
|
||||||
|
// hook.On(Log, ent.Delete|ent.Create)
|
||||||
|
func On(hk ent.Hook, op ent.Op) ent.Hook {
|
||||||
|
return If(hk, HasOp(op))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unless skips the given hook only for the given operation.
|
||||||
|
//
|
||||||
|
// hook.Unless(Log, ent.Update|ent.UpdateOne)
|
||||||
|
func Unless(hk ent.Hook, op ent.Op) ent.Hook {
|
||||||
|
return If(hk, Not(HasOp(op)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// FixedError is a hook returning a fixed error.
|
||||||
|
func FixedError(err error) ent.Hook {
|
||||||
|
return func(ent.Mutator) ent.Mutator {
|
||||||
|
return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) {
|
||||||
|
return nil, err
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reject returns a hook that rejects all operations that match op.
|
||||||
|
//
|
||||||
|
// func (T) Hooks() []ent.Hook {
|
||||||
|
// return []ent.Hook{
|
||||||
|
// Reject(ent.Delete|ent.Update),
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
func Reject(op ent.Op) ent.Hook {
|
||||||
|
hk := FixedError(fmt.Errorf("%s operation is not allowed", op))
|
||||||
|
return On(hk, op)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chain acts as a list of hooks and is effectively immutable.
|
||||||
|
// Once created, it will always hold the same set of hooks in the same order.
|
||||||
|
type Chain struct {
|
||||||
|
hooks []ent.Hook
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewChain creates a new chain of hooks.
|
||||||
|
func NewChain(hooks ...ent.Hook) Chain {
|
||||||
|
return Chain{append([]ent.Hook(nil), hooks...)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Hook chains the list of hooks and returns the final hook.
|
||||||
|
func (c Chain) Hook() ent.Hook {
|
||||||
|
return func(mutator ent.Mutator) ent.Mutator {
|
||||||
|
for i := len(c.hooks) - 1; i >= 0; i-- {
|
||||||
|
mutator = c.hooks[i](mutator)
|
||||||
|
}
|
||||||
|
return mutator
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Append extends a chain, adding the specified hook
|
||||||
|
// as the last ones in the mutation flow.
|
||||||
|
func (c Chain) Append(hooks ...ent.Hook) Chain {
|
||||||
|
newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks))
|
||||||
|
newHooks = append(newHooks, c.hooks...)
|
||||||
|
newHooks = append(newHooks, hooks...)
|
||||||
|
return Chain{newHooks}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extend extends a chain, adding the specified chain
|
||||||
|
// as the last ones in the mutation flow.
|
||||||
|
func (c Chain) Extend(chain Chain) Chain {
|
||||||
|
return c.Append(chain.hooks...)
|
||||||
|
}
|
||||||
569
backend/ent/intercept/intercept.go
Normal file
569
backend/ent/intercept/intercept.go
Normal file
@@ -0,0 +1,569 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package intercept
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userallowedgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usersubscription"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The Query interface represents an operation that queries a graph.
|
||||||
|
// By using this interface, users can write generic code that manipulates
|
||||||
|
// query builders of different types.
|
||||||
|
type Query interface {
|
||||||
|
// Type returns the string representation of the query type.
|
||||||
|
Type() string
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
Limit(int)
|
||||||
|
// Offset to start from.
|
||||||
|
Offset(int)
|
||||||
|
// Unique configures the query builder to filter duplicate records.
|
||||||
|
Unique(bool)
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
Order(...func(*sql.Selector))
|
||||||
|
// WhereP appends storage-level predicates to the query builder. Using this method, users
|
||||||
|
// can use type-assertion to append predicates that do not depend on any generated package.
|
||||||
|
WhereP(...func(*sql.Selector))
|
||||||
|
}
|
||||||
|
|
||||||
|
// The Func type is an adapter that allows ordinary functions to be used as interceptors.
|
||||||
|
// Unlike traversal functions, interceptors are skipped during graph traversals. Note that the
|
||||||
|
// implementation of Func is different from the one defined in entgo.io/ent.InterceptFunc.
|
||||||
|
type Func func(context.Context, Query) error
|
||||||
|
|
||||||
|
// Intercept calls f(ctx, q) and then applied the next Querier.
|
||||||
|
func (f Func) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return ent.QuerierFunc(func(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
query, err := NewQuery(q)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := f(ctx, query); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return next.Query(ctx, q)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseFunc type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
// If f is a function with the appropriate signature, TraverseFunc(f) is a Traverser that calls f.
|
||||||
|
type TraverseFunc func(context.Context, Query) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseFunc) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseFunc) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
query, err := NewQuery(q)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return f(ctx, query)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The APIKeyFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type APIKeyFunc func(context.Context, *ent.APIKeyQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f APIKeyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.APIKeyQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.APIKeyQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseAPIKey type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseAPIKey func(context.Context, *ent.APIKeyQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseAPIKey) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseAPIKey) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.APIKeyQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.APIKeyQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The AccountFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type AccountFunc func(context.Context, *ent.AccountQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f AccountFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.AccountQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.AccountQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseAccount type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseAccount func(context.Context, *ent.AccountQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseAccount) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseAccount) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.AccountQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.AccountQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The AccountGroupFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type AccountGroupFunc func(context.Context, *ent.AccountGroupQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f AccountGroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.AccountGroupQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseAccountGroup type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseAccountGroup func(context.Context, *ent.AccountGroupQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseAccountGroup) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseAccountGroup) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.AccountGroupQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.AccountGroupQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The GroupFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type GroupFunc func(context.Context, *ent.GroupQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f GroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.GroupQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseGroup type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseGroup func(context.Context, *ent.GroupQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseGroup) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.GroupQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f PromoCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.PromoCodeQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraversePromoCode type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraversePromoCode func(context.Context, *ent.PromoCodeQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraversePromoCode) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraversePromoCode) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.PromoCodeQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f PromoCodeUsageFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.PromoCodeUsageQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraversePromoCodeUsage type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraversePromoCodeUsage func(context.Context, *ent.PromoCodeUsageQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraversePromoCodeUsage) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraversePromoCodeUsage) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.PromoCodeUsageQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The ProxyFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type ProxyFunc func(context.Context, *ent.ProxyQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f ProxyFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.ProxyQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.ProxyQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseProxy type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseProxy func(context.Context, *ent.ProxyQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseProxy) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseProxy) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.ProxyQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.ProxyQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The RedeemCodeFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type RedeemCodeFunc func(context.Context, *ent.RedeemCodeQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f RedeemCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.RedeemCodeQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseRedeemCode type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseRedeemCode func(context.Context, *ent.RedeemCodeQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseRedeemCode) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseRedeemCode) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.RedeemCodeQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.RedeemCodeQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The SettingFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type SettingFunc func(context.Context, *ent.SettingQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f SettingFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.SettingQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseSetting type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseSetting func(context.Context, *ent.SettingQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseSetting) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseSetting) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.SettingQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.SettingQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UsageLogFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type UsageLogFunc func(context.Context, *ent.UsageLogQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f UsageLogFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.UsageLogQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.UsageLogQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseUsageLog type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseUsageLog func(context.Context, *ent.UsageLogQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseUsageLog) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseUsageLog) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.UsageLogQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.UsageLogQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UserFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type UserFunc func(context.Context, *ent.UserQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f UserFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.UserQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseUser type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseUser func(context.Context, *ent.UserQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseUser) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseUser) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.UserQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.UserQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UserAllowedGroupFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type UserAllowedGroupFunc func(context.Context, *ent.UserAllowedGroupQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f UserAllowedGroupFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.UserAllowedGroupQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseUserAllowedGroup type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseUserAllowedGroup func(context.Context, *ent.UserAllowedGroupQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseUserAllowedGroup) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseUserAllowedGroup) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.UserAllowedGroupQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.UserAllowedGroupQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UserAttributeDefinitionFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type UserAttributeDefinitionFunc func(context.Context, *ent.UserAttributeDefinitionQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f UserAttributeDefinitionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.UserAttributeDefinitionQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeDefinitionQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseUserAttributeDefinition type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseUserAttributeDefinition func(context.Context, *ent.UserAttributeDefinitionQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseUserAttributeDefinition) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseUserAttributeDefinition) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.UserAttributeDefinitionQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeDefinitionQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UserAttributeValueFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type UserAttributeValueFunc func(context.Context, *ent.UserAttributeValueQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f UserAttributeValueFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.UserAttributeValueQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeValueQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseUserAttributeValue type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseUserAttributeValue func(context.Context, *ent.UserAttributeValueQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseUserAttributeValue) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseUserAttributeValue) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.UserAttributeValueQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.UserAttributeValueQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The UserSubscriptionFunc type is an adapter to allow the use of ordinary function as a Querier.
|
||||||
|
type UserSubscriptionFunc func(context.Context, *ent.UserSubscriptionQuery) (ent.Value, error)
|
||||||
|
|
||||||
|
// Query calls f(ctx, q).
|
||||||
|
func (f UserSubscriptionFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) {
|
||||||
|
if q, ok := q.(*ent.UserSubscriptionQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("unexpected query type %T. expect *ent.UserSubscriptionQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The TraverseUserSubscription type is an adapter to allow the use of ordinary function as Traverser.
|
||||||
|
type TraverseUserSubscription func(context.Context, *ent.UserSubscriptionQuery) error
|
||||||
|
|
||||||
|
// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline.
|
||||||
|
func (f TraverseUserSubscription) Intercept(next ent.Querier) ent.Querier {
|
||||||
|
return next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Traverse calls f(ctx, q).
|
||||||
|
func (f TraverseUserSubscription) Traverse(ctx context.Context, q ent.Query) error {
|
||||||
|
if q, ok := q.(*ent.UserSubscriptionQuery); ok {
|
||||||
|
return f(ctx, q)
|
||||||
|
}
|
||||||
|
return fmt.Errorf("unexpected query type %T. expect *ent.UserSubscriptionQuery", q)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewQuery returns the generic Query interface for the given typed query.
|
||||||
|
func NewQuery(q ent.Query) (Query, error) {
|
||||||
|
switch q := q.(type) {
|
||||||
|
case *ent.APIKeyQuery:
|
||||||
|
return &query[*ent.APIKeyQuery, predicate.APIKey, apikey.OrderOption]{typ: ent.TypeAPIKey, tq: q}, nil
|
||||||
|
case *ent.AccountQuery:
|
||||||
|
return &query[*ent.AccountQuery, predicate.Account, account.OrderOption]{typ: ent.TypeAccount, tq: q}, nil
|
||||||
|
case *ent.AccountGroupQuery:
|
||||||
|
return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil
|
||||||
|
case *ent.GroupQuery:
|
||||||
|
return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil
|
||||||
|
case *ent.PromoCodeQuery:
|
||||||
|
return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil
|
||||||
|
case *ent.PromoCodeUsageQuery:
|
||||||
|
return &query[*ent.PromoCodeUsageQuery, predicate.PromoCodeUsage, promocodeusage.OrderOption]{typ: ent.TypePromoCodeUsage, tq: q}, nil
|
||||||
|
case *ent.ProxyQuery:
|
||||||
|
return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil
|
||||||
|
case *ent.RedeemCodeQuery:
|
||||||
|
return &query[*ent.RedeemCodeQuery, predicate.RedeemCode, redeemcode.OrderOption]{typ: ent.TypeRedeemCode, tq: q}, nil
|
||||||
|
case *ent.SettingQuery:
|
||||||
|
return &query[*ent.SettingQuery, predicate.Setting, setting.OrderOption]{typ: ent.TypeSetting, tq: q}, nil
|
||||||
|
case *ent.UsageLogQuery:
|
||||||
|
return &query[*ent.UsageLogQuery, predicate.UsageLog, usagelog.OrderOption]{typ: ent.TypeUsageLog, tq: q}, nil
|
||||||
|
case *ent.UserQuery:
|
||||||
|
return &query[*ent.UserQuery, predicate.User, user.OrderOption]{typ: ent.TypeUser, tq: q}, nil
|
||||||
|
case *ent.UserAllowedGroupQuery:
|
||||||
|
return &query[*ent.UserAllowedGroupQuery, predicate.UserAllowedGroup, userallowedgroup.OrderOption]{typ: ent.TypeUserAllowedGroup, tq: q}, nil
|
||||||
|
case *ent.UserAttributeDefinitionQuery:
|
||||||
|
return &query[*ent.UserAttributeDefinitionQuery, predicate.UserAttributeDefinition, userattributedefinition.OrderOption]{typ: ent.TypeUserAttributeDefinition, tq: q}, nil
|
||||||
|
case *ent.UserAttributeValueQuery:
|
||||||
|
return &query[*ent.UserAttributeValueQuery, predicate.UserAttributeValue, userattributevalue.OrderOption]{typ: ent.TypeUserAttributeValue, tq: q}, nil
|
||||||
|
case *ent.UserSubscriptionQuery:
|
||||||
|
return &query[*ent.UserSubscriptionQuery, predicate.UserSubscription, usersubscription.OrderOption]{typ: ent.TypeUserSubscription, tq: q}, nil
|
||||||
|
default:
|
||||||
|
return nil, fmt.Errorf("unknown query type %T", q)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type query[T any, P ~func(*sql.Selector), R ~func(*sql.Selector)] struct {
|
||||||
|
typ string
|
||||||
|
tq interface {
|
||||||
|
Limit(int) T
|
||||||
|
Offset(int) T
|
||||||
|
Unique(bool) T
|
||||||
|
Order(...R) T
|
||||||
|
Where(...P) T
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q query[T, P, R]) Type() string {
|
||||||
|
return q.typ
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q query[T, P, R]) Limit(limit int) {
|
||||||
|
q.tq.Limit(limit)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q query[T, P, R]) Offset(offset int) {
|
||||||
|
q.tq.Offset(offset)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q query[T, P, R]) Unique(unique bool) {
|
||||||
|
q.tq.Unique(unique)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q query[T, P, R]) Order(orders ...func(*sql.Selector)) {
|
||||||
|
rs := make([]R, len(orders))
|
||||||
|
for i := range orders {
|
||||||
|
rs[i] = orders[i]
|
||||||
|
}
|
||||||
|
q.tq.Order(rs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (q query[T, P, R]) WhereP(ps ...func(*sql.Selector)) {
|
||||||
|
p := make([]P, len(ps))
|
||||||
|
for i := range ps {
|
||||||
|
p[i] = ps[i]
|
||||||
|
}
|
||||||
|
q.tq.Where(p...)
|
||||||
|
}
|
||||||
64
backend/ent/migrate/migrate.go
Normal file
64
backend/ent/migrate/migrate.go
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package migrate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql/schema"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// WithGlobalUniqueID sets the universal ids options to the migration.
|
||||||
|
// If this option is enabled, ent migration will allocate a 1<<32 range
|
||||||
|
// for the ids of each entity (table).
|
||||||
|
// Note that this option cannot be applied on tables that already exist.
|
||||||
|
WithGlobalUniqueID = schema.WithGlobalUniqueID
|
||||||
|
// WithDropColumn sets the drop column option to the migration.
|
||||||
|
// If this option is enabled, ent migration will drop old columns
|
||||||
|
// that were used for both fields and edges. This defaults to false.
|
||||||
|
WithDropColumn = schema.WithDropColumn
|
||||||
|
// WithDropIndex sets the drop index option to the migration.
|
||||||
|
// If this option is enabled, ent migration will drop old indexes
|
||||||
|
// that were defined in the schema. This defaults to false.
|
||||||
|
// Note that unique constraints are defined using `UNIQUE INDEX`,
|
||||||
|
// and therefore, it's recommended to enable this option to get more
|
||||||
|
// flexibility in the schema changes.
|
||||||
|
WithDropIndex = schema.WithDropIndex
|
||||||
|
// WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true.
|
||||||
|
WithForeignKeys = schema.WithForeignKeys
|
||||||
|
)
|
||||||
|
|
||||||
|
// Schema is the API for creating, migrating and dropping a schema.
|
||||||
|
type Schema struct {
|
||||||
|
drv dialect.Driver
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSchema creates a new schema client.
|
||||||
|
func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} }
|
||||||
|
|
||||||
|
// Create creates all schema resources.
|
||||||
|
func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error {
|
||||||
|
return Create(ctx, s, Tables, opts...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create creates all table resources using the given schema driver.
|
||||||
|
func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error {
|
||||||
|
migrate, err := schema.NewMigrate(s.drv, opts...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("ent/migrate: %w", err)
|
||||||
|
}
|
||||||
|
return migrate.Create(ctx, tables...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WriteTo writes the schema changes to w instead of running them against the database.
|
||||||
|
//
|
||||||
|
// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil {
|
||||||
|
// log.Fatal(err)
|
||||||
|
// }
|
||||||
|
func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error {
|
||||||
|
return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...)
|
||||||
|
}
|
||||||
882
backend/ent/migrate/schema.go
Normal file
882
backend/ent/migrate/schema.go
Normal file
@@ -0,0 +1,882 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package migrate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/dialect/sql/schema"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// APIKeysColumns holds the columns for the "api_keys" table.
|
||||||
|
APIKeysColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "key", Type: field.TypeString, Unique: true, Size: 128},
|
||||||
|
{Name: "name", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "ip_whitelist", Type: field.TypeJSON, Nullable: true},
|
||||||
|
{Name: "ip_blacklist", Type: field.TypeJSON, Nullable: true},
|
||||||
|
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
}
|
||||||
|
// APIKeysTable holds the schema information for the "api_keys" table.
|
||||||
|
APIKeysTable = &schema.Table{
|
||||||
|
Name: "api_keys",
|
||||||
|
Columns: APIKeysColumns,
|
||||||
|
PrimaryKey: []*schema.Column{APIKeysColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "api_keys_groups_api_keys",
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[9]},
|
||||||
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "api_keys_users_api_keys",
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[10]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "apikey_user_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[10]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "apikey_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[9]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "apikey_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[6]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "apikey_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{APIKeysColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// AccountsColumns holds the columns for the "accounts" table.
|
||||||
|
AccountsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "name", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "platform", Type: field.TypeString, Size: 50},
|
||||||
|
{Name: "type", Type: field.TypeString, Size: 20},
|
||||||
|
{Name: "credentials", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
|
{Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
|
{Name: "concurrency", Type: field.TypeInt, Default: 3},
|
||||||
|
{Name: "priority", Type: field.TypeInt, Default: 50},
|
||||||
|
{Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "error_message", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "last_used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "auto_pause_on_expired", Type: field.TypeBool, Default: true},
|
||||||
|
{Name: "schedulable", Type: field.TypeBool, Default: true},
|
||||||
|
{Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "session_window_status", Type: field.TypeString, Nullable: true, Size: 20},
|
||||||
|
{Name: "proxy_id", Type: field.TypeInt64, Nullable: true},
|
||||||
|
}
|
||||||
|
// AccountsTable holds the schema information for the "accounts" table.
|
||||||
|
AccountsTable = &schema.Table{
|
||||||
|
Name: "accounts",
|
||||||
|
Columns: AccountsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{AccountsColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "accounts_proxies_proxy",
|
||||||
|
Columns: []*schema.Column{AccountsColumns[25]},
|
||||||
|
RefColumns: []*schema.Column{ProxiesColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "account_platform",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[6]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_type",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[7]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[13]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_proxy_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[25]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_priority",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[11]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_last_used_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[15]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_schedulable",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[18]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_rate_limited_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[19]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_rate_limit_reset_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[20]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_overload_until",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[21]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "account_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountsColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// AccountGroupsColumns holds the columns for the "account_groups" table.
|
||||||
|
AccountGroupsColumns = []*schema.Column{
|
||||||
|
{Name: "priority", Type: field.TypeInt, Default: 50},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "account_id", Type: field.TypeInt64},
|
||||||
|
{Name: "group_id", Type: field.TypeInt64},
|
||||||
|
}
|
||||||
|
// AccountGroupsTable holds the schema information for the "account_groups" table.
|
||||||
|
AccountGroupsTable = &schema.Table{
|
||||||
|
Name: "account_groups",
|
||||||
|
Columns: AccountGroupsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{AccountGroupsColumns[2], AccountGroupsColumns[3]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "account_groups_accounts_account",
|
||||||
|
Columns: []*schema.Column{AccountGroupsColumns[2]},
|
||||||
|
RefColumns: []*schema.Column{AccountsColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "account_groups_groups_group",
|
||||||
|
Columns: []*schema.Column{AccountGroupsColumns[3]},
|
||||||
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "accountgroup_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountGroupsColumns[3]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "accountgroup_priority",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{AccountGroupsColumns[0]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// GroupsColumns holds the columns for the "groups" table.
|
||||||
|
GroupsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "name", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "description", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
||||||
|
{Name: "is_exclusive", Type: field.TypeBool, Default: false},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "platform", Type: field.TypeString, Size: 50, Default: "anthropic"},
|
||||||
|
{Name: "subscription_type", Type: field.TypeString, Size: 20, Default: "standard"},
|
||||||
|
{Name: "daily_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "weekly_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "monthly_limit_usd", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "default_validity_days", Type: field.TypeInt, Default: 30},
|
||||||
|
{Name: "image_price_1k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "image_price_2k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "claude_code_only", Type: field.TypeBool, Default: false},
|
||||||
|
{Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
|
}
|
||||||
|
// GroupsTable holds the schema information for the "groups" table.
|
||||||
|
GroupsTable = &schema.Table{
|
||||||
|
Name: "groups",
|
||||||
|
Columns: GroupsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{GroupsColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "group_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{GroupsColumns[8]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "group_platform",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{GroupsColumns[9]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "group_subscription_type",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{GroupsColumns[10]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "group_is_exclusive",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{GroupsColumns[7]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "group_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{GroupsColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// PromoCodesColumns holds the columns for the "promo_codes" table.
|
||||||
|
PromoCodesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "code", Type: field.TypeString, Unique: true, Size: 32},
|
||||||
|
{Name: "bonus_amount", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "max_uses", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "used_count", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
}
|
||||||
|
// PromoCodesTable holds the schema information for the "promo_codes" table.
|
||||||
|
PromoCodesTable = &schema.Table{
|
||||||
|
Name: "promo_codes",
|
||||||
|
Columns: PromoCodesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{PromoCodesColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "promocode_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodesColumns[5]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "promocode_expires_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodesColumns[6]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// PromoCodeUsagesColumns holds the columns for the "promo_code_usages" table.
|
||||||
|
PromoCodeUsagesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "bonus_amount", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "used_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "promo_code_id", Type: field.TypeInt64},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
}
|
||||||
|
// PromoCodeUsagesTable holds the schema information for the "promo_code_usages" table.
|
||||||
|
PromoCodeUsagesTable = &schema.Table{
|
||||||
|
Name: "promo_code_usages",
|
||||||
|
Columns: PromoCodeUsagesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{PromoCodeUsagesColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "promo_code_usages_promo_codes_usage_records",
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[3]},
|
||||||
|
RefColumns: []*schema.Column{PromoCodesColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "promo_code_usages_users_promo_code_usages",
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[4]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "promocodeusage_promo_code_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[3]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "promocodeusage_user_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[4]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "promocodeusage_promo_code_id_user_id",
|
||||||
|
Unique: true,
|
||||||
|
Columns: []*schema.Column{PromoCodeUsagesColumns[3], PromoCodeUsagesColumns[4]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// ProxiesColumns holds the columns for the "proxies" table.
|
||||||
|
ProxiesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "name", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "protocol", Type: field.TypeString, Size: 20},
|
||||||
|
{Name: "host", Type: field.TypeString, Size: 255},
|
||||||
|
{Name: "port", Type: field.TypeInt},
|
||||||
|
{Name: "username", Type: field.TypeString, Nullable: true, Size: 100},
|
||||||
|
{Name: "password", Type: field.TypeString, Nullable: true, Size: 100},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
}
|
||||||
|
// ProxiesTable holds the schema information for the "proxies" table.
|
||||||
|
ProxiesTable = &schema.Table{
|
||||||
|
Name: "proxies",
|
||||||
|
Columns: ProxiesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{ProxiesColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "proxy_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{ProxiesColumns[10]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "proxy_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{ProxiesColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// RedeemCodesColumns holds the columns for the "redeem_codes" table.
|
||||||
|
RedeemCodesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "code", Type: field.TypeString, Unique: true, Size: 32},
|
||||||
|
{Name: "type", Type: field.TypeString, Size: 20, Default: "balance"},
|
||||||
|
{Name: "value", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "unused"},
|
||||||
|
{Name: "used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "validity_days", Type: field.TypeInt, Default: 30},
|
||||||
|
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
|
{Name: "used_by", Type: field.TypeInt64, Nullable: true},
|
||||||
|
}
|
||||||
|
// RedeemCodesTable holds the schema information for the "redeem_codes" table.
|
||||||
|
RedeemCodesTable = &schema.Table{
|
||||||
|
Name: "redeem_codes",
|
||||||
|
Columns: RedeemCodesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{RedeemCodesColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "redeem_codes_groups_redeem_codes",
|
||||||
|
Columns: []*schema.Column{RedeemCodesColumns[9]},
|
||||||
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "redeem_codes_users_redeem_codes",
|
||||||
|
Columns: []*schema.Column{RedeemCodesColumns[10]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "redeemcode_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{RedeemCodesColumns[4]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "redeemcode_used_by",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{RedeemCodesColumns[10]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "redeemcode_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{RedeemCodesColumns[9]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// SettingsColumns holds the columns for the "settings" table.
|
||||||
|
SettingsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "key", Type: field.TypeString, Unique: true, Size: 100},
|
||||||
|
{Name: "value", Type: field.TypeString, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
}
|
||||||
|
// SettingsTable holds the schema information for the "settings" table.
|
||||||
|
SettingsTable = &schema.Table{
|
||||||
|
Name: "settings",
|
||||||
|
Columns: SettingsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{SettingsColumns[0]},
|
||||||
|
}
|
||||||
|
// UsageLogsColumns holds the columns for the "usage_logs" table.
|
||||||
|
UsageLogsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "request_id", Type: field.TypeString, Size: 64},
|
||||||
|
{Name: "model", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "input_tokens", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "output_tokens", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "cache_creation_tokens", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "cache_read_tokens", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "cache_creation_5m_tokens", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "cache_creation_1h_tokens", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "input_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "output_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "cache_creation_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "cache_read_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "total_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "actual_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
||||||
|
{Name: "account_rate_multiplier", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
|
||||||
|
{Name: "billing_type", Type: field.TypeInt8, Default: 0},
|
||||||
|
{Name: "stream", Type: field.TypeBool, Default: false},
|
||||||
|
{Name: "duration_ms", Type: field.TypeInt, Nullable: true},
|
||||||
|
{Name: "first_token_ms", Type: field.TypeInt, Nullable: true},
|
||||||
|
{Name: "user_agent", Type: field.TypeString, Nullable: true, Size: 512},
|
||||||
|
{Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45},
|
||||||
|
{Name: "image_count", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "api_key_id", Type: field.TypeInt64},
|
||||||
|
{Name: "account_id", Type: field.TypeInt64},
|
||||||
|
{Name: "group_id", Type: field.TypeInt64, Nullable: true},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
{Name: "subscription_id", Type: field.TypeInt64, Nullable: true},
|
||||||
|
}
|
||||||
|
// UsageLogsTable holds the schema information for the "usage_logs" table.
|
||||||
|
UsageLogsTable = &schema.Table{
|
||||||
|
Name: "usage_logs",
|
||||||
|
Columns: UsageLogsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{UsageLogsColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "usage_logs_api_keys_usage_logs",
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[26]},
|
||||||
|
RefColumns: []*schema.Column{APIKeysColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "usage_logs_accounts_usage_logs",
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[27]},
|
||||||
|
RefColumns: []*schema.Column{AccountsColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "usage_logs_groups_usage_logs",
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[28]},
|
||||||
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "usage_logs_users_usage_logs",
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "usage_logs_user_subscriptions_usage_logs",
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[30]},
|
||||||
|
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "usagelog_user_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[29]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_api_key_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[26]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_account_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[27]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[28]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_subscription_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[30]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_created_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[25]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_model",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[2]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_request_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[1]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_user_id_created_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[25]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usagelog_api_key_id_created_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsageLogsColumns[26], UsageLogsColumns[25]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// UsersColumns holds the columns for the "users" table.
|
||||||
|
UsersColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "email", Type: field.TypeString, Size: 255},
|
||||||
|
{Name: "password_hash", Type: field.TypeString, Size: 255},
|
||||||
|
{Name: "role", Type: field.TypeString, Size: 20, Default: "user"},
|
||||||
|
{Name: "balance", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}},
|
||||||
|
{Name: "concurrency", Type: field.TypeInt, Default: 5},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "username", Type: field.TypeString, Size: 100, Default: ""},
|
||||||
|
{Name: "notes", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
}
|
||||||
|
// UsersTable holds the schema information for the "users" table.
|
||||||
|
UsersTable = &schema.Table{
|
||||||
|
Name: "users",
|
||||||
|
Columns: UsersColumns,
|
||||||
|
PrimaryKey: []*schema.Column{UsersColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "user_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsersColumns[9]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "user_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UsersColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// UserAllowedGroupsColumns holds the columns for the "user_allowed_groups" table.
|
||||||
|
UserAllowedGroupsColumns = []*schema.Column{
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
{Name: "group_id", Type: field.TypeInt64},
|
||||||
|
}
|
||||||
|
// UserAllowedGroupsTable holds the schema information for the "user_allowed_groups" table.
|
||||||
|
UserAllowedGroupsTable = &schema.Table{
|
||||||
|
Name: "user_allowed_groups",
|
||||||
|
Columns: UserAllowedGroupsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{UserAllowedGroupsColumns[1], UserAllowedGroupsColumns[2]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "user_allowed_groups_users_user",
|
||||||
|
Columns: []*schema.Column{UserAllowedGroupsColumns[1]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "user_allowed_groups_groups_group",
|
||||||
|
Columns: []*schema.Column{UserAllowedGroupsColumns[2]},
|
||||||
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "userallowedgroup_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserAllowedGroupsColumns[2]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// UserAttributeDefinitionsColumns holds the columns for the "user_attribute_definitions" table.
|
||||||
|
UserAttributeDefinitionsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "key", Type: field.TypeString, Size: 100},
|
||||||
|
{Name: "name", Type: field.TypeString, Size: 255},
|
||||||
|
{Name: "description", Type: field.TypeString, Default: "", SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "type", Type: field.TypeString, Size: 20},
|
||||||
|
{Name: "options", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
|
{Name: "required", Type: field.TypeBool, Default: false},
|
||||||
|
{Name: "validation", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
|
||||||
|
{Name: "placeholder", Type: field.TypeString, Size: 255, Default: ""},
|
||||||
|
{Name: "display_order", Type: field.TypeInt, Default: 0},
|
||||||
|
{Name: "enabled", Type: field.TypeBool, Default: true},
|
||||||
|
}
|
||||||
|
// UserAttributeDefinitionsTable holds the schema information for the "user_attribute_definitions" table.
|
||||||
|
UserAttributeDefinitionsTable = &schema.Table{
|
||||||
|
Name: "user_attribute_definitions",
|
||||||
|
Columns: UserAttributeDefinitionsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{UserAttributeDefinitionsColumns[0]},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "userattributedefinition_key",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserAttributeDefinitionsColumns[4]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "userattributedefinition_enabled",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserAttributeDefinitionsColumns[13]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "userattributedefinition_display_order",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserAttributeDefinitionsColumns[12]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "userattributedefinition_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserAttributeDefinitionsColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// UserAttributeValuesColumns holds the columns for the "user_attribute_values" table.
|
||||||
|
UserAttributeValuesColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "value", Type: field.TypeString, Size: 2147483647, Default: ""},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
{Name: "attribute_id", Type: field.TypeInt64},
|
||||||
|
}
|
||||||
|
// UserAttributeValuesTable holds the schema information for the "user_attribute_values" table.
|
||||||
|
UserAttributeValuesTable = &schema.Table{
|
||||||
|
Name: "user_attribute_values",
|
||||||
|
Columns: UserAttributeValuesColumns,
|
||||||
|
PrimaryKey: []*schema.Column{UserAttributeValuesColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "user_attribute_values_users_attribute_values",
|
||||||
|
Columns: []*schema.Column{UserAttributeValuesColumns[4]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "user_attribute_values_user_attribute_definitions_values",
|
||||||
|
Columns: []*schema.Column{UserAttributeValuesColumns[5]},
|
||||||
|
RefColumns: []*schema.Column{UserAttributeDefinitionsColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "userattributevalue_user_id_attribute_id",
|
||||||
|
Unique: true,
|
||||||
|
Columns: []*schema.Column{UserAttributeValuesColumns[4], UserAttributeValuesColumns[5]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "userattributevalue_attribute_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserAttributeValuesColumns[5]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// UserSubscriptionsColumns holds the columns for the "user_subscriptions" table.
|
||||||
|
UserSubscriptionsColumns = []*schema.Column{
|
||||||
|
{Name: "id", Type: field.TypeInt64, Increment: true},
|
||||||
|
{Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "deleted_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "starts_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "expires_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
|
||||||
|
{Name: "daily_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "weekly_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "monthly_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "daily_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "weekly_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "monthly_usage_usd", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
|
||||||
|
{Name: "assigned_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}},
|
||||||
|
{Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
|
||||||
|
{Name: "group_id", Type: field.TypeInt64},
|
||||||
|
{Name: "user_id", Type: field.TypeInt64},
|
||||||
|
{Name: "assigned_by", Type: field.TypeInt64, Nullable: true},
|
||||||
|
}
|
||||||
|
// UserSubscriptionsTable holds the schema information for the "user_subscriptions" table.
|
||||||
|
UserSubscriptionsTable = &schema.Table{
|
||||||
|
Name: "user_subscriptions",
|
||||||
|
Columns: UserSubscriptionsColumns,
|
||||||
|
PrimaryKey: []*schema.Column{UserSubscriptionsColumns[0]},
|
||||||
|
ForeignKeys: []*schema.ForeignKey{
|
||||||
|
{
|
||||||
|
Symbol: "user_subscriptions_groups_subscriptions",
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[15]},
|
||||||
|
RefColumns: []*schema.Column{GroupsColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "user_subscriptions_users_subscriptions",
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[16]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.NoAction,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Symbol: "user_subscriptions_users_assigned_subscriptions",
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[17]},
|
||||||
|
RefColumns: []*schema.Column{UsersColumns[0]},
|
||||||
|
OnDelete: schema.SetNull,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Indexes: []*schema.Index{
|
||||||
|
{
|
||||||
|
Name: "usersubscription_user_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[16]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[15]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_status",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[6]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_expires_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[5]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_assigned_by",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[17]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_user_id_group_id",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[16], UserSubscriptionsColumns[15]},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
Name: "usersubscription_deleted_at",
|
||||||
|
Unique: false,
|
||||||
|
Columns: []*schema.Column{UserSubscriptionsColumns[3]},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
// Tables holds all the tables in the schema.
|
||||||
|
Tables = []*schema.Table{
|
||||||
|
APIKeysTable,
|
||||||
|
AccountsTable,
|
||||||
|
AccountGroupsTable,
|
||||||
|
GroupsTable,
|
||||||
|
PromoCodesTable,
|
||||||
|
PromoCodeUsagesTable,
|
||||||
|
ProxiesTable,
|
||||||
|
RedeemCodesTable,
|
||||||
|
SettingsTable,
|
||||||
|
UsageLogsTable,
|
||||||
|
UsersTable,
|
||||||
|
UserAllowedGroupsTable,
|
||||||
|
UserAttributeDefinitionsTable,
|
||||||
|
UserAttributeValuesTable,
|
||||||
|
UserSubscriptionsTable,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
APIKeysTable.ForeignKeys[0].RefTable = GroupsTable
|
||||||
|
APIKeysTable.ForeignKeys[1].RefTable = UsersTable
|
||||||
|
APIKeysTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "api_keys",
|
||||||
|
}
|
||||||
|
AccountsTable.ForeignKeys[0].RefTable = ProxiesTable
|
||||||
|
AccountsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "accounts",
|
||||||
|
}
|
||||||
|
AccountGroupsTable.ForeignKeys[0].RefTable = AccountsTable
|
||||||
|
AccountGroupsTable.ForeignKeys[1].RefTable = GroupsTable
|
||||||
|
AccountGroupsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "account_groups",
|
||||||
|
}
|
||||||
|
GroupsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "groups",
|
||||||
|
}
|
||||||
|
PromoCodesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "promo_codes",
|
||||||
|
}
|
||||||
|
PromoCodeUsagesTable.ForeignKeys[0].RefTable = PromoCodesTable
|
||||||
|
PromoCodeUsagesTable.ForeignKeys[1].RefTable = UsersTable
|
||||||
|
PromoCodeUsagesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "promo_code_usages",
|
||||||
|
}
|
||||||
|
ProxiesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "proxies",
|
||||||
|
}
|
||||||
|
RedeemCodesTable.ForeignKeys[0].RefTable = GroupsTable
|
||||||
|
RedeemCodesTable.ForeignKeys[1].RefTable = UsersTable
|
||||||
|
RedeemCodesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "redeem_codes",
|
||||||
|
}
|
||||||
|
SettingsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "settings",
|
||||||
|
}
|
||||||
|
UsageLogsTable.ForeignKeys[0].RefTable = APIKeysTable
|
||||||
|
UsageLogsTable.ForeignKeys[1].RefTable = AccountsTable
|
||||||
|
UsageLogsTable.ForeignKeys[2].RefTable = GroupsTable
|
||||||
|
UsageLogsTable.ForeignKeys[3].RefTable = UsersTable
|
||||||
|
UsageLogsTable.ForeignKeys[4].RefTable = UserSubscriptionsTable
|
||||||
|
UsageLogsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "usage_logs",
|
||||||
|
}
|
||||||
|
UsersTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "users",
|
||||||
|
}
|
||||||
|
UserAllowedGroupsTable.ForeignKeys[0].RefTable = UsersTable
|
||||||
|
UserAllowedGroupsTable.ForeignKeys[1].RefTable = GroupsTable
|
||||||
|
UserAllowedGroupsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "user_allowed_groups",
|
||||||
|
}
|
||||||
|
UserAttributeDefinitionsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "user_attribute_definitions",
|
||||||
|
}
|
||||||
|
UserAttributeValuesTable.ForeignKeys[0].RefTable = UsersTable
|
||||||
|
UserAttributeValuesTable.ForeignKeys[1].RefTable = UserAttributeDefinitionsTable
|
||||||
|
UserAttributeValuesTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "user_attribute_values",
|
||||||
|
}
|
||||||
|
UserSubscriptionsTable.ForeignKeys[0].RefTable = GroupsTable
|
||||||
|
UserSubscriptionsTable.ForeignKeys[1].RefTable = UsersTable
|
||||||
|
UserSubscriptionsTable.ForeignKeys[2].RefTable = UsersTable
|
||||||
|
UserSubscriptionsTable.Annotation = &entsql.Annotation{
|
||||||
|
Table: "user_subscriptions",
|
||||||
|
}
|
||||||
|
}
|
||||||
18629
backend/ent/mutation.go
Normal file
18629
backend/ent/mutation.go
Normal file
File diff suppressed because it is too large
Load Diff
52
backend/ent/predicate/predicate.go
Normal file
52
backend/ent/predicate/predicate.go
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package predicate
|
||||||
|
|
||||||
|
import (
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKey is the predicate function for apikey builders.
|
||||||
|
type APIKey func(*sql.Selector)
|
||||||
|
|
||||||
|
// Account is the predicate function for account builders.
|
||||||
|
type Account func(*sql.Selector)
|
||||||
|
|
||||||
|
// AccountGroup is the predicate function for accountgroup builders.
|
||||||
|
type AccountGroup func(*sql.Selector)
|
||||||
|
|
||||||
|
// Group is the predicate function for group builders.
|
||||||
|
type Group func(*sql.Selector)
|
||||||
|
|
||||||
|
// PromoCode is the predicate function for promocode builders.
|
||||||
|
type PromoCode func(*sql.Selector)
|
||||||
|
|
||||||
|
// PromoCodeUsage is the predicate function for promocodeusage builders.
|
||||||
|
type PromoCodeUsage func(*sql.Selector)
|
||||||
|
|
||||||
|
// Proxy is the predicate function for proxy builders.
|
||||||
|
type Proxy func(*sql.Selector)
|
||||||
|
|
||||||
|
// RedeemCode is the predicate function for redeemcode builders.
|
||||||
|
type RedeemCode func(*sql.Selector)
|
||||||
|
|
||||||
|
// Setting is the predicate function for setting builders.
|
||||||
|
type Setting func(*sql.Selector)
|
||||||
|
|
||||||
|
// UsageLog is the predicate function for usagelog builders.
|
||||||
|
type UsageLog func(*sql.Selector)
|
||||||
|
|
||||||
|
// User is the predicate function for user builders.
|
||||||
|
type User func(*sql.Selector)
|
||||||
|
|
||||||
|
// UserAllowedGroup is the predicate function for userallowedgroup builders.
|
||||||
|
type UserAllowedGroup func(*sql.Selector)
|
||||||
|
|
||||||
|
// UserAttributeDefinition is the predicate function for userattributedefinition builders.
|
||||||
|
type UserAttributeDefinition func(*sql.Selector)
|
||||||
|
|
||||||
|
// UserAttributeValue is the predicate function for userattributevalue builders.
|
||||||
|
type UserAttributeValue func(*sql.Selector)
|
||||||
|
|
||||||
|
// UserSubscription is the predicate function for usersubscription builders.
|
||||||
|
type UserSubscription func(*sql.Selector)
|
||||||
228
backend/ent/promocode.go
Normal file
228
backend/ent/promocode.go
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCode is the model entity for the PromoCode schema.
|
||||||
|
type PromoCode struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// 优惠码
|
||||||
|
Code string `json:"code,omitempty"`
|
||||||
|
// 赠送余额金额
|
||||||
|
BonusAmount float64 `json:"bonus_amount,omitempty"`
|
||||||
|
// 最大使用次数,0表示无限制
|
||||||
|
MaxUses int `json:"max_uses,omitempty"`
|
||||||
|
// 已使用次数
|
||||||
|
UsedCount int `json:"used_count,omitempty"`
|
||||||
|
// 状态: active, disabled
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// 过期时间,null表示永不过期
|
||||||
|
ExpiresAt *time.Time `json:"expires_at,omitempty"`
|
||||||
|
// 备注
|
||||||
|
Notes *string `json:"notes,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the PromoCodeQuery when eager-loading is set.
|
||||||
|
Edges PromoCodeEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type PromoCodeEdges struct {
|
||||||
|
// UsageRecords holds the value of the usage_records edge.
|
||||||
|
UsageRecords []*PromoCodeUsage `json:"usage_records,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [1]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsageRecordsOrErr returns the UsageRecords value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e PromoCodeEdges) UsageRecordsOrErr() ([]*PromoCodeUsage, error) {
|
||||||
|
if e.loadedTypes[0] {
|
||||||
|
return e.UsageRecords, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "usage_records"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*PromoCode) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocode.FieldBonusAmount:
|
||||||
|
values[i] = new(sql.NullFloat64)
|
||||||
|
case promocode.FieldID, promocode.FieldMaxUses, promocode.FieldUsedCount:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case promocode.FieldCode, promocode.FieldStatus, promocode.FieldNotes:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case promocode.FieldExpiresAt, promocode.FieldCreatedAt, promocode.FieldUpdatedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the PromoCode fields.
|
||||||
|
func (_m *PromoCode) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocode.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case promocode.FieldCode:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field code", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Code = value.String
|
||||||
|
}
|
||||||
|
case promocode.FieldBonusAmount:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field bonus_amount", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.BonusAmount = value.Float64
|
||||||
|
}
|
||||||
|
case promocode.FieldMaxUses:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field max_uses", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.MaxUses = int(value.Int64)
|
||||||
|
}
|
||||||
|
case promocode.FieldUsedCount:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field used_count", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UsedCount = int(value.Int64)
|
||||||
|
}
|
||||||
|
case promocode.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case promocode.FieldExpiresAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field expires_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ExpiresAt = new(time.Time)
|
||||||
|
*_m.ExpiresAt = value.Time
|
||||||
|
}
|
||||||
|
case promocode.FieldNotes:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field notes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Notes = new(string)
|
||||||
|
*_m.Notes = value.String
|
||||||
|
}
|
||||||
|
case promocode.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case promocode.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the PromoCode.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *PromoCode) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageRecords queries the "usage_records" edge of the PromoCode entity.
|
||||||
|
func (_m *PromoCode) QueryUsageRecords() *PromoCodeUsageQuery {
|
||||||
|
return NewPromoCodeClient(_m.config).QueryUsageRecords(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this PromoCode.
|
||||||
|
// Note that you need to call PromoCode.Unwrap() before calling this method if this PromoCode
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *PromoCode) Update() *PromoCodeUpdateOne {
|
||||||
|
return NewPromoCodeClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the PromoCode entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *PromoCode) Unwrap() *PromoCode {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: PromoCode is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *PromoCode) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("PromoCode(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("code=")
|
||||||
|
builder.WriteString(_m.Code)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("bonus_amount=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("max_uses=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.MaxUses))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("used_count=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.UsedCount))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.ExpiresAt; v != nil {
|
||||||
|
builder.WriteString("expires_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Notes; v != nil {
|
||||||
|
builder.WriteString("notes=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodes is a parsable slice of PromoCode.
|
||||||
|
type PromoCodes []*PromoCode
|
||||||
165
backend/ent/promocode/promocode.go
Normal file
165
backend/ent/promocode/promocode.go
Normal file
@@ -0,0 +1,165 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the promocode type in the database.
|
||||||
|
Label = "promo_code"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCode holds the string denoting the code field in the database.
|
||||||
|
FieldCode = "code"
|
||||||
|
// FieldBonusAmount holds the string denoting the bonus_amount field in the database.
|
||||||
|
FieldBonusAmount = "bonus_amount"
|
||||||
|
// FieldMaxUses holds the string denoting the max_uses field in the database.
|
||||||
|
FieldMaxUses = "max_uses"
|
||||||
|
// FieldUsedCount holds the string denoting the used_count field in the database.
|
||||||
|
FieldUsedCount = "used_count"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldExpiresAt holds the string denoting the expires_at field in the database.
|
||||||
|
FieldExpiresAt = "expires_at"
|
||||||
|
// FieldNotes holds the string denoting the notes field in the database.
|
||||||
|
FieldNotes = "notes"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// EdgeUsageRecords holds the string denoting the usage_records edge name in mutations.
|
||||||
|
EdgeUsageRecords = "usage_records"
|
||||||
|
// Table holds the table name of the promocode in the database.
|
||||||
|
Table = "promo_codes"
|
||||||
|
// UsageRecordsTable is the table that holds the usage_records relation/edge.
|
||||||
|
UsageRecordsTable = "promo_code_usages"
|
||||||
|
// UsageRecordsInverseTable is the table name for the PromoCodeUsage entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "promocodeusage" package.
|
||||||
|
UsageRecordsInverseTable = "promo_code_usages"
|
||||||
|
// UsageRecordsColumn is the table column denoting the usage_records relation/edge.
|
||||||
|
UsageRecordsColumn = "promo_code_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for promocode fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCode,
|
||||||
|
FieldBonusAmount,
|
||||||
|
FieldMaxUses,
|
||||||
|
FieldUsedCount,
|
||||||
|
FieldStatus,
|
||||||
|
FieldExpiresAt,
|
||||||
|
FieldNotes,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// CodeValidator is a validator for the "code" field. It is called by the builders before save.
|
||||||
|
CodeValidator func(string) error
|
||||||
|
// DefaultBonusAmount holds the default value on creation for the "bonus_amount" field.
|
||||||
|
DefaultBonusAmount float64
|
||||||
|
// DefaultMaxUses holds the default value on creation for the "max_uses" field.
|
||||||
|
DefaultMaxUses int
|
||||||
|
// DefaultUsedCount holds the default value on creation for the "used_count" field.
|
||||||
|
DefaultUsedCount int
|
||||||
|
// DefaultStatus holds the default value on creation for the "status" field.
|
||||||
|
DefaultStatus string
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the PromoCode queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCode orders the results by the code field.
|
||||||
|
func ByCode(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCode, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByBonusAmount orders the results by the bonus_amount field.
|
||||||
|
func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldBonusAmount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByMaxUses orders the results by the max_uses field.
|
||||||
|
func ByMaxUses(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldMaxUses, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsedCount orders the results by the used_count field.
|
||||||
|
func ByUsedCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsedCount, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByExpiresAt orders the results by the expires_at field.
|
||||||
|
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByNotes orders the results by the notes field.
|
||||||
|
func ByNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldNotes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageRecordsCount orders the results by usage_records count.
|
||||||
|
func ByUsageRecordsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newUsageRecordsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsageRecords orders the results by usage_records terms.
|
||||||
|
func ByUsageRecords(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUsageRecordsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newUsageRecordsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UsageRecordsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
594
backend/ent/promocode/where.go
Normal file
594
backend/ent/promocode/where.go
Normal file
@@ -0,0 +1,594 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code applies equality check predicate on the "code" field. It's identical to CodeEQ.
|
||||||
|
func Code(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ.
|
||||||
|
func BonusAmount(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUses applies equality check predicate on the "max_uses" field. It's identical to MaxUsesEQ.
|
||||||
|
func MaxUses(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCount applies equality check predicate on the "used_count" field. It's identical to UsedCountEQ.
|
||||||
|
func UsedCount(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
|
||||||
|
func Status(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ.
|
||||||
|
func ExpiresAt(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ.
|
||||||
|
func Notes(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeEQ applies the EQ predicate on the "code" field.
|
||||||
|
func CodeEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeNEQ applies the NEQ predicate on the "code" field.
|
||||||
|
func CodeNEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeIn applies the In predicate on the "code" field.
|
||||||
|
func CodeIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldCode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeNotIn applies the NotIn predicate on the "code" field.
|
||||||
|
func CodeNotIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldCode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeGT applies the GT predicate on the "code" field.
|
||||||
|
func CodeGT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeGTE applies the GTE predicate on the "code" field.
|
||||||
|
func CodeGTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeLT applies the LT predicate on the "code" field.
|
||||||
|
func CodeLT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeLTE applies the LTE predicate on the "code" field.
|
||||||
|
func CodeLTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeContains applies the Contains predicate on the "code" field.
|
||||||
|
func CodeContains(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContains(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeHasPrefix applies the HasPrefix predicate on the "code" field.
|
||||||
|
func CodeHasPrefix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasPrefix(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeHasSuffix applies the HasSuffix predicate on the "code" field.
|
||||||
|
func CodeHasSuffix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasSuffix(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeEqualFold applies the EqualFold predicate on the "code" field.
|
||||||
|
func CodeEqualFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEqualFold(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeContainsFold applies the ContainsFold predicate on the "code" field.
|
||||||
|
func CodeContainsFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContainsFold(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountEQ(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountNEQ(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountIn applies the In predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountIn(vs ...float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldBonusAmount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountNotIn(vs ...float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldBonusAmount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountGT applies the GT predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountGT(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountGTE(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountLT applies the LT predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountLT(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountLTE(v float64) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesEQ applies the EQ predicate on the "max_uses" field.
|
||||||
|
func MaxUsesEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesNEQ applies the NEQ predicate on the "max_uses" field.
|
||||||
|
func MaxUsesNEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesIn applies the In predicate on the "max_uses" field.
|
||||||
|
func MaxUsesIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldMaxUses, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesNotIn applies the NotIn predicate on the "max_uses" field.
|
||||||
|
func MaxUsesNotIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldMaxUses, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesGT applies the GT predicate on the "max_uses" field.
|
||||||
|
func MaxUsesGT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesGTE applies the GTE predicate on the "max_uses" field.
|
||||||
|
func MaxUsesGTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesLT applies the LT predicate on the "max_uses" field.
|
||||||
|
func MaxUsesLT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaxUsesLTE applies the LTE predicate on the "max_uses" field.
|
||||||
|
func MaxUsesLTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldMaxUses, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountEQ applies the EQ predicate on the "used_count" field.
|
||||||
|
func UsedCountEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountNEQ applies the NEQ predicate on the "used_count" field.
|
||||||
|
func UsedCountNEQ(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountIn applies the In predicate on the "used_count" field.
|
||||||
|
func UsedCountIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldUsedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountNotIn applies the NotIn predicate on the "used_count" field.
|
||||||
|
func UsedCountNotIn(vs ...int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldUsedCount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountGT applies the GT predicate on the "used_count" field.
|
||||||
|
func UsedCountGT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountGTE applies the GTE predicate on the "used_count" field.
|
||||||
|
func UsedCountGTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountLT applies the LT predicate on the "used_count" field.
|
||||||
|
func UsedCountLT(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedCountLTE applies the LTE predicate on the "used_count" field.
|
||||||
|
func UsedCountLTE(v int) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldUsedCount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtEQ applies the EQ predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtIn applies the In predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldExpiresAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNotIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldExpiresAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtGT applies the GT predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtGT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtGTE applies the GTE predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtGTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtLT applies the LT predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtLT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtLTE applies the LTE predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtLTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldExpiresAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtIsNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIsNull(FieldExpiresAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field.
|
||||||
|
func ExpiresAtNotNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotNull(FieldExpiresAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesEQ applies the EQ predicate on the "notes" field.
|
||||||
|
func NotesEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNEQ applies the NEQ predicate on the "notes" field.
|
||||||
|
func NotesNEQ(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesIn applies the In predicate on the "notes" field.
|
||||||
|
func NotesIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldNotes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNotIn applies the NotIn predicate on the "notes" field.
|
||||||
|
func NotesNotIn(vs ...string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldNotes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesGT applies the GT predicate on the "notes" field.
|
||||||
|
func NotesGT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesGTE applies the GTE predicate on the "notes" field.
|
||||||
|
func NotesGTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesLT applies the LT predicate on the "notes" field.
|
||||||
|
func NotesLT(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesLTE applies the LTE predicate on the "notes" field.
|
||||||
|
func NotesLTE(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesContains applies the Contains predicate on the "notes" field.
|
||||||
|
func NotesContains(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContains(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesHasPrefix applies the HasPrefix predicate on the "notes" field.
|
||||||
|
func NotesHasPrefix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasPrefix(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesHasSuffix applies the HasSuffix predicate on the "notes" field.
|
||||||
|
func NotesHasSuffix(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldHasSuffix(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesIsNil applies the IsNil predicate on the "notes" field.
|
||||||
|
func NotesIsNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIsNull(FieldNotes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNotNil applies the NotNil predicate on the "notes" field.
|
||||||
|
func NotesNotNil() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotNull(FieldNotes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesEqualFold applies the EqualFold predicate on the "notes" field.
|
||||||
|
func NotesEqualFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEqualFold(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesContainsFold applies the ContainsFold predicate on the "notes" field.
|
||||||
|
func NotesContainsFold(v string) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldContainsFold(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUsageRecords applies the HasEdge predicate on the "usage_records" edge.
|
||||||
|
func HasUsageRecords() predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUsageRecordsWith applies the HasEdge predicate on the "usage_records" edge with a given conditions (other predicates).
|
||||||
|
func HasUsageRecordsWith(preds ...predicate.PromoCodeUsage) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(func(s *sql.Selector) {
|
||||||
|
step := newUsageRecordsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.PromoCode) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.PromoCode) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.PromoCode) predicate.PromoCode {
|
||||||
|
return predicate.PromoCode(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1081
backend/ent/promocode_create.go
Normal file
1081
backend/ent/promocode_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/promocode_delete.go
Normal file
88
backend/ent/promocode_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeDelete is the builder for deleting a PromoCode entity.
|
||||||
|
type PromoCodeDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *PromoCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeDelete builder.
|
||||||
|
func (_d *PromoCodeDelete) Where(ps ...predicate.PromoCode) *PromoCodeDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *PromoCodeDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *PromoCodeDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(promocode.Table, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeDeleteOne is the builder for deleting a single PromoCode entity.
|
||||||
|
type PromoCodeDeleteOne struct {
|
||||||
|
_d *PromoCodeDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeDelete builder.
|
||||||
|
func (_d *PromoCodeDeleteOne) Where(ps ...predicate.PromoCode) *PromoCodeDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *PromoCodeDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{promocode.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
643
backend/ent/promocode_query.go
Normal file
643
backend/ent/promocode_query.go
Normal file
@@ -0,0 +1,643 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeQuery is the builder for querying PromoCode entities.
|
||||||
|
type PromoCodeQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []promocode.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.PromoCode
|
||||||
|
withUsageRecords *PromoCodeUsageQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the PromoCodeQuery builder.
|
||||||
|
func (_q *PromoCodeQuery) Where(ps ...predicate.PromoCode) *PromoCodeQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *PromoCodeQuery) Limit(limit int) *PromoCodeQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *PromoCodeQuery) Offset(offset int) *PromoCodeQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *PromoCodeQuery) Unique(unique bool) *PromoCodeQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *PromoCodeQuery) Order(o ...promocode.OrderOption) *PromoCodeQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUsageRecords chains the current query on the "usage_records" edge.
|
||||||
|
func (_q *PromoCodeQuery) QueryUsageRecords() *PromoCodeUsageQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocode.Table, promocode.FieldID, selector),
|
||||||
|
sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first PromoCode entity from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCode was found.
|
||||||
|
func (_q *PromoCodeQuery) First(ctx context.Context) (*PromoCode, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{promocode.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) FirstX(ctx context.Context) *PromoCode {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first PromoCode ID from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCode ID was found.
|
||||||
|
func (_q *PromoCodeQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{promocode.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single PromoCode entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one PromoCode entity is found.
|
||||||
|
// Returns a *NotFoundError when no PromoCode entities are found.
|
||||||
|
func (_q *PromoCodeQuery) Only(ctx context.Context) (*PromoCode, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{promocode.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{promocode.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) OnlyX(ctx context.Context) *PromoCode {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only PromoCode ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one PromoCode ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *PromoCodeQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{promocode.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{promocode.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of PromoCodes.
|
||||||
|
func (_q *PromoCodeQuery) All(ctx context.Context) ([]*PromoCode, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*PromoCode, *PromoCodeQuery]()
|
||||||
|
return withInterceptors[[]*PromoCode](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) AllX(ctx context.Context) []*PromoCode {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of PromoCode IDs.
|
||||||
|
func (_q *PromoCodeQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(promocode.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *PromoCodeQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*PromoCodeQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *PromoCodeQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the PromoCodeQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *PromoCodeQuery) Clone() *PromoCodeQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &PromoCodeQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]promocode.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.PromoCode{}, _q.predicates...),
|
||||||
|
withUsageRecords: _q.withUsageRecords.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUsageRecords tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "usage_records" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *PromoCodeQuery) WithUsageRecords(opts ...func(*PromoCodeUsageQuery)) *PromoCodeQuery {
|
||||||
|
query := (&PromoCodeUsageClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUsageRecords = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Code string `json:"code,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.PromoCode.Query().
|
||||||
|
// GroupBy(promocode.FieldCode).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *PromoCodeQuery) GroupBy(field string, fields ...string) *PromoCodeGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &PromoCodeGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = promocode.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Code string `json:"code,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.PromoCode.Query().
|
||||||
|
// Select(promocode.FieldCode).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *PromoCodeQuery) Select(fields ...string) *PromoCodeSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &PromoCodeSelect{PromoCodeQuery: _q}
|
||||||
|
sbuild.label = promocode.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a PromoCodeSelect configured with the given aggregations.
|
||||||
|
func (_q *PromoCodeQuery) Aggregate(fns ...AggregateFunc) *PromoCodeSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !promocode.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCode, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*PromoCode{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [1]bool{
|
||||||
|
_q.withUsageRecords != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*PromoCode).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &PromoCode{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withUsageRecords; query != nil {
|
||||||
|
if err := _q.loadUsageRecords(ctx, query, nodes,
|
||||||
|
func(n *PromoCode) { n.Edges.UsageRecords = []*PromoCodeUsage{} },
|
||||||
|
func(n *PromoCode, e *PromoCodeUsage) { n.Edges.UsageRecords = append(n.Edges.UsageRecords, e) }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) loadUsageRecords(ctx context.Context, query *PromoCodeUsageQuery, nodes []*PromoCode, init func(*PromoCode), assign func(*PromoCode, *PromoCodeUsage)) error {
|
||||||
|
fks := make([]driver.Value, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64]*PromoCode)
|
||||||
|
for i := range nodes {
|
||||||
|
fks = append(fks, nodes[i].ID)
|
||||||
|
nodeids[nodes[i].ID] = nodes[i]
|
||||||
|
if init != nil {
|
||||||
|
init(nodes[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(query.ctx.Fields) > 0 {
|
||||||
|
query.ctx.AppendFieldOnce(promocodeusage.FieldPromoCodeID)
|
||||||
|
}
|
||||||
|
query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) {
|
||||||
|
s.Where(sql.InValues(s.C(promocode.UsageRecordsColumn), fks...))
|
||||||
|
}))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
fk := n.PromoCodeID
|
||||||
|
node, ok := nodeids[fk]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected referenced foreign-key "promo_code_id" returned %v for node %v`, fk, n.ID)
|
||||||
|
}
|
||||||
|
assign(node, n)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != promocode.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *PromoCodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(promocode.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = promocode.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *PromoCodeQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *PromoCodeQuery) ForShare(opts ...sql.LockOption) *PromoCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeGroupBy is the group-by builder for PromoCode entities.
|
||||||
|
type PromoCodeGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *PromoCodeQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *PromoCodeGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *PromoCodeGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*PromoCodeQuery, *PromoCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *PromoCodeGroupBy) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeSelect is the builder for selecting fields of PromoCode entities.
|
||||||
|
type PromoCodeSelect struct {
|
||||||
|
*PromoCodeQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *PromoCodeSelect) Aggregate(fns ...AggregateFunc) *PromoCodeSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *PromoCodeSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*PromoCodeQuery, *PromoCodeSelect](ctx, _s.PromoCodeQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *PromoCodeSelect) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
745
backend/ent/promocode_update.go
Normal file
745
backend/ent/promocode_update.go
Normal file
@@ -0,0 +1,745 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUpdate is the builder for updating PromoCode entities.
|
||||||
|
type PromoCodeUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *PromoCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUpdate builder.
|
||||||
|
func (_u *PromoCodeUpdate) Where(ps ...predicate.PromoCode) *PromoCodeUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCode sets the "code" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetCode(v string) *PromoCodeUpdate {
|
||||||
|
_u.mutation.SetCode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCode sets the "code" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableCode(v *string) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetBonusAmount(v float64) *PromoCodeUpdate {
|
||||||
|
_u.mutation.ResetBonusAmount()
|
||||||
|
_u.mutation.SetBonusAmount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetBonusAmount(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds value to the "bonus_amount" field.
|
||||||
|
func (_u *PromoCodeUpdate) AddBonusAmount(v float64) *PromoCodeUpdate {
|
||||||
|
_u.mutation.AddBonusAmount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMaxUses sets the "max_uses" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetMaxUses(v int) *PromoCodeUpdate {
|
||||||
|
_u.mutation.ResetMaxUses()
|
||||||
|
_u.mutation.SetMaxUses(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableMaxUses(v *int) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetMaxUses(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMaxUses adds value to the "max_uses" field.
|
||||||
|
func (_u *PromoCodeUpdate) AddMaxUses(v int) *PromoCodeUpdate {
|
||||||
|
_u.mutation.AddMaxUses(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedCount sets the "used_count" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetUsedCount(v int) *PromoCodeUpdate {
|
||||||
|
_u.mutation.ResetUsedCount()
|
||||||
|
_u.mutation.SetUsedCount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedCount sets the "used_count" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableUsedCount(v *int) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsedCount(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsedCount adds value to the "used_count" field.
|
||||||
|
func (_u *PromoCodeUpdate) AddUsedCount(v int) *PromoCodeUpdate {
|
||||||
|
_u.mutation.AddUsedCount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetStatus(v string) *PromoCodeUpdate {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableStatus(v *string) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExpiresAt sets the "expires_at" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetExpiresAt(v time.Time) *PromoCodeUpdate {
|
||||||
|
_u.mutation.SetExpiresAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetExpiresAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearExpiresAt clears the value of the "expires_at" field.
|
||||||
|
func (_u *PromoCodeUpdate) ClearExpiresAt() *PromoCodeUpdate {
|
||||||
|
_u.mutation.ClearExpiresAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNotes sets the "notes" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetNotes(v string) *PromoCodeUpdate {
|
||||||
|
_u.mutation.SetNotes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotes sets the "notes" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdate) SetNillableNotes(v *string) *PromoCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearNotes clears the value of the "notes" field.
|
||||||
|
func (_u *PromoCodeUpdate) ClearNotes() *PromoCodeUpdate {
|
||||||
|
_u.mutation.ClearNotes()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *PromoCodeUpdate) SetUpdatedAt(v time.Time) *PromoCodeUpdate {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs.
|
||||||
|
func (_u *PromoCodeUpdate) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdate {
|
||||||
|
_u.mutation.AddUsageRecordIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *PromoCodeUpdate) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddUsageRecordIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the PromoCodeMutation object of the builder.
|
||||||
|
func (_u *PromoCodeUpdate) Mutation() *PromoCodeMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *PromoCodeUpdate) ClearUsageRecords() *PromoCodeUpdate {
|
||||||
|
_u.mutation.ClearUsageRecords()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs.
|
||||||
|
func (_u *PromoCodeUpdate) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdate {
|
||||||
|
_u.mutation.RemoveUsageRecordIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities.
|
||||||
|
func (_u *PromoCodeUpdate) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveUsageRecordIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *PromoCodeUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *PromoCodeUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *PromoCodeUpdate) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := promocode.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *PromoCodeUpdate) check() error {
|
||||||
|
if v, ok := _u.mutation.Code(); ok {
|
||||||
|
if err := promocode.CodeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := promocode.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *PromoCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Code(); ok {
|
||||||
|
_spec.SetField(promocode.FieldCode, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.BonusAmount(); ok {
|
||||||
|
_spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedBonusAmount(); ok {
|
||||||
|
_spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.MaxUses(); ok {
|
||||||
|
_spec.SetField(promocode.FieldMaxUses, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedMaxUses(); ok {
|
||||||
|
_spec.AddField(promocode.FieldMaxUses, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UsedCount(); ok {
|
||||||
|
_spec.SetField(promocode.FieldUsedCount, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsedCount(); ok {
|
||||||
|
_spec.AddField(promocode.FieldUsedCount, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(promocode.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ExpiresAt(); ok {
|
||||||
|
_spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ExpiresAtCleared() {
|
||||||
|
_spec.ClearField(promocode.FieldExpiresAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Notes(); ok {
|
||||||
|
_spec.SetField(promocode.FieldNotes, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.NotesCleared() {
|
||||||
|
_spec.ClearField(promocode.FieldNotes, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsageRecordsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: promocode.UsageRecordsTable,
|
||||||
|
Columns: []string{promocode.UsageRecordsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: promocode.UsageRecordsTable,
|
||||||
|
Columns: []string{promocode.UsageRecordsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: promocode.UsageRecordsTable,
|
||||||
|
Columns: []string{promocode.UsageRecordsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{promocode.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUpdateOne is the builder for updating a single PromoCode entity.
|
||||||
|
type PromoCodeUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *PromoCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCode sets the "code" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetCode(v string) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.SetCode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCode sets the "code" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableCode(v *string) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetBonusAmount(v float64) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.ResetBonusAmount()
|
||||||
|
_u.mutation.SetBonusAmount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetBonusAmount(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds value to the "bonus_amount" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) AddBonusAmount(v float64) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.AddBonusAmount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetMaxUses sets the "max_uses" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetMaxUses(v int) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.ResetMaxUses()
|
||||||
|
_u.mutation.SetMaxUses(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableMaxUses sets the "max_uses" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableMaxUses(v *int) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetMaxUses(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddMaxUses adds value to the "max_uses" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) AddMaxUses(v int) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.AddMaxUses(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedCount sets the "used_count" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetUsedCount(v int) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.ResetUsedCount()
|
||||||
|
_u.mutation.SetUsedCount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedCount sets the "used_count" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableUsedCount(v *int) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsedCount(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsedCount adds value to the "used_count" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) AddUsedCount(v int) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.AddUsedCount(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetStatus(v string) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableStatus(v *string) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetExpiresAt sets the "expires_at" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetExpiresAt(v time.Time) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.SetExpiresAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetExpiresAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearExpiresAt clears the value of the "expires_at" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) ClearExpiresAt() *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.ClearExpiresAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNotes sets the "notes" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNotes(v string) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.SetNotes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotes sets the "notes" field if the given value is not nil.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetNillableNotes(v *string) *PromoCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearNotes clears the value of the "notes" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) ClearNotes() *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.ClearNotes()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *PromoCodeUpdateOne) SetUpdatedAt(v time.Time) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs.
|
||||||
|
func (_u *PromoCodeUpdateOne) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.AddUsageRecordIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *PromoCodeUpdateOne) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddUsageRecordIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the PromoCodeMutation object of the builder.
|
||||||
|
func (_u *PromoCodeUpdateOne) Mutation() *PromoCodeMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity.
|
||||||
|
func (_u *PromoCodeUpdateOne) ClearUsageRecords() *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.ClearUsageRecords()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs.
|
||||||
|
func (_u *PromoCodeUpdateOne) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.RemoveUsageRecordIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities.
|
||||||
|
func (_u *PromoCodeUpdateOne) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveUsageRecordIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUpdate builder.
|
||||||
|
func (_u *PromoCodeUpdateOne) Where(ps ...predicate.PromoCode) *PromoCodeUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *PromoCodeUpdateOne) Select(field string, fields ...string) *PromoCodeUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated PromoCode entity.
|
||||||
|
func (_u *PromoCodeUpdateOne) Save(ctx context.Context) (*PromoCode, error) {
|
||||||
|
_u.defaults()
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUpdateOne) SaveX(ctx context.Context) *PromoCode {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *PromoCodeUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *PromoCodeUpdateOne) defaults() {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
v := promocode.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *PromoCodeUpdateOne) check() error {
|
||||||
|
if v, ok := _u.mutation.Code(); ok {
|
||||||
|
if err := promocode.CodeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := promocode.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *PromoCodeUpdateOne) sqlSave(ctx context.Context) (_node *PromoCode, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCode.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !promocode.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != promocode.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Code(); ok {
|
||||||
|
_spec.SetField(promocode.FieldCode, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.BonusAmount(); ok {
|
||||||
|
_spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedBonusAmount(); ok {
|
||||||
|
_spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.MaxUses(); ok {
|
||||||
|
_spec.SetField(promocode.FieldMaxUses, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedMaxUses(); ok {
|
||||||
|
_spec.AddField(promocode.FieldMaxUses, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UsedCount(); ok {
|
||||||
|
_spec.SetField(promocode.FieldUsedCount, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedUsedCount(); ok {
|
||||||
|
_spec.AddField(promocode.FieldUsedCount, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(promocode.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ExpiresAt(); ok {
|
||||||
|
_spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.ExpiresAtCleared() {
|
||||||
|
_spec.ClearField(promocode.FieldExpiresAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Notes(); ok {
|
||||||
|
_spec.SetField(promocode.FieldNotes, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.NotesCleared() {
|
||||||
|
_spec.ClearField(promocode.FieldNotes, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsageRecordsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: promocode.UsageRecordsTable,
|
||||||
|
Columns: []string{promocode.UsageRecordsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: promocode.UsageRecordsTable,
|
||||||
|
Columns: []string{promocode.UsageRecordsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: false,
|
||||||
|
Table: promocode.UsageRecordsTable,
|
||||||
|
Columns: []string{promocode.UsageRecordsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &PromoCode{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{promocode.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
187
backend/ent/promocodeusage.go
Normal file
187
backend/ent/promocodeusage.go
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsage is the model entity for the PromoCodeUsage schema.
|
||||||
|
type PromoCodeUsage struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// 优惠码ID
|
||||||
|
PromoCodeID int64 `json:"promo_code_id,omitempty"`
|
||||||
|
// 使用用户ID
|
||||||
|
UserID int64 `json:"user_id,omitempty"`
|
||||||
|
// 实际赠送金额
|
||||||
|
BonusAmount float64 `json:"bonus_amount,omitempty"`
|
||||||
|
// 使用时间
|
||||||
|
UsedAt time.Time `json:"used_at,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the PromoCodeUsageQuery when eager-loading is set.
|
||||||
|
Edges PromoCodeUsageEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type PromoCodeUsageEdges struct {
|
||||||
|
// PromoCode holds the value of the promo_code edge.
|
||||||
|
PromoCode *PromoCode `json:"promo_code,omitempty"`
|
||||||
|
// User holds the value of the user edge.
|
||||||
|
User *User `json:"user,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [2]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeOrErr returns the PromoCode value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e PromoCodeUsageEdges) PromoCodeOrErr() (*PromoCode, error) {
|
||||||
|
if e.PromoCode != nil {
|
||||||
|
return e.PromoCode, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: promocode.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "promo_code"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserOrErr returns the User value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e PromoCodeUsageEdges) UserOrErr() (*User, error) {
|
||||||
|
if e.User != nil {
|
||||||
|
return e.User, nil
|
||||||
|
} else if e.loadedTypes[1] {
|
||||||
|
return nil, &NotFoundError{label: user.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*PromoCodeUsage) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocodeusage.FieldBonusAmount:
|
||||||
|
values[i] = new(sql.NullFloat64)
|
||||||
|
case promocodeusage.FieldID, promocodeusage.FieldPromoCodeID, promocodeusage.FieldUserID:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case promocodeusage.FieldUsedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the PromoCodeUsage fields.
|
||||||
|
func (_m *PromoCodeUsage) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case promocodeusage.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case promocodeusage.FieldPromoCodeID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field promo_code_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.PromoCodeID = value.Int64
|
||||||
|
}
|
||||||
|
case promocodeusage.FieldUserID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field user_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UserID = value.Int64
|
||||||
|
}
|
||||||
|
case promocodeusage.FieldBonusAmount:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field bonus_amount", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.BonusAmount = value.Float64
|
||||||
|
}
|
||||||
|
case promocodeusage.FieldUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UsedAt = value.Time
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the PromoCodeUsage.
// This includes values selected through modifiers, order, etc.
// Values are stored by assignValues' default branch; unknown names yield an error.
func (_m *PromoCodeUsage) Value(name string) (ent.Value, error) {
	return _m.selectValues.Get(name)
}
|
||||||
|
|
||||||
|
// QueryPromoCode queries the "promo_code" edge of the PromoCodeUsage entity.
func (_m *PromoCodeUsage) QueryPromoCode() *PromoCodeQuery {
	return NewPromoCodeUsageClient(_m.config).QueryPromoCode(_m)
}
|
||||||
|
|
||||||
|
// QueryUser queries the "user" edge of the PromoCodeUsage entity.
func (_m *PromoCodeUsage) QueryUser() *UserQuery {
	return NewPromoCodeUsageClient(_m.config).QueryUser(_m)
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this PromoCodeUsage.
// Note that you need to call PromoCodeUsage.Unwrap() before calling this method if this PromoCodeUsage
// was returned from a transaction, and the transaction was committed or rolled back.
func (_m *PromoCodeUsage) Update() *PromoCodeUsageUpdateOne {
	return NewPromoCodeUsageClient(_m.config).UpdateOne(_m)
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the PromoCodeUsage entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (_m *PromoCodeUsage) Unwrap() *PromoCodeUsage {
	_tx, ok := _m.config.driver.(*txDriver)
	if !ok {
		// Calling Unwrap on an entity that did not come from a transaction is
		// a programmer error, hence the panic rather than an error return.
		panic("ent: PromoCodeUsage is not a transactional entity")
	}
	// Swap the tx driver for the underlying driver that opened the tx.
	_m.config.driver = _tx.drv
	return _m
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
// Output shape: PromoCodeUsage(id=..., promo_code_id=..., user_id=..., bonus_amount=..., used_at=...).
func (_m *PromoCodeUsage) String() string {
	var builder strings.Builder
	builder.WriteString("PromoCodeUsage(")
	builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
	builder.WriteString("promo_code_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.PromoCodeID))
	builder.WriteString(", ")
	builder.WriteString("user_id=")
	builder.WriteString(fmt.Sprintf("%v", _m.UserID))
	builder.WriteString(", ")
	builder.WriteString("bonus_amount=")
	builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount))
	builder.WriteString(", ")
	builder.WriteString("used_at=")
	builder.WriteString(_m.UsedAt.Format(time.ANSIC))
	builder.WriteByte(')')
	return builder.String()
}
|
||||||
|
|
||||||
|
// PromoCodeUsages is a parsable slice of PromoCodeUsage.
type PromoCodeUsages []*PromoCodeUsage
|
||||||
125
backend/ent/promocodeusage/promocodeusage.go
Normal file
125
backend/ent/promocodeusage/promocodeusage.go
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocodeusage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// Label holds the string label denoting the promocodeusage type in the database.
	Label = "promo_code_usage"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldPromoCodeID holds the string denoting the promo_code_id field in the database.
	FieldPromoCodeID = "promo_code_id"
	// FieldUserID holds the string denoting the user_id field in the database.
	FieldUserID = "user_id"
	// FieldBonusAmount holds the string denoting the bonus_amount field in the database.
	FieldBonusAmount = "bonus_amount"
	// FieldUsedAt holds the string denoting the used_at field in the database.
	FieldUsedAt = "used_at"
	// EdgePromoCode holds the string denoting the promo_code edge name in mutations.
	EdgePromoCode = "promo_code"
	// EdgeUser holds the string denoting the user edge name in mutations.
	EdgeUser = "user"
	// Table holds the table name of the promocodeusage in the database.
	Table = "promo_code_usages"
	// PromoCodeTable is the table that holds the promo_code relation/edge.
	// It is the usage table itself because the edge is backed by the FK column below.
	PromoCodeTable = "promo_code_usages"
	// PromoCodeInverseTable is the table name for the PromoCode entity.
	// It exists in this package in order to avoid circular dependency with the "promocode" package.
	PromoCodeInverseTable = "promo_codes"
	// PromoCodeColumn is the table column denoting the promo_code relation/edge.
	PromoCodeColumn = "promo_code_id"
	// UserTable is the table that holds the user relation/edge.
	// It is the usage table itself because the edge is backed by the FK column below.
	UserTable = "promo_code_usages"
	// UserInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	UserInverseTable = "users"
	// UserColumn is the table column denoting the user relation/edge.
	UserColumn = "user_id"
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for promocodeusage fields.
var Columns = []string{
	FieldID,
	FieldPromoCodeID,
	FieldUserID,
	FieldBonusAmount,
	FieldUsedAt,
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
// Linear scan is fine here: Columns has only five entries.
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	return false
}
|
||||||
|
|
||||||
|
var (
	// DefaultUsedAt holds the default value on creation for the "used_at" field.
	// It is a function (not a value) so each create call gets a fresh timestamp;
	// it is wired up by the ent runtime at init time.
	DefaultUsedAt func() time.Time
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the PromoCodeUsage queries.
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByPromoCodeID orders the results by the promo_code_id field.
func ByPromoCodeID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldPromoCodeID, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByUserID orders the results by the user_id field.
func ByUserID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUserID, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByBonusAmount orders the results by the bonus_amount field.
func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldBonusAmount, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByUsedAt orders the results by the used_at field.
func ByUsedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUsedAt, opts...).ToFunc()
}
|
||||||
|
|
||||||
|
// ByPromoCodeField orders the results by promo_code field,
// i.e. by a column of the joined PromoCode row reached through the edge.
func ByPromoCodeField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newPromoCodeStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
|
||||||
|
// ByUserField orders the results by user field,
// i.e. by a column of the joined User row reached through the edge.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
	}
}
|
||||||
|
// newPromoCodeStep builds the graph-traversal step for the "promo_code" edge
// (M2O, inverse) used by the edge ordering and predicate helpers.
func newPromoCodeStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(PromoCodeInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn),
	)
}
|
||||||
|
// newUserStep builds the graph-traversal step for the "user" edge
// (M2O, inverse) used by the edge ordering and predicate helpers.
func newUserStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(UserInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
	)
}
|
||||||
257
backend/ent/promocodeusage/where.go
Normal file
257
backend/ent/promocodeusage/where.go
Normal file
@@ -0,0 +1,257 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package promocodeusage
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field. Shorthand for IDEQ.
func ID(id int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id))
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeID applies equality check predicate on the "promo_code_id" field. It's identical to PromoCodeIDEQ.
func PromoCodeID(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v))
}
|
||||||
|
|
||||||
|
// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
func UserID(v int64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v))
}
|
||||||
|
|
||||||
|
// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ.
func BonusAmount(v float64) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v))
}
|
||||||
|
|
||||||
|
// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ.
func UsedAt(v time.Time) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v))
}
|
||||||
|
|
||||||
|
// PromoCodeIDEQ applies the EQ predicate on the "promo_code_id" field.
|
||||||
|
func PromoCodeIDEQ(v int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeIDNEQ applies the NEQ predicate on the "promo_code_id" field.
|
||||||
|
func PromoCodeIDNEQ(v int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNEQ(FieldPromoCodeID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeIDIn applies the In predicate on the "promo_code_id" field.
|
||||||
|
func PromoCodeIDIn(vs ...int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldIn(FieldPromoCodeID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeIDNotIn applies the NotIn predicate on the "promo_code_id" field.
|
||||||
|
func PromoCodeIDNotIn(vs ...int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNotIn(FieldPromoCodeID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDEQ applies the EQ predicate on the "user_id" field.
|
||||||
|
func UserIDEQ(v int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDNEQ applies the NEQ predicate on the "user_id" field.
|
||||||
|
func UserIDNEQ(v int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUserID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDIn applies the In predicate on the "user_id" field.
|
||||||
|
func UserIDIn(vs ...int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldIn(FieldUserID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserIDNotIn applies the NotIn predicate on the "user_id" field.
|
||||||
|
func UserIDNotIn(vs ...int64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUserID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountEQ(v float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountNEQ(v float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNEQ(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountIn applies the In predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountIn(vs ...float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldIn(FieldBonusAmount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountNotIn(vs ...float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNotIn(FieldBonusAmount, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountGT applies the GT predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountGT(v float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldGT(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountGTE(v float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldGTE(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountLT applies the LT predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountLT(v float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldLT(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field.
|
||||||
|
func BonusAmountLTE(v float64) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldLTE(FieldBonusAmount, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtEQ applies the EQ predicate on the "used_at" field.
|
||||||
|
func UsedAtEQ(v time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtNEQ applies the NEQ predicate on the "used_at" field.
|
||||||
|
func UsedAtNEQ(v time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtIn applies the In predicate on the "used_at" field.
|
||||||
|
func UsedAtIn(vs ...time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldIn(FieldUsedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtNotIn applies the NotIn predicate on the "used_at" field.
|
||||||
|
func UsedAtNotIn(vs ...time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUsedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtGT applies the GT predicate on the "used_at" field.
|
||||||
|
func UsedAtGT(v time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldGT(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtGTE applies the GTE predicate on the "used_at" field.
|
||||||
|
func UsedAtGTE(v time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldGTE(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtLT applies the LT predicate on the "used_at" field.
|
||||||
|
func UsedAtLT(v time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldLT(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtLTE applies the LTE predicate on the "used_at" field.
|
||||||
|
func UsedAtLTE(v time.Time) predicate.PromoCodeUsage {
|
||||||
|
return predicate.PromoCodeUsage(sql.FieldLTE(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasPromoCode applies the HasEdge predicate on the "promo_code" edge,
// matching usages that have a linked PromoCode row.
func HasPromoCode() predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
|
||||||
|
|
||||||
|
// HasPromoCodeWith applies the HasEdge predicate on the "promo_code" edge with a given conditions (other predicates).
// All given predicates are applied to the neighbor selector (AND semantics).
func HasPromoCodeWith(preds ...predicate.PromoCode) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		step := newPromoCodeStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||||
|
|
||||||
|
// HasUser applies the HasEdge predicate on the "user" edge,
// matching usages that have a linked User row.
func HasUser() predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		step := sqlgraph.NewStep(
			sqlgraph.From(Table, FieldID),
			sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
		)
		sqlgraph.HasNeighbors(s, step)
	})
}
|
||||||
|
|
||||||
|
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
// All given predicates are applied to the neighbor selector (AND semantics).
func HasUserWith(preds ...predicate.User) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(func(s *sql.Selector) {
		step := newUserStep()
		sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
			for _, p := range preds {
				p(s)
			}
		})
	})
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.AndPredicates(predicates...))
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.OrPredicates(predicates...))
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
func Not(p predicate.PromoCodeUsage) predicate.PromoCodeUsage {
	return predicate.PromoCodeUsage(sql.NotPredicates(p))
}
|
||||||
696
backend/ent/promocodeusage_create.go
Normal file
696
backend/ent/promocodeusage_create.go
Normal file
@@ -0,0 +1,696 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageCreate is the builder for creating a PromoCodeUsage entity.
type PromoCodeUsageCreate struct {
	config
	// mutation accumulates the field/edge values set through the builder.
	mutation *PromoCodeUsageMutation
	// hooks run around the save operation.
	hooks []Hook
	// conflict holds the ON CONFLICT options configured via OnConflict*.
	conflict []sql.ConflictOption
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetPromoCodeID(v int64) *PromoCodeUsageCreate {
|
||||||
|
_c.mutation.SetPromoCodeID(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetUserID(v int64) *PromoCodeUsageCreate {
|
||||||
|
_c.mutation.SetUserID(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetBonusAmount(v float64) *PromoCodeUsageCreate {
|
||||||
|
_c.mutation.SetBonusAmount(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetUsedAt(v time.Time) *PromoCodeUsageCreate {
|
||||||
|
_c.mutation.SetUsedAt(v)
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageCreate {
|
||||||
|
if v != nil {
|
||||||
|
_c.SetUsedAt(*v)
|
||||||
|
}
|
||||||
|
return _c
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPromoCode sets the "promo_code" edge to the PromoCode entity.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetPromoCode(v *PromoCode) *PromoCodeUsageCreate {
|
||||||
|
return _c.SetPromoCodeID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_c *PromoCodeUsageCreate) SetUser(v *User) *PromoCodeUsageCreate {
|
||||||
|
return _c.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the PromoCodeUsageMutation object of the builder.
|
||||||
|
func (_c *PromoCodeUsageCreate) Mutation() *PromoCodeUsageMutation {
|
||||||
|
return _c.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the PromoCodeUsage in the database.
// Defaults are filled in first; validation happens later in sqlSave via check().
func (_c *PromoCodeUsageCreate) Save(ctx context.Context) (*PromoCodeUsage, error) {
	_c.defaults()
	return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks)
}
|
||||||
|
|
||||||
|
// SaveX calls Save and panics if Save returns an error.
|
||||||
|
func (_c *PromoCodeUsageCreate) SaveX(ctx context.Context) *PromoCodeUsage {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *PromoCodeUsageCreate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *PromoCodeUsageCreate) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
// Only used_at has a default; it is applied when the caller did not set it.
func (_c *PromoCodeUsageCreate) defaults() {
	if _, ok := _c.mutation.UsedAt(); !ok {
		v := promocodeusage.DefaultUsedAt()
		_c.mutation.SetUsedAt(v)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// All four fields and both edges are required; the FK fields double as the edges.
func (_c *PromoCodeUsageCreate) check() error {
	if _, ok := _c.mutation.PromoCodeID(); !ok {
		return &ValidationError{Name: "promo_code_id", err: errors.New(`ent: missing required field "PromoCodeUsage.promo_code_id"`)}
	}
	if _, ok := _c.mutation.UserID(); !ok {
		return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "PromoCodeUsage.user_id"`)}
	}
	if _, ok := _c.mutation.BonusAmount(); !ok {
		return &ValidationError{Name: "bonus_amount", err: errors.New(`ent: missing required field "PromoCodeUsage.bonus_amount"`)}
	}
	if _, ok := _c.mutation.UsedAt(); !ok {
		return &ValidationError{Name: "used_at", err: errors.New(`ent: missing required field "PromoCodeUsage.used_at"`)}
	}
	if len(_c.mutation.PromoCodeIDs()) == 0 {
		return &ValidationError{Name: "promo_code", err: errors.New(`ent: missing required edge "PromoCodeUsage.promo_code"`)}
	}
	if len(_c.mutation.UserIDs()) == 0 {
		return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "PromoCodeUsage.user"`)}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, executes the INSERT, and copies the
// database-assigned ID back onto the returned entity and the mutation.
func (_c *PromoCodeUsageCreate) sqlSave(ctx context.Context) (*PromoCodeUsage, error) {
	if err := _c.check(); err != nil {
		return nil, err
	}
	_node, _spec := _c.createSpec()
	if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil {
		// Surface constraint violations as the typed ConstraintError.
		if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return nil, err
	}
	id := _spec.ID.Value.(int64)
	_node.ID = int64(id)
	_c.mutation.id = &_node.ID
	_c.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// createSpec translates the mutation into the node to return and the
// sqlgraph.CreateSpec to execute. The promo_code_id/user_id FK columns are not
// set as plain fields here; they are written through the edge specs below.
func (_c *PromoCodeUsageCreate) createSpec() (*PromoCodeUsage, *sqlgraph.CreateSpec) {
	var (
		_node = &PromoCodeUsage{config: _c.config}
		_spec = sqlgraph.NewCreateSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
	)
	_spec.OnConflict = _c.conflict
	if value, ok := _c.mutation.BonusAmount(); ok {
		_spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
		_node.BonusAmount = value
	}
	if value, ok := _c.mutation.UsedAt(); ok {
		_spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value)
		_node.UsedAt = value
	}
	if nodes := _c.mutation.PromoCodeIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.PromoCodeTable,
			Columns: []string{promocodeusage.PromoCodeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O edge: exactly one target; mirror it onto the FK field of the node.
		_node.PromoCodeID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	if nodes := _c.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.UserTable,
			Columns: []string{promocodeusage.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		// M2O edge: exactly one target; mirror it onto the FK field of the node.
		_node.UserID = nodes[0]
		_spec.Edges = append(_spec.Edges, edge)
	}
	return _node, _spec
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.PromoCodeUsage.Create().
//		SetPromoCodeID(v).
//		OnConflict(
//			// Update the row with the new values
//			// that was proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.PromoCodeUsageUpsert) {
//			SetPromoCodeID(v+v).
//		}).
//		Exec(ctx)
func (_c *PromoCodeUsageCreate) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertOne {
	_c.conflict = opts
	return &PromoCodeUsageUpsertOne{
		create: _c,
	}
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.PromoCodeUsage.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (_c *PromoCodeUsageCreate) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertOne {
	_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
	return &PromoCodeUsageUpsertOne{
		create: _c,
	}
}
|
||||||
|
|
||||||
|
type (
	// PromoCodeUsageUpsertOne is the builder for "upsert"-ing
	// one PromoCodeUsage node.
	PromoCodeUsageUpsertOne struct {
		create *PromoCodeUsageCreate
	}

	// PromoCodeUsageUpsert is the "OnConflict" setter.
	// It embeds the generic UPDATE SET clause builder.
	PromoCodeUsageUpsert struct {
		*sql.UpdateSet
	}
)
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetPromoCodeID(v int64) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldPromoCodeID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdatePromoCodeID() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldPromoCodeID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetUserID(v int64) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldUserID, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdateUserID() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldUserID)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetBonusAmount(v float64) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldBonusAmount, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdateBonusAmount() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldBonusAmount)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds v to the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) AddBonusAmount(v float64) *PromoCodeUsageUpsert {
|
||||||
|
u.Add(promocodeusage.FieldBonusAmount, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (u *PromoCodeUsageUpsert) SetUsedAt(v time.Time) *PromoCodeUsageUpsert {
|
||||||
|
u.Set(promocodeusage.FieldUsedAt, v)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsedAt sets the "used_at" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsert) UpdateUsedAt() *PromoCodeUsageUpsert {
|
||||||
|
u.SetExcluded(promocodeusage.FieldUsedAt)
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that were set on create.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateNewValues() *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertOne) Ignore() *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) DoNothing() *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreate.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertOne {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&PromoCodeUsageUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetPromoCodeID(v int64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetPromoCodeID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdatePromoCodeID() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdatePromoCodeID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetUserID(v int64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUserID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateUserID() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUserID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetBonusAmount(v float64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds v to the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) AddBonusAmount(v float64) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.AddBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateBonusAmount() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateBonusAmount()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) SetUsedAt(v time.Time) *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUsedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsedAt sets the "used_at" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) UpdateUsedAt() *PromoCodeUsageUpsertOne {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) Exec(ctx context.Context) error {
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for PromoCodeUsageCreate.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the UPSERT query and returns the inserted/updated ID.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) ID(ctx context.Context) (id int64, err error) {
|
||||||
|
node, err := u.create.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return id, err
|
||||||
|
}
|
||||||
|
return node.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDX is like ID, but panics if an error occurs.
|
||||||
|
func (u *PromoCodeUsageUpsertOne) IDX(ctx context.Context) int64 {
|
||||||
|
id, err := u.ID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageCreateBulk is the builder for creating many PromoCodeUsage entities in bulk.
|
||||||
|
type PromoCodeUsageCreateBulk struct {
|
||||||
|
config
|
||||||
|
err error
|
||||||
|
builders []*PromoCodeUsageCreate
|
||||||
|
conflict []sql.ConflictOption
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save creates the PromoCodeUsage entities in the database.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) Save(ctx context.Context) ([]*PromoCodeUsage, error) {
|
||||||
|
if _c.err != nil {
|
||||||
|
return nil, _c.err
|
||||||
|
}
|
||||||
|
specs := make([]*sqlgraph.CreateSpec, len(_c.builders))
|
||||||
|
nodes := make([]*PromoCodeUsage, len(_c.builders))
|
||||||
|
mutators := make([]Mutator, len(_c.builders))
|
||||||
|
for i := range _c.builders {
|
||||||
|
func(i int, root context.Context) {
|
||||||
|
builder := _c.builders[i]
|
||||||
|
builder.defaults()
|
||||||
|
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||||
|
mutation, ok := m.(*PromoCodeUsageMutation)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||||
|
}
|
||||||
|
if err := builder.check(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
builder.mutation = mutation
|
||||||
|
var err error
|
||||||
|
nodes[i], specs[i] = builder.createSpec()
|
||||||
|
if i < len(mutators)-1 {
|
||||||
|
_, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation)
|
||||||
|
} else {
|
||||||
|
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||||
|
spec.OnConflict = _c.conflict
|
||||||
|
// Invoke the actual operation on the latest mutation in the chain.
|
||||||
|
if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil {
|
||||||
|
if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mutation.id = &nodes[i].ID
|
||||||
|
if specs[i].ID.Value != nil {
|
||||||
|
id := specs[i].ID.Value.(int64)
|
||||||
|
nodes[i].ID = int64(id)
|
||||||
|
}
|
||||||
|
mutation.done = true
|
||||||
|
return nodes[i], nil
|
||||||
|
})
|
||||||
|
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||||
|
mut = builder.hooks[i](mut)
|
||||||
|
}
|
||||||
|
mutators[i] = mut
|
||||||
|
}(i, ctx)
|
||||||
|
}
|
||||||
|
if len(mutators) > 0 {
|
||||||
|
if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) SaveX(ctx context.Context) []*PromoCodeUsage {
|
||||||
|
v, err := _c.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) Exec(ctx context.Context) error {
|
||||||
|
_, err := _c.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := _c.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
|
||||||
|
// of the `INSERT` statement. For example:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.CreateBulk(builders...).
|
||||||
|
// OnConflict(
|
||||||
|
// // Update the row with the new values
|
||||||
|
// // the was proposed for insertion.
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// // Override some of the fields with custom
|
||||||
|
// // update values.
|
||||||
|
// Update(func(u *ent.PromoCodeUsageUpsert) {
|
||||||
|
// SetPromoCodeID(v+v).
|
||||||
|
// }).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertBulk {
|
||||||
|
_c.conflict = opts
|
||||||
|
return &PromoCodeUsageUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnConflictColumns calls `OnConflict` and configures the columns
|
||||||
|
// as conflict target. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ConflictColumns(columns...)).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (_c *PromoCodeUsageCreateBulk) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertBulk {
|
||||||
|
_c.conflict = append(_c.conflict, sql.ConflictColumns(columns...))
|
||||||
|
return &PromoCodeUsageUpsertBulk{
|
||||||
|
create: _c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageUpsertBulk is the builder for "upsert"-ing
|
||||||
|
// a bulk of PromoCodeUsage nodes.
|
||||||
|
type PromoCodeUsageUpsertBulk struct {
|
||||||
|
create *PromoCodeUsageCreateBulk
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateNewValues updates the mutable fields using the new values that
|
||||||
|
// were set on create. Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(
|
||||||
|
// sql.ResolveWithNewValues(),
|
||||||
|
// ).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateNewValues() *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore sets each column to itself in case of conflict.
|
||||||
|
// Using this option is equivalent to using:
|
||||||
|
//
|
||||||
|
// client.PromoCodeUsage.Create().
|
||||||
|
// OnConflict(sql.ResolveWithIgnore()).
|
||||||
|
// Exec(ctx)
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) Ignore() *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// DoNothing configures the conflict_action to `DO NOTHING`.
|
||||||
|
// Supported only by SQLite and PostgreSQL.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) DoNothing() *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.DoNothing())
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreateBulk.OnConflict
|
||||||
|
// documentation for more info.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertBulk {
|
||||||
|
u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
|
||||||
|
set(&PromoCodeUsageUpsert{UpdateSet: update})
|
||||||
|
}))
|
||||||
|
return u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetPromoCodeID(v int64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetPromoCodeID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdatePromoCodeID() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdatePromoCodeID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user_id" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetUserID(v int64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUserID(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUserID sets the "user_id" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateUserID() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUserID()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetBonusAmount sets the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetBonusAmount(v float64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddBonusAmount adds v to the "bonus_amount" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) AddBonusAmount(v float64) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.AddBonusAmount(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateBonusAmount() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateBonusAmount()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) SetUsedAt(v time.Time) *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.SetUsedAt(v)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdateUsedAt sets the "used_at" field to the value that was provided on create.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) UpdateUsedAt() *PromoCodeUsageUpsertBulk {
|
||||||
|
return u.Update(func(s *PromoCodeUsageUpsert) {
|
||||||
|
s.UpdateUsedAt()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) Exec(ctx context.Context) error {
|
||||||
|
if u.create.err != nil {
|
||||||
|
return u.create.err
|
||||||
|
}
|
||||||
|
for i, b := range u.create.builders {
|
||||||
|
if len(b.conflict) != 0 {
|
||||||
|
return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PromoCodeUsageCreateBulk instead", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(u.create.conflict) == 0 {
|
||||||
|
return errors.New("ent: missing options for PromoCodeUsageCreateBulk.OnConflict")
|
||||||
|
}
|
||||||
|
return u.create.Exec(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (u *PromoCodeUsageUpsertBulk) ExecX(ctx context.Context) {
|
||||||
|
if err := u.create.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
88
backend/ent/promocodeusage_delete.go
Normal file
88
backend/ent/promocodeusage_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageDelete is the builder for deleting a PromoCodeUsage entity.
|
||||||
|
type PromoCodeUsageDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *PromoCodeUsageMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUsageDelete builder.
|
||||||
|
func (_d *PromoCodeUsageDelete) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *PromoCodeUsageDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeUsageDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *PromoCodeUsageDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// PromoCodeUsageDeleteOne is the builder for deleting a single PromoCodeUsage entity.
|
||||||
|
type PromoCodeUsageDeleteOne struct {
|
||||||
|
_d *PromoCodeUsageDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUsageDelete builder.
|
||||||
|
func (_d *PromoCodeUsageDeleteOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *PromoCodeUsageDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{promocodeusage.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *PromoCodeUsageDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
718
backend/ent/promocodeusage_query.go
Normal file
718
backend/ent/promocodeusage_query.go
Normal file
@@ -0,0 +1,718 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageQuery is the builder for querying PromoCodeUsage entities.
|
||||||
|
type PromoCodeUsageQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []promocodeusage.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.PromoCodeUsage
|
||||||
|
withPromoCode *PromoCodeQuery
|
||||||
|
withUser *UserQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the PromoCodeUsageQuery builder.
|
||||||
|
func (_q *PromoCodeUsageQuery) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *PromoCodeUsageQuery) Limit(limit int) *PromoCodeUsageQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *PromoCodeUsageQuery) Offset(offset int) *PromoCodeUsageQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *PromoCodeUsageQuery) Unique(unique bool) *PromoCodeUsageQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *PromoCodeUsageQuery) Order(o ...promocodeusage.OrderOption) *PromoCodeUsageQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryPromoCode chains the current query on the "promo_code" edge.
|
||||||
|
func (_q *PromoCodeUsageQuery) QueryPromoCode() *PromoCodeQuery {
|
||||||
|
query := (&PromoCodeClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector),
|
||||||
|
sqlgraph.To(promocode.Table, promocode.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
|
||||||
|
func (_q *PromoCodeUsageQuery) QueryUser() *UserQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector),
|
||||||
|
sqlgraph.To(user.Table, user.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first PromoCodeUsage entity from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCodeUsage was found.
|
||||||
|
func (_q *PromoCodeUsageQuery) First(ctx context.Context) (*PromoCodeUsage, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{promocodeusage.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) FirstX(ctx context.Context) *PromoCodeUsage {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first PromoCodeUsage ID from the query.
|
||||||
|
// Returns a *NotFoundError when no PromoCodeUsage ID was found.
|
||||||
|
func (_q *PromoCodeUsageQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{promocodeusage.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single PromoCodeUsage entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one PromoCodeUsage entity is found.
|
||||||
|
// Returns a *NotFoundError when no PromoCodeUsage entities are found.
|
||||||
|
func (_q *PromoCodeUsageQuery) Only(ctx context.Context) (*PromoCodeUsage, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{promocodeusage.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{promocodeusage.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) OnlyX(ctx context.Context) *PromoCodeUsage {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only PromoCodeUsage ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one PromoCodeUsage ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *PromoCodeUsageQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{promocodeusage.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{promocodeusage.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of PromoCodeUsages.
|
||||||
|
func (_q *PromoCodeUsageQuery) All(ctx context.Context) ([]*PromoCodeUsage, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*PromoCodeUsage, *PromoCodeUsageQuery]()
|
||||||
|
return withInterceptors[[]*PromoCodeUsage](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) AllX(ctx context.Context) []*PromoCodeUsage {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of PromoCodeUsage IDs.
|
||||||
|
func (_q *PromoCodeUsageQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(promocodeusage.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *PromoCodeUsageQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*PromoCodeUsageQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *PromoCodeUsageQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *PromoCodeUsageQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
func (_q *PromoCodeUsageQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the PromoCodeUsageQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *PromoCodeUsageQuery) Clone() *PromoCodeUsageQuery {
	if _q == nil {
		return nil
	}
	// Slices and eager-load sub-queries are copied so the clone is independent.
	return &PromoCodeUsageQuery{
		config:        _q.config,
		ctx:           _q.ctx.Clone(),
		order:         append([]promocodeusage.OrderOption{}, _q.order...),
		inters:        append([]Interceptor{}, _q.inters...),
		predicates:    append([]predicate.PromoCodeUsage{}, _q.predicates...),
		withPromoCode: _q.withPromoCode.Clone(),
		withUser:      _q.withUser.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}
|
||||||
|
|
||||||
|
// WithPromoCode tells the query-builder to eager-load the nodes that are connected to
// the "promo_code" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *PromoCodeUsageQuery) WithPromoCode(opts ...func(*PromoCodeQuery)) *PromoCodeUsageQuery {
	query := (&PromoCodeClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withPromoCode = query
	return _q
}
|
||||||
|
|
||||||
|
// WithUser tells the query-builder to eager-load the nodes that are connected to
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *PromoCodeUsageQuery) WithUser(opts ...func(*UserQuery)) *PromoCodeUsageQuery {
	query := (&UserClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withUser = query
	return _q
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		PromoCodeID int64 `json:"promo_code_id,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.PromoCodeUsage.Query().
//		GroupBy(promocodeusage.FieldPromoCodeID).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *PromoCodeUsageQuery) GroupBy(field string, fields ...string) *PromoCodeUsageGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &PromoCodeUsageGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = promocodeusage.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		PromoCodeID int64 `json:"promo_code_id,omitempty"`
//	}
//
//	client.PromoCodeUsage.Query().
//		Select(promocodeusage.FieldPromoCodeID).
//		Scan(ctx, &v)
func (_q *PromoCodeUsageQuery) Select(fields ...string) *PromoCodeUsageSelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &PromoCodeUsageSelect{PromoCodeUsageQuery: _q}
	sbuild.label = promocodeusage.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}
|
||||||
|
|
||||||
|
// Aggregate returns a PromoCodeUsageSelect configured with the given aggregations.
func (_q *PromoCodeUsageQuery) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect {
	return _q.Select().Aggregate(fns...)
}
|
||||||
|
|
||||||
|
// prepareQuery runs interceptors, validates selected columns, and resolves
// the intermediate SQL selector (for graph-traversal queries) before execution.
func (_q *PromoCodeUsageQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	// Reject field names that are not columns of the promo_code_usages table.
	for _, f := range _q.ctx.Fields {
		if !promocodeusage.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll executes the query against the database, scans the rows into
// PromoCodeUsage nodes, and eager-loads any requested edges.
func (_q *PromoCodeUsageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCodeUsage, error) {
	var (
		nodes = []*PromoCodeUsage{}
		_spec = _q.querySpec()
		// loadedTypes marks which edges were eager-loaded, in edge-declaration order.
		loadedTypes = [2]bool{
			_q.withPromoCode != nil,
			_q.withUser != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*PromoCodeUsage).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &PromoCodeUsage{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withPromoCode; query != nil {
		if err := _q.loadPromoCode(ctx, query, nodes, nil,
			func(n *PromoCodeUsage, e *PromoCode) { n.Edges.PromoCode = e }); err != nil {
			return nil, err
		}
	}
	if query := _q.withUser; query != nil {
		if err := _q.loadUser(ctx, query, nodes, nil,
			func(n *PromoCodeUsage, e *User) { n.Edges.User = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadPromoCode eager-loads the "promo_code" M2O edge for the given nodes:
// it collects the distinct promo_code_id foreign keys, fetches the matching
// PromoCode rows in one query, and assigns each neighbor back to its usages.
func (_q *PromoCodeUsageQuery) loadPromoCode(ctx context.Context, query *PromoCodeQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *PromoCode)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*PromoCodeUsage)
	for i := range nodes {
		fk := nodes[i].PromoCodeID
		// Deduplicate foreign keys so each parent is fetched only once.
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(promocode.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "promo_code_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
// loadUser eager-loads the "user" M2O edge for the given nodes: it collects
// the distinct user_id foreign keys, fetches the matching User rows in one
// query, and assigns each neighbor back to its usages.
func (_q *PromoCodeUsageQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *User)) error {
	ids := make([]int64, 0, len(nodes))
	nodeids := make(map[int64][]*PromoCodeUsage)
	for i := range nodes {
		fk := nodes[i].UserID
		// Deduplicate foreign keys so each parent is fetched only once.
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount builds the query spec and executes a COUNT against the database.
func (_q *PromoCodeUsageQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		// COUNT(DISTINCT ...) only when the caller explicitly requested uniqueness.
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}
|
||||||
|
|
||||||
|
// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec for execution.
func (_q *PromoCodeUsageQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals may produce duplicates; default to unique rows.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected first.
		_spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID)
		for i := range fields {
			if fields[i] != promocodeusage.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
		// Foreign-key columns are required when their edges are eager-loaded.
		if _q.withPromoCode != nil {
			_spec.Node.AddColumnOnce(promocodeusage.FieldPromoCodeID)
		}
		if _q.withUser != nil {
			_spec.Node.AddColumnOnce(promocodeusage.FieldUserID)
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw SQL selector for this query, applying selected
// columns, DISTINCT, modifiers, predicates, ordering, and limit/offset.
func (_q *PromoCodeUsageQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(promocodeusage.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = promocodeusage.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Reuse the intermediate selector produced by a graph traversal.
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *PromoCodeUsageQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeUsageQuery {
	// Postgres disallows SELECT DISTINCT ... FOR UPDATE; drop uniqueness there.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *PromoCodeUsageQuery) ForShare(opts ...sql.LockOption) *PromoCodeUsageQuery {
	// Postgres disallows SELECT DISTINCT ... FOR SHARE; drop uniqueness there.
	if _q.driver.Dialect() == dialect.Postgres {
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// PromoCodeUsageGroupBy is the group-by builder for PromoCodeUsage entities.
type PromoCodeUsageGroupBy struct {
	selector
	// build is the underlying query the GROUP BY is applied to.
	build *PromoCodeUsageQuery
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
func (_g *PromoCodeUsageGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeUsageGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
func (_g *PromoCodeUsageGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan builds and runs the GROUP BY statement, scanning the rows into v.
func (_g *PromoCodeUsageGroupBy) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		// Select the group-by fields followed by the aggregation expressions.
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// PromoCodeUsageSelect is the builder for selecting fields of PromoCodeUsage entities.
type PromoCodeUsageSelect struct {
	*PromoCodeUsageQuery
	selector
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
func (_s *PromoCodeUsageSelect) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
func (_s *PromoCodeUsageSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageSelect](ctx, _s.PromoCodeUsageQuery, _s, _s.inters, v)
}
|
||||||
|
|
||||||
|
// sqlScan builds and runs the SELECT statement, scanning the rows into v.
func (_s *PromoCodeUsageSelect) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Aggregations either replace the selection (no fields) or extend it.
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
510
backend/ent/promocodeusage_update.go
Normal file
510
backend/ent/promocodeusage_update.go
Normal file
@@ -0,0 +1,510 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PromoCodeUsageUpdate is the builder for updating PromoCodeUsage entities.
type PromoCodeUsageUpdate struct {
	config
	hooks    []Hook
	mutation *PromoCodeUsageMutation
}

// Where appends a list predicates to the PromoCodeUsageUpdate builder.
func (_u *PromoCodeUsageUpdate) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
func (_u *PromoCodeUsageUpdate) SetPromoCodeID(v int64) *PromoCodeUsageUpdate {
	_u.mutation.SetPromoCodeID(v)
	return _u
}

// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetPromoCodeID(*v)
	}
	return _u
}

// SetUserID sets the "user_id" field.
func (_u *PromoCodeUsageUpdate) SetUserID(v int64) *PromoCodeUsageUpdate {
	_u.mutation.SetUserID(v)
	return _u
}

// SetNillableUserID sets the "user_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillableUserID(v *int64) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}

// SetBonusAmount sets the "bonus_amount" field.
func (_u *PromoCodeUsageUpdate) SetBonusAmount(v float64) *PromoCodeUsageUpdate {
	// Reset clears any pending AddBonusAmount so Set wins over Add.
	_u.mutation.ResetBonusAmount()
	_u.mutation.SetBonusAmount(v)
	return _u
}

// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetBonusAmount(*v)
	}
	return _u
}

// AddBonusAmount adds value to the "bonus_amount" field.
func (_u *PromoCodeUsageUpdate) AddBonusAmount(v float64) *PromoCodeUsageUpdate {
	_u.mutation.AddBonusAmount(v)
	return _u
}

// SetUsedAt sets the "used_at" field.
func (_u *PromoCodeUsageUpdate) SetUsedAt(v time.Time) *PromoCodeUsageUpdate {
	_u.mutation.SetUsedAt(v)
	return _u
}

// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
func (_u *PromoCodeUsageUpdate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdate {
	if v != nil {
		_u.SetUsedAt(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetPromoCode sets the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdate) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdate {
	return _u.SetPromoCodeID(v.ID)
}

// SetUser sets the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdate) SetUser(v *User) *PromoCodeUsageUpdate {
	return _u.SetUserID(v.ID)
}

// Mutation returns the PromoCodeUsageMutation object of the builder.
func (_u *PromoCodeUsageUpdate) Mutation() *PromoCodeUsageMutation {
	return _u.mutation
}

// ClearPromoCode clears the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdate) ClearPromoCode() *PromoCodeUsageUpdate {
	_u.mutation.ClearPromoCode()
	return _u
}

// ClearUser clears the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdate) ClearUser() *PromoCodeUsageUpdate {
	_u.mutation.ClearUser()
	return _u
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *PromoCodeUsageUpdate) Save(ctx context.Context) (int, error) {
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *PromoCodeUsageUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *PromoCodeUsageUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *PromoCodeUsageUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
func (_u *PromoCodeUsageUpdate) check() error {
	// Required unique edges may be re-assigned but never cleared outright.
	if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`)
	}
	if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
		return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlSave validates the builder, translates the mutation into an update spec
// (fields, added values, edge clears/adds), and executes it, returning the
// number of affected rows.
func (_u *PromoCodeUsageUpdate) sqlSave(ctx context.Context) (_node int, err error) {
	if err := _u.check(); err != nil {
		return _node, err
	}
	_spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
	if ps := _u.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := _u.mutation.BonusAmount(); ok {
		_spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.AddedBonusAmount(); ok {
		_spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
	}
	if value, ok := _u.mutation.UsedAt(); ok {
		_spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value)
	}
	// Re-assigning an M2O edge is expressed as a clear followed by an add.
	if _u.mutation.PromoCodeCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.PromoCodeTable,
			Columns: []string{promocodeusage.PromoCodeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.PromoCodeTable,
			Columns: []string{promocodeusage.PromoCodeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _u.mutation.UserCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.UserTable,
			Columns: []string{promocodeusage.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: true,
			Table:   promocodeusage.UserTable,
			Columns: []string{promocodeusage.UserColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{promocodeusage.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	_u.mutation.done = true
	return _node, nil
}
|
||||||
|
|
||||||
|
// PromoCodeUsageUpdateOne is the builder for updating a single PromoCodeUsage entity.
type PromoCodeUsageUpdateOne struct {
	config
	// fields limits which columns are returned by the updated entity.
	fields   []string
	hooks    []Hook
	mutation *PromoCodeUsageMutation
}
|
||||||
|
|
||||||
|
// SetPromoCodeID sets the "promo_code_id" field.
func (_u *PromoCodeUsageUpdateOne) SetPromoCodeID(v int64) *PromoCodeUsageUpdateOne {
	_u.mutation.SetPromoCodeID(v)
	return _u
}

// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetPromoCodeID(*v)
	}
	return _u
}

// SetUserID sets the "user_id" field.
func (_u *PromoCodeUsageUpdateOne) SetUserID(v int64) *PromoCodeUsageUpdateOne {
	_u.mutation.SetUserID(v)
	return _u
}

// SetNillableUserID sets the "user_id" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillableUserID(v *int64) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetUserID(*v)
	}
	return _u
}

// SetBonusAmount sets the "bonus_amount" field.
func (_u *PromoCodeUsageUpdateOne) SetBonusAmount(v float64) *PromoCodeUsageUpdateOne {
	// Reset clears any pending AddBonusAmount so Set wins over Add.
	_u.mutation.ResetBonusAmount()
	_u.mutation.SetBonusAmount(v)
	return _u
}

// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetBonusAmount(*v)
	}
	return _u
}

// AddBonusAmount adds value to the "bonus_amount" field.
func (_u *PromoCodeUsageUpdateOne) AddBonusAmount(v float64) *PromoCodeUsageUpdateOne {
	_u.mutation.AddBonusAmount(v)
	return _u
}

// SetUsedAt sets the "used_at" field.
func (_u *PromoCodeUsageUpdateOne) SetUsedAt(v time.Time) *PromoCodeUsageUpdateOne {
	_u.mutation.SetUsedAt(v)
	return _u
}

// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
func (_u *PromoCodeUsageUpdateOne) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdateOne {
	if v != nil {
		_u.SetUsedAt(*v)
	}
	return _u
}
|
||||||
|
|
||||||
|
// SetPromoCode sets the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdateOne) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdateOne {
	return _u.SetPromoCodeID(v.ID)
}

// SetUser sets the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdateOne) SetUser(v *User) *PromoCodeUsageUpdateOne {
	return _u.SetUserID(v.ID)
}

// Mutation returns the PromoCodeUsageMutation object of the builder.
func (_u *PromoCodeUsageUpdateOne) Mutation() *PromoCodeUsageMutation {
	return _u.mutation
}

// ClearPromoCode clears the "promo_code" edge to the PromoCode entity.
func (_u *PromoCodeUsageUpdateOne) ClearPromoCode() *PromoCodeUsageUpdateOne {
	_u.mutation.ClearPromoCode()
	return _u
}

// ClearUser clears the "user" edge to the User entity.
func (_u *PromoCodeUsageUpdateOne) ClearUser() *PromoCodeUsageUpdateOne {
	_u.mutation.ClearUser()
	return _u
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the PromoCodeUsageUpdate builder.
func (_u *PromoCodeUsageUpdateOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdateOne {
	_u.mutation.Where(ps...)
	return _u
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (_u *PromoCodeUsageUpdateOne) Select(field string, fields ...string) *PromoCodeUsageUpdateOne {
	_u.fields = append([]string{field}, fields...)
	return _u
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated PromoCodeUsage entity.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Save(ctx context.Context) (*PromoCodeUsage, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) SaveX(ctx context.Context) *PromoCodeUsage {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) check() error {
|
||||||
|
if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 {
|
||||||
|
return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *PromoCodeUsageUpdateOne) sqlSave(ctx context.Context) (_node *PromoCodeUsage, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCodeUsage.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !promocodeusage.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != promocodeusage.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.BonusAmount(); ok {
|
||||||
|
_spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedBonusAmount(); ok {
|
||||||
|
_spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UsedAt(); ok {
|
||||||
|
_spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.PromoCodeCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.PromoCodeTable,
|
||||||
|
Columns: []string{promocodeusage.PromoCodeColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.PromoCodeTable,
|
||||||
|
Columns: []string{promocodeusage.PromoCodeColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.UserTable,
|
||||||
|
Columns: []string{promocodeusage.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: promocodeusage.UserTable,
|
||||||
|
Columns: []string{promocodeusage.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &PromoCodeUsage{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{promocodeusage.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
240
backend/ent/proxy.go
Normal file
240
backend/ent/proxy.go
Normal file
@@ -0,0 +1,240 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Proxy is the model entity for the Proxy schema.
|
||||||
|
type Proxy struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// UpdatedAt holds the value of the "updated_at" field.
|
||||||
|
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||||
|
// DeletedAt holds the value of the "deleted_at" field.
|
||||||
|
DeletedAt *time.Time `json:"deleted_at,omitempty"`
|
||||||
|
// Name holds the value of the "name" field.
|
||||||
|
Name string `json:"name,omitempty"`
|
||||||
|
// Protocol holds the value of the "protocol" field.
|
||||||
|
Protocol string `json:"protocol,omitempty"`
|
||||||
|
// Host holds the value of the "host" field.
|
||||||
|
Host string `json:"host,omitempty"`
|
||||||
|
// Port holds the value of the "port" field.
|
||||||
|
Port int `json:"port,omitempty"`
|
||||||
|
// Username holds the value of the "username" field.
|
||||||
|
Username *string `json:"username,omitempty"`
|
||||||
|
// Password holds the value of the "password" field.
|
||||||
|
Password *string `json:"password,omitempty"`
|
||||||
|
// Status holds the value of the "status" field.
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the ProxyQuery when eager-loading is set.
|
||||||
|
Edges ProxyEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type ProxyEdges struct {
|
||||||
|
// Accounts holds the value of the accounts edge.
|
||||||
|
Accounts []*Account `json:"accounts,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [1]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// AccountsOrErr returns the Accounts value or an error if the edge
|
||||||
|
// was not loaded in eager-loading.
|
||||||
|
func (e ProxyEdges) AccountsOrErr() ([]*Account, error) {
|
||||||
|
if e.loadedTypes[0] {
|
||||||
|
return e.Accounts, nil
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "accounts"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*Proxy) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case proxy.FieldID, proxy.FieldPort:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case proxy.FieldName, proxy.FieldProtocol, proxy.FieldHost, proxy.FieldUsername, proxy.FieldPassword, proxy.FieldStatus:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case proxy.FieldCreatedAt, proxy.FieldUpdatedAt, proxy.FieldDeletedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the Proxy fields.
|
||||||
|
func (_m *Proxy) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case proxy.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case proxy.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case proxy.FieldUpdatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UpdatedAt = value.Time
|
||||||
|
}
|
||||||
|
case proxy.FieldDeletedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field deleted_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.DeletedAt = new(time.Time)
|
||||||
|
*_m.DeletedAt = value.Time
|
||||||
|
}
|
||||||
|
case proxy.FieldName:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Name = value.String
|
||||||
|
}
|
||||||
|
case proxy.FieldProtocol:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field protocol", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Protocol = value.String
|
||||||
|
}
|
||||||
|
case proxy.FieldHost:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field host", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Host = value.String
|
||||||
|
}
|
||||||
|
case proxy.FieldPort:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field port", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Port = int(value.Int64)
|
||||||
|
}
|
||||||
|
case proxy.FieldUsername:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field username", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Username = new(string)
|
||||||
|
*_m.Username = value.String
|
||||||
|
}
|
||||||
|
case proxy.FieldPassword:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field password", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Password = new(string)
|
||||||
|
*_m.Password = value.String
|
||||||
|
}
|
||||||
|
case proxy.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value returns the ent.Value that was dynamically selected and assigned to the Proxy.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *Proxy) Value(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccounts queries the "accounts" edge of the Proxy entity.
|
||||||
|
func (_m *Proxy) QueryAccounts() *AccountQuery {
|
||||||
|
return NewProxyClient(_m.config).QueryAccounts(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this Proxy.
|
||||||
|
// Note that you need to call Proxy.Unwrap() before calling this method if this Proxy
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *Proxy) Update() *ProxyUpdateOne {
|
||||||
|
return NewProxyClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the Proxy entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *Proxy) Unwrap() *Proxy {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: Proxy is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *Proxy) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("Proxy(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("updated_at=")
|
||||||
|
builder.WriteString(_m.UpdatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.DeletedAt; v != nil {
|
||||||
|
builder.WriteString("deleted_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("name=")
|
||||||
|
builder.WriteString(_m.Name)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("protocol=")
|
||||||
|
builder.WriteString(_m.Protocol)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("host=")
|
||||||
|
builder.WriteString(_m.Host)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("port=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Port))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Username; v != nil {
|
||||||
|
builder.WriteString("username=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Password; v != nil {
|
||||||
|
builder.WriteString("password=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Proxies is a parsable slice of Proxy.
|
||||||
|
type Proxies []*Proxy
|
||||||
183
backend/ent/proxy/proxy.go
Normal file
183
backend/ent/proxy/proxy.go
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the proxy type in the database.
|
||||||
|
Label = "proxy"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||||
|
FieldUpdatedAt = "updated_at"
|
||||||
|
// FieldDeletedAt holds the string denoting the deleted_at field in the database.
|
||||||
|
FieldDeletedAt = "deleted_at"
|
||||||
|
// FieldName holds the string denoting the name field in the database.
|
||||||
|
FieldName = "name"
|
||||||
|
// FieldProtocol holds the string denoting the protocol field in the database.
|
||||||
|
FieldProtocol = "protocol"
|
||||||
|
// FieldHost holds the string denoting the host field in the database.
|
||||||
|
FieldHost = "host"
|
||||||
|
// FieldPort holds the string denoting the port field in the database.
|
||||||
|
FieldPort = "port"
|
||||||
|
// FieldUsername holds the string denoting the username field in the database.
|
||||||
|
FieldUsername = "username"
|
||||||
|
// FieldPassword holds the string denoting the password field in the database.
|
||||||
|
FieldPassword = "password"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// EdgeAccounts holds the string denoting the accounts edge name in mutations.
|
||||||
|
EdgeAccounts = "accounts"
|
||||||
|
// Table holds the table name of the proxy in the database.
|
||||||
|
Table = "proxies"
|
||||||
|
// AccountsTable is the table that holds the accounts relation/edge.
|
||||||
|
AccountsTable = "accounts"
|
||||||
|
// AccountsInverseTable is the table name for the Account entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "account" package.
|
||||||
|
AccountsInverseTable = "accounts"
|
||||||
|
// AccountsColumn is the table column denoting the accounts relation/edge.
|
||||||
|
AccountsColumn = "proxy_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for proxy fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldUpdatedAt,
|
||||||
|
FieldDeletedAt,
|
||||||
|
FieldName,
|
||||||
|
FieldProtocol,
|
||||||
|
FieldHost,
|
||||||
|
FieldPort,
|
||||||
|
FieldUsername,
|
||||||
|
FieldPassword,
|
||||||
|
FieldStatus,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Note that the variables below are initialized by the runtime
|
||||||
|
// package on the initialization of the application. Therefore,
|
||||||
|
// it should be imported in the main as follows:
|
||||||
|
//
|
||||||
|
// import _ "github.com/Wei-Shaw/sub2api/ent/runtime"
|
||||||
|
var (
|
||||||
|
Hooks [1]ent.Hook
|
||||||
|
Interceptors [1]ent.Interceptor
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||||
|
DefaultUpdatedAt func() time.Time
|
||||||
|
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||||
|
UpdateDefaultUpdatedAt func() time.Time
|
||||||
|
// NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
NameValidator func(string) error
|
||||||
|
// ProtocolValidator is a validator for the "protocol" field. It is called by the builders before save.
|
||||||
|
ProtocolValidator func(string) error
|
||||||
|
// HostValidator is a validator for the "host" field. It is called by the builders before save.
|
||||||
|
HostValidator func(string) error
|
||||||
|
// UsernameValidator is a validator for the "username" field. It is called by the builders before save.
|
||||||
|
UsernameValidator func(string) error
|
||||||
|
// PasswordValidator is a validator for the "password" field. It is called by the builders before save.
|
||||||
|
PasswordValidator func(string) error
|
||||||
|
// DefaultStatus holds the default value on creation for the "status" field.
|
||||||
|
DefaultStatus string
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the Proxy queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUpdatedAt orders the results by the updated_at field.
|
||||||
|
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByDeletedAt orders the results by the deleted_at field.
|
||||||
|
func ByDeletedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldDeletedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByName orders the results by the name field.
|
||||||
|
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByProtocol orders the results by the protocol field.
|
||||||
|
func ByProtocol(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldProtocol, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByHost orders the results by the host field.
|
||||||
|
func ByHost(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldHost, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPort orders the results by the port field.
|
||||||
|
func ByPort(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPort, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsername orders the results by the username field.
|
||||||
|
func ByUsername(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsername, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByPassword orders the results by the password field.
|
||||||
|
func ByPassword(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldPassword, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAccountsCount orders the results by accounts count.
|
||||||
|
func ByAccountsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborsCount(s, newAccountsStep(), opts...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByAccounts orders the results by accounts terms.
|
||||||
|
func ByAccounts(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newAccountsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newAccountsStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(AccountsInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, true, AccountsTable, AccountsColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
724
backend/ent/proxy/where.go
Normal file
724
backend/ent/proxy/where.go
Normal file
@@ -0,0 +1,724 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||||
|
func UpdatedAt(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAt applies equality check predicate on the "deleted_at" field. It's identical to DeletedAtEQ.
|
||||||
|
func DeletedAt(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
|
||||||
|
func Name(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Protocol applies equality check predicate on the "protocol" field. It's identical to ProtocolEQ.
|
||||||
|
func Protocol(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Host applies equality check predicate on the "host" field. It's identical to HostEQ.
|
||||||
|
func Host(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Port applies equality check predicate on the "port" field. It's identical to PortEQ.
|
||||||
|
func Port(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ.
|
||||||
|
func Username(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Password applies equality check predicate on the "password" field. It's identical to PasswordEQ.
|
||||||
|
func Password(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
|
||||||
|
func Status(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtEQ(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNEQ(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtIn(vs ...time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtNotIn(vs ...time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGT(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtGTE(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLT(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||||
|
func UpdatedAtLTE(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldUpdatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtEQ applies the EQ predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtEQ(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtNEQ applies the NEQ predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtNEQ(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtIn applies the In predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtIn(vs ...time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldDeletedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtNotIn applies the NotIn predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtNotIn(vs ...time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldDeletedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtGT applies the GT predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtGT(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtGTE applies the GTE predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtGTE(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtLT applies the LT predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtLT(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtLTE applies the LTE predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtLTE(v time.Time) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldDeletedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtIsNil applies the IsNil predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtIsNil() predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIsNull(FieldDeletedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeletedAtNotNil applies the NotNil predicate on the "deleted_at" field.
|
||||||
|
func DeletedAtNotNil() predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotNull(FieldDeletedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameEQ applies the EQ predicate on the "name" field.
|
||||||
|
func NameEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||||
|
func NameNEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameIn applies the In predicate on the "name" field.
|
||||||
|
func NameIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldName, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||||
|
func NameNotIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldName, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameGT applies the GT predicate on the "name" field.
|
||||||
|
func NameGT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameGTE applies the GTE predicate on the "name" field.
|
||||||
|
func NameGTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameLT applies the LT predicate on the "name" field.
|
||||||
|
func NameLT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameLTE applies the LTE predicate on the "name" field.
|
||||||
|
func NameLTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameContains applies the Contains predicate on the "name" field.
|
||||||
|
func NameContains(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContains(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||||
|
func NameHasPrefix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasPrefix(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||||
|
func NameHasSuffix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasSuffix(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||||
|
func NameEqualFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEqualFold(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||||
|
func NameContainsFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContainsFold(FieldName, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolEQ applies the EQ predicate on the "protocol" field.
|
||||||
|
func ProtocolEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolNEQ applies the NEQ predicate on the "protocol" field.
|
||||||
|
func ProtocolNEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolIn applies the In predicate on the "protocol" field.
|
||||||
|
func ProtocolIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldProtocol, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolNotIn applies the NotIn predicate on the "protocol" field.
|
||||||
|
func ProtocolNotIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldProtocol, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolGT applies the GT predicate on the "protocol" field.
|
||||||
|
func ProtocolGT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolGTE applies the GTE predicate on the "protocol" field.
|
||||||
|
func ProtocolGTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolLT applies the LT predicate on the "protocol" field.
|
||||||
|
func ProtocolLT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolLTE applies the LTE predicate on the "protocol" field.
|
||||||
|
func ProtocolLTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolContains applies the Contains predicate on the "protocol" field.
|
||||||
|
func ProtocolContains(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContains(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolHasPrefix applies the HasPrefix predicate on the "protocol" field.
|
||||||
|
func ProtocolHasPrefix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasPrefix(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolHasSuffix applies the HasSuffix predicate on the "protocol" field.
|
||||||
|
func ProtocolHasSuffix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasSuffix(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolEqualFold applies the EqualFold predicate on the "protocol" field.
|
||||||
|
func ProtocolEqualFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEqualFold(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProtocolContainsFold applies the ContainsFold predicate on the "protocol" field.
|
||||||
|
func ProtocolContainsFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContainsFold(FieldProtocol, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostEQ applies the EQ predicate on the "host" field.
|
||||||
|
func HostEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostNEQ applies the NEQ predicate on the "host" field.
|
||||||
|
func HostNEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostIn applies the In predicate on the "host" field.
|
||||||
|
func HostIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldHost, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostNotIn applies the NotIn predicate on the "host" field.
|
||||||
|
func HostNotIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldHost, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostGT applies the GT predicate on the "host" field.
|
||||||
|
func HostGT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostGTE applies the GTE predicate on the "host" field.
|
||||||
|
func HostGTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostLT applies the LT predicate on the "host" field.
|
||||||
|
func HostLT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostLTE applies the LTE predicate on the "host" field.
|
||||||
|
func HostLTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostContains applies the Contains predicate on the "host" field.
|
||||||
|
func HostContains(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContains(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostHasPrefix applies the HasPrefix predicate on the "host" field.
|
||||||
|
func HostHasPrefix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasPrefix(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostHasSuffix applies the HasSuffix predicate on the "host" field.
|
||||||
|
func HostHasSuffix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasSuffix(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostEqualFold applies the EqualFold predicate on the "host" field.
|
||||||
|
func HostEqualFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEqualFold(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HostContainsFold applies the ContainsFold predicate on the "host" field.
|
||||||
|
func HostContainsFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContainsFold(FieldHost, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortEQ applies the EQ predicate on the "port" field.
|
||||||
|
func PortEQ(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortNEQ applies the NEQ predicate on the "port" field.
|
||||||
|
func PortNEQ(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortIn applies the In predicate on the "port" field.
|
||||||
|
func PortIn(vs ...int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldPort, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortNotIn applies the NotIn predicate on the "port" field.
|
||||||
|
func PortNotIn(vs ...int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldPort, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortGT applies the GT predicate on the "port" field.
|
||||||
|
func PortGT(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortGTE applies the GTE predicate on the "port" field.
|
||||||
|
func PortGTE(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortLT applies the LT predicate on the "port" field.
|
||||||
|
func PortLT(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PortLTE applies the LTE predicate on the "port" field.
|
||||||
|
func PortLTE(v int) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldPort, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameEQ applies the EQ predicate on the "username" field.
|
||||||
|
func UsernameEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameNEQ applies the NEQ predicate on the "username" field.
|
||||||
|
func UsernameNEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameIn applies the In predicate on the "username" field.
|
||||||
|
func UsernameIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldUsername, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameNotIn applies the NotIn predicate on the "username" field.
|
||||||
|
func UsernameNotIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldUsername, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameGT applies the GT predicate on the "username" field.
|
||||||
|
func UsernameGT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameGTE applies the GTE predicate on the "username" field.
|
||||||
|
func UsernameGTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameLT applies the LT predicate on the "username" field.
|
||||||
|
func UsernameLT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameLTE applies the LTE predicate on the "username" field.
|
||||||
|
func UsernameLTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameContains applies the Contains predicate on the "username" field.
|
||||||
|
func UsernameContains(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContains(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameHasPrefix applies the HasPrefix predicate on the "username" field.
|
||||||
|
func UsernameHasPrefix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasPrefix(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameHasSuffix applies the HasSuffix predicate on the "username" field.
|
||||||
|
func UsernameHasSuffix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasSuffix(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameIsNil applies the IsNil predicate on the "username" field.
|
||||||
|
func UsernameIsNil() predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIsNull(FieldUsername))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameNotNil applies the NotNil predicate on the "username" field.
|
||||||
|
func UsernameNotNil() predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotNull(FieldUsername))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameEqualFold applies the EqualFold predicate on the "username" field.
|
||||||
|
func UsernameEqualFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEqualFold(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsernameContainsFold applies the ContainsFold predicate on the "username" field.
|
||||||
|
func UsernameContainsFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContainsFold(FieldUsername, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordEQ applies the EQ predicate on the "password" field.
|
||||||
|
func PasswordEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordNEQ applies the NEQ predicate on the "password" field.
|
||||||
|
func PasswordNEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordIn applies the In predicate on the "password" field.
|
||||||
|
func PasswordIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldPassword, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordNotIn applies the NotIn predicate on the "password" field.
|
||||||
|
func PasswordNotIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldPassword, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordGT applies the GT predicate on the "password" field.
|
||||||
|
func PasswordGT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordGTE applies the GTE predicate on the "password" field.
|
||||||
|
func PasswordGTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordLT applies the LT predicate on the "password" field.
|
||||||
|
func PasswordLT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordLTE applies the LTE predicate on the "password" field.
|
||||||
|
func PasswordLTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordContains applies the Contains predicate on the "password" field.
|
||||||
|
func PasswordContains(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContains(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordHasPrefix applies the HasPrefix predicate on the "password" field.
|
||||||
|
func PasswordHasPrefix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasPrefix(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordHasSuffix applies the HasSuffix predicate on the "password" field.
|
||||||
|
func PasswordHasSuffix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasSuffix(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordIsNil applies the IsNil predicate on the "password" field.
|
||||||
|
func PasswordIsNil() predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIsNull(FieldPassword))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordNotNil applies the NotNil predicate on the "password" field.
|
||||||
|
func PasswordNotNil() predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotNull(FieldPassword))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordEqualFold applies the EqualFold predicate on the "password" field.
|
||||||
|
func PasswordEqualFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEqualFold(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// PasswordContainsFold applies the ContainsFold predicate on the "password" field.
|
||||||
|
func PasswordContainsFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContainsFold(FieldPassword, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasAccounts applies the HasEdge predicate on the "accounts" edge.
|
||||||
|
func HasAccounts() predicate.Proxy {
|
||||||
|
return predicate.Proxy(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.O2M, true, AccountsTable, AccountsColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasAccountsWith applies the HasEdge predicate on the "accounts" edge with a given conditions (other predicates).
|
||||||
|
func HasAccountsWith(preds ...predicate.Account) predicate.Proxy {
|
||||||
|
return predicate.Proxy(func(s *sql.Selector) {
|
||||||
|
step := newAccountsStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.Proxy) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.Proxy) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.Proxy) predicate.Proxy {
|
||||||
|
return predicate.Proxy(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1112
backend/ent/proxy_create.go
Normal file
1112
backend/ent/proxy_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/proxy_delete.go
Normal file
88
backend/ent/proxy_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProxyDelete is the builder for deleting a Proxy entity.
|
||||||
|
type ProxyDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *ProxyMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the ProxyDelete builder.
|
||||||
|
func (_d *ProxyDelete) Where(ps ...predicate.Proxy) *ProxyDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *ProxyDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *ProxyDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *ProxyDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(proxy.Table, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyDeleteOne is the builder for deleting a single Proxy entity.
|
||||||
|
type ProxyDeleteOne struct {
|
||||||
|
_d *ProxyDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the ProxyDelete builder.
|
||||||
|
func (_d *ProxyDeleteOne) Where(ps ...predicate.Proxy) *ProxyDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *ProxyDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{proxy.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *ProxyDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
646
backend/ent/proxy_query.go
Normal file
646
backend/ent/proxy_query.go
Normal file
@@ -0,0 +1,646 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"database/sql/driver"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProxyQuery is the builder for querying Proxy entities.
|
||||||
|
type ProxyQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []proxy.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.Proxy
|
||||||
|
withAccounts *AccountQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the ProxyQuery builder.
|
||||||
|
func (_q *ProxyQuery) Where(ps ...predicate.Proxy) *ProxyQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *ProxyQuery) Limit(limit int) *ProxyQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *ProxyQuery) Offset(offset int) *ProxyQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *ProxyQuery) Unique(unique bool) *ProxyQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *ProxyQuery) Order(o ...proxy.OrderOption) *ProxyQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryAccounts chains the current query on the "accounts" edge.
// The returned AccountQuery resolves its FROM clause lazily: the traversal
// step from Proxy to Account is only built when the child query executes.
func (_q *ProxyQuery) QueryAccounts() *AccountQuery {
	query := (&AccountClient{config: _q.config}).Query()
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := _q.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := _q.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(proxy.Table, proxy.FieldID, selector),
			sqlgraph.To(account.Table, account.FieldID),
			sqlgraph.Edge(sqlgraph.O2M, true, proxy.AccountsTable, proxy.AccountsColumn),
		)
		fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}
|
||||||
|
|
||||||
|
// First returns the first Proxy entity from the query.
// Returns a *NotFoundError when no Proxy was found.
func (_q *ProxyQuery) First(ctx context.Context) (*Proxy, error) {
	nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{proxy.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
// A not-found result is tolerated and yields a nil node.
func (_q *ProxyQuery) FirstX(ctx context.Context) *Proxy {
	node, err := _q.First(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return node
}

// FirstID returns the first Proxy ID from the query.
// Returns a *NotFoundError when no Proxy ID was found.
func (_q *ProxyQuery) FirstID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
		return
	}
	if len(ids) == 0 {
		err = &NotFoundError{proxy.Label}
		return
	}
	return ids[0], nil
}

// FirstIDX is like FirstID, but panics if an error occurs.
// A not-found result is tolerated and yields the zero ID.
func (_q *ProxyQuery) FirstIDX(ctx context.Context) int64 {
	id, err := _q.FirstID(ctx)
	if err != nil && !IsNotFound(err) {
		panic(err)
	}
	return id
}
|
||||||
|
|
||||||
|
// Only returns a single Proxy entity found by the query, ensuring it only returns one.
// Returns a *NotSingularError when more than one Proxy entity is found.
// Returns a *NotFoundError when no Proxy entities are found.
func (_q *ProxyQuery) Only(ctx context.Context) (*Proxy, error) {
	// Limit(2) is enough to detect the "more than one" case cheaply.
	nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
	if err != nil {
		return nil, err
	}
	switch len(nodes) {
	case 1:
		return nodes[0], nil
	case 0:
		return nil, &NotFoundError{proxy.Label}
	default:
		return nil, &NotSingularError{proxy.Label}
	}
}

// OnlyX is like Only, but panics if an error occurs.
func (_q *ProxyQuery) OnlyX(ctx context.Context) *Proxy {
	node, err := _q.Only(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// OnlyID is like Only, but returns the only Proxy ID in the query.
// Returns a *NotSingularError when more than one Proxy ID is found.
// Returns a *NotFoundError when no entities are found.
func (_q *ProxyQuery) OnlyID(ctx context.Context) (id int64, err error) {
	var ids []int64
	if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
		return
	}
	switch len(ids) {
	case 1:
		id = ids[0]
	case 0:
		err = &NotFoundError{proxy.Label}
	default:
		err = &NotSingularError{proxy.Label}
	}
	return
}

// OnlyIDX is like OnlyID, but panics if an error occurs.
func (_q *ProxyQuery) OnlyIDX(ctx context.Context) int64 {
	id, err := _q.OnlyID(ctx)
	if err != nil {
		panic(err)
	}
	return id
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of Proxies.
func (_q *ProxyQuery) All(ctx context.Context) ([]*Proxy, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
	if err := _q.prepareQuery(ctx); err != nil {
		return nil, err
	}
	qr := querierAll[[]*Proxy, *ProxyQuery]()
	return withInterceptors[[]*Proxy](ctx, _q, qr, _q.inters)
}

// AllX is like All, but panics if an error occurs.
func (_q *ProxyQuery) AllX(ctx context.Context) []*Proxy {
	nodes, err := _q.All(ctx)
	if err != nil {
		panic(err)
	}
	return nodes
}

// IDs executes the query and returns a list of Proxy IDs.
func (_q *ProxyQuery) IDs(ctx context.Context) (ids []int64, err error) {
	// Default to unique IDs when this query is part of a graph traversal.
	if _q.ctx.Unique == nil && _q.path != nil {
		_q.Unique(true)
	}
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
	if err = _q.Select(proxy.FieldID).Scan(ctx, &ids); err != nil {
		return nil, err
	}
	return ids, nil
}

// IDsX is like IDs, but panics if an error occurs.
func (_q *ProxyQuery) IDsX(ctx context.Context) []int64 {
	ids, err := _q.IDs(ctx)
	if err != nil {
		panic(err)
	}
	return ids
}

// Count returns the count of the given query.
func (_q *ProxyQuery) Count(ctx context.Context) (int, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
	if err := _q.prepareQuery(ctx); err != nil {
		return 0, err
	}
	return withInterceptors[int](ctx, _q, querierCount[*ProxyQuery](), _q.inters)
}

// CountX is like Count, but panics if an error occurs.
func (_q *ProxyQuery) CountX(ctx context.Context) int {
	count, err := _q.Count(ctx)
	if err != nil {
		panic(err)
	}
	return count
}

// Exist returns true if the query has elements in the graph.
func (_q *ProxyQuery) Exist(ctx context.Context) (bool, error) {
	ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
	switch _, err := _q.FirstID(ctx); {
	case IsNotFound(err):
		return false, nil
	case err != nil:
		return false, fmt.Errorf("ent: check existence: %w", err)
	default:
		return true, nil
	}
}

// ExistX is like Exist, but panics if an error occurs.
func (_q *ProxyQuery) ExistX(ctx context.Context) bool {
	exist, err := _q.Exist(ctx)
	if err != nil {
		panic(err)
	}
	return exist
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the ProxyQuery builder, including all associated steps. It can be
// used to prepare common query builders and use them differently after the clone is made.
func (_q *ProxyQuery) Clone() *ProxyQuery {
	if _q == nil {
		return nil
	}
	return &ProxyQuery{
		config:       _q.config,
		ctx:          _q.ctx.Clone(),
		order:        append([]proxy.OrderOption{}, _q.order...),
		inters:       append([]Interceptor{}, _q.inters...),
		predicates:   append([]predicate.Proxy{}, _q.predicates...),
		withAccounts: _q.withAccounts.Clone(),
		// clone intermediate query.
		sql:  _q.sql.Clone(),
		path: _q.path,
	}
}

// WithAccounts tells the query-builder to eager-load the nodes that are connected to
// the "accounts" edge. The optional arguments are used to configure the query builder of the edge.
func (_q *ProxyQuery) WithAccounts(opts ...func(*AccountQuery)) *ProxyQuery {
	query := (&AccountClient{config: _q.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	_q.withAccounts = query
	return _q
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.Proxy.Query().
//		GroupBy(proxy.FieldCreatedAt).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (_q *ProxyQuery) GroupBy(field string, fields ...string) *ProxyGroupBy {
	_q.ctx.Fields = append([]string{field}, fields...)
	grbuild := &ProxyGroupBy{build: _q}
	grbuild.flds = &_q.ctx.Fields
	grbuild.label = proxy.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		CreatedAt time.Time `json:"created_at,omitempty"`
//	}
//
//	client.Proxy.Query().
//		Select(proxy.FieldCreatedAt).
//		Scan(ctx, &v)
func (_q *ProxyQuery) Select(fields ...string) *ProxySelect {
	_q.ctx.Fields = append(_q.ctx.Fields, fields...)
	sbuild := &ProxySelect{ProxyQuery: _q}
	sbuild.label = proxy.Label
	sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a ProxySelect configured with the given aggregations.
func (_q *ProxyQuery) Aggregate(fns ...AggregateFunc) *ProxySelect {
	return _q.Select().Aggregate(fns...)
}
|
||||||
|
|
||||||
|
// prepareQuery runs traverser interceptors, validates the selected columns,
// and resolves the deferred traversal path (if any) into _q.sql before execution.
func (_q *ProxyQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range _q.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, _q); err != nil {
				return err
			}
		}
	}
	for _, f := range _q.ctx.Fields {
		if !proxy.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if _q.path != nil {
		prev, err := _q.path(ctx)
		if err != nil {
			return err
		}
		_q.sql = prev
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlAll executes the query against the storage, scans all matching rows into
// Proxy nodes, and eager-loads the requested edges (accounts) for those nodes.
func (_q *ProxyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Proxy, error) {
	var (
		nodes = []*Proxy{}
		_spec = _q.querySpec()
		// loadedTypes marks which edges were requested, so Edges accessors
		// can distinguish "not loaded" from "loaded but empty".
		loadedTypes = [1]bool{
			_q.withAccounts != nil,
		}
	)
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*Proxy).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &Proxy{config: _q.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := _q.withAccounts; query != nil {
		if err := _q.loadAccounts(ctx, query, nodes,
			func(n *Proxy) { n.Edges.Accounts = []*Account{} },
			func(n *Proxy, e *Account) { n.Edges.Accounts = append(n.Edges.Accounts, e) }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}
|
||||||
|
|
||||||
|
// loadAccounts eager-loads the "accounts" edge for the given Proxy nodes using a
// single batched query (WHERE proxy_id IN (...)), then assigns each Account to
// its parent node via the provided init/assign callbacks.
func (_q *ProxyQuery) loadAccounts(ctx context.Context, query *AccountQuery, nodes []*Proxy, init func(*Proxy), assign func(*Proxy, *Account)) error {
	fks := make([]driver.Value, 0, len(nodes))
	nodeids := make(map[int64]*Proxy)
	for i := range nodes {
		fks = append(fks, nodes[i].ID)
		nodeids[nodes[i].ID] = nodes[i]
		if init != nil {
			init(nodes[i])
		}
	}
	// Ensure the FK column is selected so results can be mapped back to parents.
	if len(query.ctx.Fields) > 0 {
		query.ctx.AppendFieldOnce(account.FieldProxyID)
	}
	query.Where(predicate.Account(func(s *sql.Selector) {
		s.Where(sql.InValues(s.C(proxy.AccountsColumn), fks...))
	}))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		fk := n.ProxyID
		if fk == nil {
			return fmt.Errorf(`foreign-key "proxy_id" is nil for node %v`, n.ID)
		}
		node, ok := nodeids[*fk]
		if !ok {
			return fmt.Errorf(`unexpected referenced foreign-key "proxy_id" returned %v for node %v`, *fk, n.ID)
		}
		assign(node, n)
	}
	return nil
}
|
||||||
|
|
||||||
|
// sqlCount executes a COUNT query for the current builder state.
func (_q *ProxyQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := _q.querySpec()
	if len(_q.modifiers) > 0 {
		_spec.Modifiers = _q.modifiers
	}
	_spec.Node.Columns = _q.ctx.Fields
	if len(_q.ctx.Fields) > 0 {
		_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, _q.driver, _spec)
}

// querySpec translates the builder state (predicates, ordering, paging,
// selected columns, uniqueness) into a sqlgraph.QuerySpec for execution.
func (_q *ProxyQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64))
	_spec.From = _q.sql
	if unique := _q.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if _q.path != nil {
		// Graph traversals default to unique results.
		_spec.Unique = true
	}
	if fields := _q.ctx.Fields; len(fields) > 0 {
		_spec.Node.Columns = make([]string, 0, len(fields))
		// The ID column is always selected so nodes can be identified.
		_spec.Node.Columns = append(_spec.Node.Columns, proxy.FieldID)
		for i := range fields {
			if fields[i] != proxy.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := _q.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := _q.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := _q.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := _q.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}
|
||||||
|
|
||||||
|
// sqlQuery builds the raw *sql.Selector for this query, applying column
// selection, DISTINCT, modifiers, predicates, ordering, and paging.
func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(_q.driver.Dialect())
	t1 := builder.Table(proxy.Table)
	columns := _q.ctx.Fields
	if len(columns) == 0 {
		columns = proxy.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if _q.sql != nil {
		// Continue from an existing selector (graph traversal).
		selector = _q.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if _q.ctx.Unique != nil && *_q.ctx.Unique {
		selector.Distinct()
	}
	for _, m := range _q.modifiers {
		m(selector)
	}
	for _, p := range _q.predicates {
		p(selector)
	}
	for _, p := range _q.order {
		p(selector)
	}
	if offset := _q.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := _q.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
// either committed or rolled-back.
func (_q *ProxyQuery) ForUpdate(opts ...sql.LockOption) *ProxyQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres does not allow FOR UPDATE with DISTINCT.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForUpdate(opts...)
	})
	return _q
}

// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
// on any rows that are read. Other sessions can read the rows, but cannot modify them
// until your transaction commits.
func (_q *ProxyQuery) ForShare(opts ...sql.LockOption) *ProxyQuery {
	if _q.driver.Dialect() == dialect.Postgres {
		// Postgres does not allow FOR SHARE with DISTINCT.
		_q.Unique(false)
	}
	_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
		s.ForShare(opts...)
	})
	return _q
}
|
||||||
|
|
||||||
|
// ProxyGroupBy is the group-by builder for Proxy entities.
type ProxyGroupBy struct {
	selector
	build *ProxyQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (_g *ProxyGroupBy) Aggregate(fns ...AggregateFunc) *ProxyGroupBy {
	_g.fns = append(_g.fns, fns...)
	return _g
}

// Scan applies the selector query and scans the result into the given value.
func (_g *ProxyGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
	if err := _g.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ProxyQuery, *ProxyGroupBy](ctx, _g.build, _g, _g.build.inters, v)
}

// sqlScan builds the GROUP BY statement (grouped columns + aggregations),
// executes it, and scans the resulting rows into v.
func (_g *ProxyGroupBy) sqlScan(ctx context.Context, root *ProxyQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(_g.fns))
	for _, fn := range _g.fns {
		aggregation = append(aggregation, fn(selector))
	}
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
		for _, f := range *_g.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*_g.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
|
|
||||||
|
// ProxySelect is the builder for selecting fields of Proxy entities.
type ProxySelect struct {
	*ProxyQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (_s *ProxySelect) Aggregate(fns ...AggregateFunc) *ProxySelect {
	_s.fns = append(_s.fns, fns...)
	return _s
}

// Scan applies the selector query and scans the result into the given value.
func (_s *ProxySelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
	if err := _s.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ProxyQuery, *ProxySelect](ctx, _s.ProxyQuery, _s, _s.inters, v)
}

// sqlScan executes the SELECT (with any configured aggregations) and scans
// the resulting rows into v.
func (_s *ProxySelect) sqlScan(ctx context.Context, root *ProxyQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(_s.fns))
	for _, fn := range _s.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*_s.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := _s.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
|
||||||
809
backend/ent/proxy_update.go
Normal file
809
backend/ent/proxy_update.go
Normal file
@@ -0,0 +1,809 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ProxyUpdate is the builder for updating Proxy entities.
type ProxyUpdate struct {
	config
	hooks []Hook
	// mutation accumulates the field/edge changes to apply on Save.
	mutation *ProxyMutation
}

// Where appends a list predicates to the ProxyUpdate builder.
func (_u *ProxyUpdate) Where(ps ...predicate.Proxy) *ProxyUpdate {
	_u.mutation.Where(ps...)
	return _u
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
func (_u *ProxyUpdate) SetUpdatedAt(v time.Time) *ProxyUpdate {
	_u.mutation.SetUpdatedAt(v)
	return _u
}

// SetDeletedAt sets the "deleted_at" field.
func (_u *ProxyUpdate) SetDeletedAt(v time.Time) *ProxyUpdate {
	_u.mutation.SetDeletedAt(v)
	return _u
}

// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillableDeletedAt(v *time.Time) *ProxyUpdate {
	if v != nil {
		_u.SetDeletedAt(*v)
	}
	return _u
}

// ClearDeletedAt clears the value of the "deleted_at" field.
func (_u *ProxyUpdate) ClearDeletedAt() *ProxyUpdate {
	_u.mutation.ClearDeletedAt()
	return _u
}

// SetName sets the "name" field.
func (_u *ProxyUpdate) SetName(v string) *ProxyUpdate {
	_u.mutation.SetName(v)
	return _u
}

// SetNillableName sets the "name" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillableName(v *string) *ProxyUpdate {
	if v != nil {
		_u.SetName(*v)
	}
	return _u
}

// SetProtocol sets the "protocol" field.
func (_u *ProxyUpdate) SetProtocol(v string) *ProxyUpdate {
	_u.mutation.SetProtocol(v)
	return _u
}

// SetNillableProtocol sets the "protocol" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillableProtocol(v *string) *ProxyUpdate {
	if v != nil {
		_u.SetProtocol(*v)
	}
	return _u
}

// SetHost sets the "host" field.
func (_u *ProxyUpdate) SetHost(v string) *ProxyUpdate {
	_u.mutation.SetHost(v)
	return _u
}

// SetNillableHost sets the "host" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillableHost(v *string) *ProxyUpdate {
	if v != nil {
		_u.SetHost(*v)
	}
	return _u
}

// SetPort sets the "port" field.
func (_u *ProxyUpdate) SetPort(v int) *ProxyUpdate {
	// Reset any pending AddPort increment before assigning an absolute value.
	_u.mutation.ResetPort()
	_u.mutation.SetPort(v)
	return _u
}

// SetNillablePort sets the "port" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillablePort(v *int) *ProxyUpdate {
	if v != nil {
		_u.SetPort(*v)
	}
	return _u
}

// AddPort adds value to the "port" field.
func (_u *ProxyUpdate) AddPort(v int) *ProxyUpdate {
	_u.mutation.AddPort(v)
	return _u
}

// SetUsername sets the "username" field.
func (_u *ProxyUpdate) SetUsername(v string) *ProxyUpdate {
	_u.mutation.SetUsername(v)
	return _u
}

// SetNillableUsername sets the "username" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillableUsername(v *string) *ProxyUpdate {
	if v != nil {
		_u.SetUsername(*v)
	}
	return _u
}

// ClearUsername clears the value of the "username" field.
func (_u *ProxyUpdate) ClearUsername() *ProxyUpdate {
	_u.mutation.ClearUsername()
	return _u
}

// SetPassword sets the "password" field.
func (_u *ProxyUpdate) SetPassword(v string) *ProxyUpdate {
	_u.mutation.SetPassword(v)
	return _u
}

// SetNillablePassword sets the "password" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillablePassword(v *string) *ProxyUpdate {
	if v != nil {
		_u.SetPassword(*v)
	}
	return _u
}

// ClearPassword clears the value of the "password" field.
func (_u *ProxyUpdate) ClearPassword() *ProxyUpdate {
	_u.mutation.ClearPassword()
	return _u
}

// SetStatus sets the "status" field.
func (_u *ProxyUpdate) SetStatus(v string) *ProxyUpdate {
	_u.mutation.SetStatus(v)
	return _u
}

// SetNillableStatus sets the "status" field if the given value is not nil.
func (_u *ProxyUpdate) SetNillableStatus(v *string) *ProxyUpdate {
	if v != nil {
		_u.SetStatus(*v)
	}
	return _u
}

// AddAccountIDs adds the "accounts" edge to the Account entity by IDs.
func (_u *ProxyUpdate) AddAccountIDs(ids ...int64) *ProxyUpdate {
	_u.mutation.AddAccountIDs(ids...)
	return _u
}

// AddAccounts adds the "accounts" edges to the Account entity.
func (_u *ProxyUpdate) AddAccounts(v ...*Account) *ProxyUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.AddAccountIDs(ids...)
}

// Mutation returns the ProxyMutation object of the builder.
func (_u *ProxyUpdate) Mutation() *ProxyMutation {
	return _u.mutation
}

// ClearAccounts clears all "accounts" edges to the Account entity.
func (_u *ProxyUpdate) ClearAccounts() *ProxyUpdate {
	_u.mutation.ClearAccounts()
	return _u
}

// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs.
func (_u *ProxyUpdate) RemoveAccountIDs(ids ...int64) *ProxyUpdate {
	_u.mutation.RemoveAccountIDs(ids...)
	return _u
}

// RemoveAccounts removes "accounts" edges to Account entities.
func (_u *ProxyUpdate) RemoveAccounts(v ...*Account) *ProxyUpdate {
	ids := make([]int64, len(v))
	for i := range v {
		ids[i] = v[i].ID
	}
	return _u.RemoveAccountIDs(ids...)
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
func (_u *ProxyUpdate) Save(ctx context.Context) (int, error) {
	if err := _u.defaults(); err != nil {
		return 0, err
	}
	return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (_u *ProxyUpdate) SaveX(ctx context.Context) int {
	affected, err := _u.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (_u *ProxyUpdate) Exec(ctx context.Context) error {
	_, err := _u.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (_u *ProxyUpdate) ExecX(ctx context.Context) {
	if err := _u.Exec(ctx); err != nil {
		panic(err)
	}
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
// Currently this auto-populates "updated_at" when not explicitly set.
func (_u *ProxyUpdate) defaults() error {
	if _, ok := _u.mutation.UpdatedAt(); !ok {
		if proxy.UpdateDefaultUpdatedAt == nil {
			return fmt.Errorf("ent: uninitialized proxy.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
		}
		v := proxy.UpdateDefaultUpdatedAt()
		_u.mutation.SetUpdatedAt(v)
	}
	return nil
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
// Each field validator is only invoked when that field is present in the mutation.
func (_u *ProxyUpdate) check() error {
	if v, ok := _u.mutation.Name(); ok {
		if err := proxy.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Protocol(); ok {
		if err := proxy.ProtocolValidator(v); err != nil {
			return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Host(); ok {
		if err := proxy.HostValidator(v); err != nil {
			return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Username(); ok {
		if err := proxy.UsernameValidator(v); err != nil {
			return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Password(); ok {
		if err := proxy.PasswordValidator(v); err != nil {
			return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed for field "Proxy.password": %w`, err)}
		}
	}
	if v, ok := _u.mutation.Status(); ok {
		if err := proxy.StatusValidator(v); err != nil {
			return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)}
		}
	}
	return nil
}
|
||||||
|
|
||||||
|
func (_u *ProxyUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.DeletedAt(); ok {
|
||||||
|
_spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.DeletedAtCleared() {
|
||||||
|
_spec.ClearField(proxy.FieldDeletedAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Name(); ok {
|
||||||
|
_spec.SetField(proxy.FieldName, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Protocol(); ok {
|
||||||
|
_spec.SetField(proxy.FieldProtocol, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Host(); ok {
|
||||||
|
_spec.SetField(proxy.FieldHost, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Port(); ok {
|
||||||
|
_spec.SetField(proxy.FieldPort, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedPort(); ok {
|
||||||
|
_spec.AddField(proxy.FieldPort, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Username(); ok {
|
||||||
|
_spec.SetField(proxy.FieldUsername, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsernameCleared() {
|
||||||
|
_spec.ClearField(proxy.FieldUsername, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Password(); ok {
|
||||||
|
_spec.SetField(proxy.FieldPassword, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.PasswordCleared() {
|
||||||
|
_spec.ClearField(proxy.FieldPassword, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(proxy.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.AccountsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: true,
|
||||||
|
Table: proxy.AccountsTable,
|
||||||
|
Columns: []string{proxy.AccountsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: true,
|
||||||
|
Table: proxy.AccountsTable,
|
||||||
|
Columns: []string{proxy.AccountsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: true,
|
||||||
|
Table: proxy.AccountsTable,
|
||||||
|
Columns: []string{proxy.AccountsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{proxy.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProxyUpdateOne is the builder for updating a single Proxy entity.
|
||||||
|
type ProxyUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *ProxyMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUpdatedAt sets the "updated_at" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetUpdatedAt(v time.Time) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetDeletedAt sets the "deleted_at" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetDeletedAt(v time.Time) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetDeletedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableDeletedAt sets the "deleted_at" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillableDeletedAt(v *time.Time) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetDeletedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearDeletedAt clears the value of the "deleted_at" field.
|
||||||
|
func (_u *ProxyUpdateOne) ClearDeletedAt() *ProxyUpdateOne {
|
||||||
|
_u.mutation.ClearDeletedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetName sets the "name" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetName(v string) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetName(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableName sets the "name" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillableName(v *string) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetName(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetProtocol sets the "protocol" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetProtocol(v string) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetProtocol(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableProtocol sets the "protocol" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillableProtocol(v *string) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetProtocol(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetHost sets the "host" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetHost(v string) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetHost(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableHost sets the "host" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillableHost(v *string) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetHost(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPort sets the "port" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetPort(v int) *ProxyUpdateOne {
|
||||||
|
_u.mutation.ResetPort()
|
||||||
|
_u.mutation.SetPort(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillablePort sets the "port" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillablePort(v *int) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetPort(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddPort adds value to the "port" field.
|
||||||
|
func (_u *ProxyUpdateOne) AddPort(v int) *ProxyUpdateOne {
|
||||||
|
_u.mutation.AddPort(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsername sets the "username" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetUsername(v string) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetUsername(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsername sets the "username" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillableUsername(v *string) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsername(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsername clears the value of the "username" field.
|
||||||
|
func (_u *ProxyUpdateOne) ClearUsername() *ProxyUpdateOne {
|
||||||
|
_u.mutation.ClearUsername()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetPassword sets the "password" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetPassword(v string) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetPassword(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillablePassword sets the "password" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillablePassword(v *string) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetPassword(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearPassword clears the value of the "password" field.
|
||||||
|
func (_u *ProxyUpdateOne) ClearPassword() *ProxyUpdateOne {
|
||||||
|
_u.mutation.ClearPassword()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *ProxyUpdateOne) SetStatus(v string) *ProxyUpdateOne {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *ProxyUpdateOne) SetNillableStatus(v *string) *ProxyUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddAccountIDs adds the "accounts" edge to the Account entity by IDs.
|
||||||
|
func (_u *ProxyUpdateOne) AddAccountIDs(ids ...int64) *ProxyUpdateOne {
|
||||||
|
_u.mutation.AddAccountIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddAccounts adds the "accounts" edges to the Account entity.
|
||||||
|
func (_u *ProxyUpdateOne) AddAccounts(v ...*Account) *ProxyUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.AddAccountIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the ProxyMutation object of the builder.
|
||||||
|
func (_u *ProxyUpdateOne) Mutation() *ProxyMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearAccounts clears all "accounts" edges to the Account entity.
|
||||||
|
func (_u *ProxyUpdateOne) ClearAccounts() *ProxyUpdateOne {
|
||||||
|
_u.mutation.ClearAccounts()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAccountIDs removes the "accounts" edge to Account entities by IDs.
|
||||||
|
func (_u *ProxyUpdateOne) RemoveAccountIDs(ids ...int64) *ProxyUpdateOne {
|
||||||
|
_u.mutation.RemoveAccountIDs(ids...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveAccounts removes "accounts" edges to Account entities.
|
||||||
|
func (_u *ProxyUpdateOne) RemoveAccounts(v ...*Account) *ProxyUpdateOne {
|
||||||
|
ids := make([]int64, len(v))
|
||||||
|
for i := range v {
|
||||||
|
ids[i] = v[i].ID
|
||||||
|
}
|
||||||
|
return _u.RemoveAccountIDs(ids...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the ProxyUpdate builder.
|
||||||
|
func (_u *ProxyUpdateOne) Where(ps ...predicate.Proxy) *ProxyUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *ProxyUpdateOne) Select(field string, fields ...string) *ProxyUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated Proxy entity.
|
||||||
|
func (_u *ProxyUpdateOne) Save(ctx context.Context) (*Proxy, error) {
|
||||||
|
if err := _u.defaults(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *ProxyUpdateOne) SaveX(ctx context.Context) *Proxy {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *ProxyUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *ProxyUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaults sets the default values of the builder before save.
|
||||||
|
func (_u *ProxyUpdateOne) defaults() error {
|
||||||
|
if _, ok := _u.mutation.UpdatedAt(); !ok {
|
||||||
|
if proxy.UpdateDefaultUpdatedAt == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized proxy.UpdateDefaultUpdatedAt (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
v := proxy.UpdateDefaultUpdatedAt()
|
||||||
|
_u.mutation.SetUpdatedAt(v)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *ProxyUpdateOne) check() error {
|
||||||
|
if v, ok := _u.mutation.Name(); ok {
|
||||||
|
if err := proxy.NameValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Proxy.name": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Protocol(); ok {
|
||||||
|
if err := proxy.ProtocolValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "protocol", err: fmt.Errorf(`ent: validator failed for field "Proxy.protocol": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Host(); ok {
|
||||||
|
if err := proxy.HostValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "host", err: fmt.Errorf(`ent: validator failed for field "Proxy.host": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Username(); ok {
|
||||||
|
if err := proxy.UsernameValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "Proxy.username": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Password(); ok {
|
||||||
|
if err := proxy.PasswordValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "password", err: fmt.Errorf(`ent: validator failed for field "Proxy.password": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := proxy.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Proxy.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *ProxyUpdateOne) sqlSave(ctx context.Context) (_node *Proxy, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(proxy.Table, proxy.Columns, sqlgraph.NewFieldSpec(proxy.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Proxy.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, proxy.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !proxy.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != proxy.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UpdatedAt(); ok {
|
||||||
|
_spec.SetField(proxy.FieldUpdatedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.DeletedAt(); ok {
|
||||||
|
_spec.SetField(proxy.FieldDeletedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.DeletedAtCleared() {
|
||||||
|
_spec.ClearField(proxy.FieldDeletedAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Name(); ok {
|
||||||
|
_spec.SetField(proxy.FieldName, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Protocol(); ok {
|
||||||
|
_spec.SetField(proxy.FieldProtocol, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Host(); ok {
|
||||||
|
_spec.SetField(proxy.FieldHost, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Port(); ok {
|
||||||
|
_spec.SetField(proxy.FieldPort, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedPort(); ok {
|
||||||
|
_spec.AddField(proxy.FieldPort, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Username(); ok {
|
||||||
|
_spec.SetField(proxy.FieldUsername, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsernameCleared() {
|
||||||
|
_spec.ClearField(proxy.FieldUsername, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Password(); ok {
|
||||||
|
_spec.SetField(proxy.FieldPassword, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.PasswordCleared() {
|
||||||
|
_spec.ClearField(proxy.FieldPassword, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(proxy.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.AccountsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: true,
|
||||||
|
Table: proxy.AccountsTable,
|
||||||
|
Columns: []string{proxy.AccountsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.RemovedAccountsIDs(); len(nodes) > 0 && !_u.mutation.AccountsCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: true,
|
||||||
|
Table: proxy.AccountsTable,
|
||||||
|
Columns: []string{proxy.AccountsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.AccountsIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.O2M,
|
||||||
|
Inverse: true,
|
||||||
|
Table: proxy.AccountsTable,
|
||||||
|
Columns: []string{proxy.AccountsColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(account.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &Proxy{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{proxy.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
267
backend/ent/redeemcode.go
Normal file
267
backend/ent/redeemcode.go
Normal file
@@ -0,0 +1,267 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RedeemCode is the model entity for the RedeemCode schema.
|
||||||
|
type RedeemCode struct {
|
||||||
|
config `json:"-"`
|
||||||
|
// ID of the ent.
|
||||||
|
ID int64 `json:"id,omitempty"`
|
||||||
|
// Code holds the value of the "code" field.
|
||||||
|
Code string `json:"code,omitempty"`
|
||||||
|
// Type holds the value of the "type" field.
|
||||||
|
Type string `json:"type,omitempty"`
|
||||||
|
// Value holds the value of the "value" field.
|
||||||
|
Value float64 `json:"value,omitempty"`
|
||||||
|
// Status holds the value of the "status" field.
|
||||||
|
Status string `json:"status,omitempty"`
|
||||||
|
// UsedBy holds the value of the "used_by" field.
|
||||||
|
UsedBy *int64 `json:"used_by,omitempty"`
|
||||||
|
// UsedAt holds the value of the "used_at" field.
|
||||||
|
UsedAt *time.Time `json:"used_at,omitempty"`
|
||||||
|
// Notes holds the value of the "notes" field.
|
||||||
|
Notes *string `json:"notes,omitempty"`
|
||||||
|
// CreatedAt holds the value of the "created_at" field.
|
||||||
|
CreatedAt time.Time `json:"created_at,omitempty"`
|
||||||
|
// GroupID holds the value of the "group_id" field.
|
||||||
|
GroupID *int64 `json:"group_id,omitempty"`
|
||||||
|
// ValidityDays holds the value of the "validity_days" field.
|
||||||
|
ValidityDays int `json:"validity_days,omitempty"`
|
||||||
|
// Edges holds the relations/edges for other nodes in the graph.
|
||||||
|
// The values are being populated by the RedeemCodeQuery when eager-loading is set.
|
||||||
|
Edges RedeemCodeEdges `json:"edges"`
|
||||||
|
selectValues sql.SelectValues
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodeEdges holds the relations/edges for other nodes in the graph.
|
||||||
|
type RedeemCodeEdges struct {
|
||||||
|
// User holds the value of the user edge.
|
||||||
|
User *User `json:"user,omitempty"`
|
||||||
|
// Group holds the value of the group edge.
|
||||||
|
Group *Group `json:"group,omitempty"`
|
||||||
|
// loadedTypes holds the information for reporting if a
|
||||||
|
// type was loaded (or requested) in eager-loading or not.
|
||||||
|
loadedTypes [2]bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UserOrErr returns the User value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e RedeemCodeEdges) UserOrErr() (*User, error) {
|
||||||
|
if e.User != nil {
|
||||||
|
return e.User, nil
|
||||||
|
} else if e.loadedTypes[0] {
|
||||||
|
return nil, &NotFoundError{label: user.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "user"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupOrErr returns the Group value or an error if the edge
|
||||||
|
// was not loaded in eager-loading, or loaded but was not found.
|
||||||
|
func (e RedeemCodeEdges) GroupOrErr() (*Group, error) {
|
||||||
|
if e.Group != nil {
|
||||||
|
return e.Group, nil
|
||||||
|
} else if e.loadedTypes[1] {
|
||||||
|
return nil, &NotFoundError{label: group.Label}
|
||||||
|
}
|
||||||
|
return nil, &NotLoadedError{edge: "group"}
|
||||||
|
}
|
||||||
|
|
||||||
|
// scanValues returns the types for scanning values from sql.Rows.
|
||||||
|
func (*RedeemCode) scanValues(columns []string) ([]any, error) {
|
||||||
|
values := make([]any, len(columns))
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case redeemcode.FieldValue:
|
||||||
|
values[i] = new(sql.NullFloat64)
|
||||||
|
case redeemcode.FieldID, redeemcode.FieldUsedBy, redeemcode.FieldGroupID, redeemcode.FieldValidityDays:
|
||||||
|
values[i] = new(sql.NullInt64)
|
||||||
|
case redeemcode.FieldCode, redeemcode.FieldType, redeemcode.FieldStatus, redeemcode.FieldNotes:
|
||||||
|
values[i] = new(sql.NullString)
|
||||||
|
case redeemcode.FieldUsedAt, redeemcode.FieldCreatedAt:
|
||||||
|
values[i] = new(sql.NullTime)
|
||||||
|
default:
|
||||||
|
values[i] = new(sql.UnknownType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return values, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||||
|
// to the RedeemCode fields.
|
||||||
|
func (_m *RedeemCode) assignValues(columns []string, values []any) error {
|
||||||
|
if m, n := len(values), len(columns); m < n {
|
||||||
|
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||||
|
}
|
||||||
|
for i := range columns {
|
||||||
|
switch columns[i] {
|
||||||
|
case redeemcode.FieldID:
|
||||||
|
value, ok := values[i].(*sql.NullInt64)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field id", value)
|
||||||
|
}
|
||||||
|
_m.ID = int64(value.Int64)
|
||||||
|
case redeemcode.FieldCode:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field code", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Code = value.String
|
||||||
|
}
|
||||||
|
case redeemcode.FieldType:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field type", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Type = value.String
|
||||||
|
}
|
||||||
|
case redeemcode.FieldValue:
|
||||||
|
if value, ok := values[i].(*sql.NullFloat64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field value", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Value = value.Float64
|
||||||
|
}
|
||||||
|
case redeemcode.FieldStatus:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field status", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Status = value.String
|
||||||
|
}
|
||||||
|
case redeemcode.FieldUsedBy:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field used_by", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UsedBy = new(int64)
|
||||||
|
*_m.UsedBy = value.Int64
|
||||||
|
}
|
||||||
|
case redeemcode.FieldUsedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field used_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.UsedAt = new(time.Time)
|
||||||
|
*_m.UsedAt = value.Time
|
||||||
|
}
|
||||||
|
case redeemcode.FieldNotes:
|
||||||
|
if value, ok := values[i].(*sql.NullString); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field notes", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.Notes = new(string)
|
||||||
|
*_m.Notes = value.String
|
||||||
|
}
|
||||||
|
case redeemcode.FieldCreatedAt:
|
||||||
|
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.CreatedAt = value.Time
|
||||||
|
}
|
||||||
|
case redeemcode.FieldGroupID:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field group_id", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.GroupID = new(int64)
|
||||||
|
*_m.GroupID = value.Int64
|
||||||
|
}
|
||||||
|
case redeemcode.FieldValidityDays:
|
||||||
|
if value, ok := values[i].(*sql.NullInt64); !ok {
|
||||||
|
return fmt.Errorf("unexpected type %T for field validity_days", values[i])
|
||||||
|
} else if value.Valid {
|
||||||
|
_m.ValidityDays = int(value.Int64)
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
_m.selectValues.Set(columns[i], values[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetValue returns the ent.Value that was dynamically selected and assigned to the RedeemCode.
|
||||||
|
// This includes values selected through modifiers, order, etc.
|
||||||
|
func (_m *RedeemCode) GetValue(name string) (ent.Value, error) {
|
||||||
|
return _m.selectValues.Get(name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser queries the "user" edge of the RedeemCode entity.
|
||||||
|
func (_m *RedeemCode) QueryUser() *UserQuery {
|
||||||
|
return NewRedeemCodeClient(_m.config).QueryUser(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup queries the "group" edge of the RedeemCode entity.
|
||||||
|
func (_m *RedeemCode) QueryGroup() *GroupQuery {
|
||||||
|
return NewRedeemCodeClient(_m.config).QueryGroup(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update returns a builder for updating this RedeemCode.
|
||||||
|
// Note that you need to call RedeemCode.Unwrap() before calling this method if this RedeemCode
|
||||||
|
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||||
|
func (_m *RedeemCode) Update() *RedeemCodeUpdateOne {
|
||||||
|
return NewRedeemCodeClient(_m.config).UpdateOne(_m)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unwrap unwraps the RedeemCode entity that was returned from a transaction after it was closed,
|
||||||
|
// so that all future queries will be executed through the driver which created the transaction.
|
||||||
|
func (_m *RedeemCode) Unwrap() *RedeemCode {
|
||||||
|
_tx, ok := _m.config.driver.(*txDriver)
|
||||||
|
if !ok {
|
||||||
|
panic("ent: RedeemCode is not a transactional entity")
|
||||||
|
}
|
||||||
|
_m.config.driver = _tx.drv
|
||||||
|
return _m
|
||||||
|
}
|
||||||
|
|
||||||
|
// String implements the fmt.Stringer.
|
||||||
|
func (_m *RedeemCode) String() string {
|
||||||
|
var builder strings.Builder
|
||||||
|
builder.WriteString("RedeemCode(")
|
||||||
|
builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID))
|
||||||
|
builder.WriteString("code=")
|
||||||
|
builder.WriteString(_m.Code)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("type=")
|
||||||
|
builder.WriteString(_m.Type)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("value=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.Value))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("status=")
|
||||||
|
builder.WriteString(_m.Status)
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.UsedBy; v != nil {
|
||||||
|
builder.WriteString("used_by=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.UsedAt; v != nil {
|
||||||
|
builder.WriteString("used_at=")
|
||||||
|
builder.WriteString(v.Format(time.ANSIC))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.Notes; v != nil {
|
||||||
|
builder.WriteString("notes=")
|
||||||
|
builder.WriteString(*v)
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("created_at=")
|
||||||
|
builder.WriteString(_m.CreatedAt.Format(time.ANSIC))
|
||||||
|
builder.WriteString(", ")
|
||||||
|
if v := _m.GroupID; v != nil {
|
||||||
|
builder.WriteString("group_id=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", *v))
|
||||||
|
}
|
||||||
|
builder.WriteString(", ")
|
||||||
|
builder.WriteString("validity_days=")
|
||||||
|
builder.WriteString(fmt.Sprintf("%v", _m.ValidityDays))
|
||||||
|
builder.WriteByte(')')
|
||||||
|
return builder.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodes is a parsable slice of RedeemCode.
|
||||||
|
type RedeemCodes []*RedeemCode
|
||||||
187
backend/ent/redeemcode/redeemcode.go
Normal file
187
backend/ent/redeemcode/redeemcode.go
Normal file
@@ -0,0 +1,187 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package redeemcode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Label holds the string label denoting the redeemcode type in the database.
|
||||||
|
Label = "redeem_code"
|
||||||
|
// FieldID holds the string denoting the id field in the database.
|
||||||
|
FieldID = "id"
|
||||||
|
// FieldCode holds the string denoting the code field in the database.
|
||||||
|
FieldCode = "code"
|
||||||
|
// FieldType holds the string denoting the type field in the database.
|
||||||
|
FieldType = "type"
|
||||||
|
// FieldValue holds the string denoting the value field in the database.
|
||||||
|
FieldValue = "value"
|
||||||
|
// FieldStatus holds the string denoting the status field in the database.
|
||||||
|
FieldStatus = "status"
|
||||||
|
// FieldUsedBy holds the string denoting the used_by field in the database.
|
||||||
|
FieldUsedBy = "used_by"
|
||||||
|
// FieldUsedAt holds the string denoting the used_at field in the database.
|
||||||
|
FieldUsedAt = "used_at"
|
||||||
|
// FieldNotes holds the string denoting the notes field in the database.
|
||||||
|
FieldNotes = "notes"
|
||||||
|
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||||
|
FieldCreatedAt = "created_at"
|
||||||
|
// FieldGroupID holds the string denoting the group_id field in the database.
|
||||||
|
FieldGroupID = "group_id"
|
||||||
|
// FieldValidityDays holds the string denoting the validity_days field in the database.
|
||||||
|
FieldValidityDays = "validity_days"
|
||||||
|
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||||
|
EdgeUser = "user"
|
||||||
|
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||||
|
EdgeGroup = "group"
|
||||||
|
// Table holds the table name of the redeemcode in the database.
|
||||||
|
Table = "redeem_codes"
|
||||||
|
// UserTable is the table that holds the user relation/edge.
|
||||||
|
UserTable = "redeem_codes"
|
||||||
|
// UserInverseTable is the table name for the User entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||||
|
UserInverseTable = "users"
|
||||||
|
// UserColumn is the table column denoting the user relation/edge.
|
||||||
|
UserColumn = "used_by"
|
||||||
|
// GroupTable is the table that holds the group relation/edge.
|
||||||
|
GroupTable = "redeem_codes"
|
||||||
|
// GroupInverseTable is the table name for the Group entity.
|
||||||
|
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||||
|
GroupInverseTable = "groups"
|
||||||
|
// GroupColumn is the table column denoting the group relation/edge.
|
||||||
|
GroupColumn = "group_id"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Columns holds all SQL columns for redeemcode fields.
|
||||||
|
var Columns = []string{
|
||||||
|
FieldID,
|
||||||
|
FieldCode,
|
||||||
|
FieldType,
|
||||||
|
FieldValue,
|
||||||
|
FieldStatus,
|
||||||
|
FieldUsedBy,
|
||||||
|
FieldUsedAt,
|
||||||
|
FieldNotes,
|
||||||
|
FieldCreatedAt,
|
||||||
|
FieldGroupID,
|
||||||
|
FieldValidityDays,
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidColumn reports if the column name is valid (part of the table columns).
|
||||||
|
func ValidColumn(column string) bool {
|
||||||
|
for i := range Columns {
|
||||||
|
if column == Columns[i] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// CodeValidator is a validator for the "code" field. It is called by the builders before save.
|
||||||
|
CodeValidator func(string) error
|
||||||
|
// DefaultType holds the default value on creation for the "type" field.
|
||||||
|
DefaultType string
|
||||||
|
// TypeValidator is a validator for the "type" field. It is called by the builders before save.
|
||||||
|
TypeValidator func(string) error
|
||||||
|
// DefaultValue holds the default value on creation for the "value" field.
|
||||||
|
DefaultValue float64
|
||||||
|
// DefaultStatus holds the default value on creation for the "status" field.
|
||||||
|
DefaultStatus string
|
||||||
|
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
StatusValidator func(string) error
|
||||||
|
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||||
|
DefaultCreatedAt func() time.Time
|
||||||
|
// DefaultValidityDays holds the default value on creation for the "validity_days" field.
|
||||||
|
DefaultValidityDays int
|
||||||
|
)
|
||||||
|
|
||||||
|
// OrderOption defines the ordering options for the RedeemCode queries.
|
||||||
|
type OrderOption func(*sql.Selector)
|
||||||
|
|
||||||
|
// ByID orders the results by the id field.
|
||||||
|
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCode orders the results by the code field.
|
||||||
|
func ByCode(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCode, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByType orders the results by the type field.
|
||||||
|
func ByType(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldType, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByValue orders the results by the value field.
|
||||||
|
func ByValue(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldValue, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByStatus orders the results by the status field.
|
||||||
|
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldStatus, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsedBy orders the results by the used_by field.
|
||||||
|
func ByUsedBy(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsedBy, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUsedAt orders the results by the used_at field.
|
||||||
|
func ByUsedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldUsedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByNotes orders the results by the notes field.
|
||||||
|
func ByNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldNotes, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByCreatedAt orders the results by the created_at field.
|
||||||
|
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupID orders the results by the group_id field.
|
||||||
|
func ByGroupID(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldGroupID, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByValidityDays orders the results by the validity_days field.
|
||||||
|
func ByValidityDays(opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return sql.OrderByField(FieldValidityDays, opts...).ToFunc()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByUserField orders the results by user field.
|
||||||
|
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ByGroupField orders the results by group field.
|
||||||
|
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||||
|
return func(s *sql.Selector) {
|
||||||
|
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
func newUserStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(UserInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
func newGroupStep() *sqlgraph.Step {
|
||||||
|
return sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.To(GroupInverseTable, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||||
|
)
|
||||||
|
}
|
||||||
667
backend/ent/redeemcode/where.go
Normal file
667
backend/ent/redeemcode/where.go
Normal file
@@ -0,0 +1,667 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package redeemcode
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ID filters vertices based on their ID field.
|
||||||
|
func ID(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDEQ applies the EQ predicate on the ID field.
|
||||||
|
func IDEQ(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNEQ applies the NEQ predicate on the ID field.
|
||||||
|
func IDNEQ(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDIn applies the In predicate on the ID field.
|
||||||
|
func IDIn(ids ...int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDNotIn applies the NotIn predicate on the ID field.
|
||||||
|
func IDNotIn(ids ...int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldID, ids...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGT applies the GT predicate on the ID field.
|
||||||
|
func IDGT(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDGTE applies the GTE predicate on the ID field.
|
||||||
|
func IDGTE(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLT applies the LT predicate on the ID field.
|
||||||
|
func IDLT(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDLTE applies the LTE predicate on the ID field.
|
||||||
|
func IDLTE(id int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldID, id))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Code applies equality check predicate on the "code" field. It's identical to CodeEQ.
|
||||||
|
func Code(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Type applies equality check predicate on the "type" field. It's identical to TypeEQ.
|
||||||
|
func Type(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Value applies equality check predicate on the "value" field. It's identical to ValueEQ.
|
||||||
|
func Value(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
|
||||||
|
func Status(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedBy applies equality check predicate on the "used_by" field. It's identical to UsedByEQ.
|
||||||
|
func UsedBy(v int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldUsedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ.
|
||||||
|
func UsedAt(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ.
|
||||||
|
func Notes(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||||
|
func CreatedAt(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
|
||||||
|
func GroupID(v int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldGroupID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDays applies equality check predicate on the "validity_days" field. It's identical to ValidityDaysEQ.
|
||||||
|
func ValidityDays(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeEQ applies the EQ predicate on the "code" field.
|
||||||
|
func CodeEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeNEQ applies the NEQ predicate on the "code" field.
|
||||||
|
func CodeNEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeIn applies the In predicate on the "code" field.
|
||||||
|
func CodeIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldCode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeNotIn applies the NotIn predicate on the "code" field.
|
||||||
|
func CodeNotIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldCode, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeGT applies the GT predicate on the "code" field.
|
||||||
|
func CodeGT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeGTE applies the GTE predicate on the "code" field.
|
||||||
|
func CodeGTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeLT applies the LT predicate on the "code" field.
|
||||||
|
func CodeLT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeLTE applies the LTE predicate on the "code" field.
|
||||||
|
func CodeLTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeContains applies the Contains predicate on the "code" field.
|
||||||
|
func CodeContains(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContains(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeHasPrefix applies the HasPrefix predicate on the "code" field.
|
||||||
|
func CodeHasPrefix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasPrefix(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeHasSuffix applies the HasSuffix predicate on the "code" field.
|
||||||
|
func CodeHasSuffix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasSuffix(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeEqualFold applies the EqualFold predicate on the "code" field.
|
||||||
|
func CodeEqualFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEqualFold(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CodeContainsFold applies the ContainsFold predicate on the "code" field.
|
||||||
|
func CodeContainsFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContainsFold(FieldCode, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeEQ applies the EQ predicate on the "type" field.
|
||||||
|
func TypeEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeNEQ applies the NEQ predicate on the "type" field.
|
||||||
|
func TypeNEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeIn applies the In predicate on the "type" field.
|
||||||
|
func TypeIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeNotIn applies the NotIn predicate on the "type" field.
|
||||||
|
func TypeNotIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldType, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeGT applies the GT predicate on the "type" field.
|
||||||
|
func TypeGT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeGTE applies the GTE predicate on the "type" field.
|
||||||
|
func TypeGTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeLT applies the LT predicate on the "type" field.
|
||||||
|
func TypeLT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeLTE applies the LTE predicate on the "type" field.
|
||||||
|
func TypeLTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeContains applies the Contains predicate on the "type" field.
|
||||||
|
func TypeContains(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContains(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeHasPrefix applies the HasPrefix predicate on the "type" field.
|
||||||
|
func TypeHasPrefix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasPrefix(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeHasSuffix applies the HasSuffix predicate on the "type" field.
|
||||||
|
func TypeHasSuffix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasSuffix(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeEqualFold applies the EqualFold predicate on the "type" field.
|
||||||
|
func TypeEqualFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEqualFold(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// TypeContainsFold applies the ContainsFold predicate on the "type" field.
|
||||||
|
func TypeContainsFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContainsFold(FieldType, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueEQ applies the EQ predicate on the "value" field.
|
||||||
|
func ValueEQ(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueNEQ applies the NEQ predicate on the "value" field.
|
||||||
|
func ValueNEQ(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueIn applies the In predicate on the "value" field.
|
||||||
|
func ValueIn(vs ...float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldValue, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueNotIn applies the NotIn predicate on the "value" field.
|
||||||
|
func ValueNotIn(vs ...float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldValue, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueGT applies the GT predicate on the "value" field.
|
||||||
|
func ValueGT(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueGTE applies the GTE predicate on the "value" field.
|
||||||
|
func ValueGTE(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueLT applies the LT predicate on the "value" field.
|
||||||
|
func ValueLT(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValueLTE applies the LTE predicate on the "value" field.
|
||||||
|
func ValueLTE(v float64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldValue, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEQ applies the EQ predicate on the "status" field.
|
||||||
|
func StatusEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNEQ applies the NEQ predicate on the "status" field.
|
||||||
|
func StatusNEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusIn applies the In predicate on the "status" field.
|
||||||
|
func StatusIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusNotIn applies the NotIn predicate on the "status" field.
|
||||||
|
func StatusNotIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldStatus, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGT applies the GT predicate on the "status" field.
|
||||||
|
func StatusGT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusGTE applies the GTE predicate on the "status" field.
|
||||||
|
func StatusGTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLT applies the LT predicate on the "status" field.
|
||||||
|
func StatusLT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusLTE applies the LTE predicate on the "status" field.
|
||||||
|
func StatusLTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContains applies the Contains predicate on the "status" field.
|
||||||
|
func StatusContains(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContains(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasPrefix applies the HasPrefix predicate on the "status" field.
|
||||||
|
func StatusHasPrefix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasPrefix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusHasSuffix applies the HasSuffix predicate on the "status" field.
|
||||||
|
func StatusHasSuffix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasSuffix(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusEqualFold applies the EqualFold predicate on the "status" field.
|
||||||
|
func StatusEqualFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEqualFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// StatusContainsFold applies the ContainsFold predicate on the "status" field.
|
||||||
|
func StatusContainsFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContainsFold(FieldStatus, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedByEQ applies the EQ predicate on the "used_by" field.
|
||||||
|
func UsedByEQ(v int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldUsedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedByNEQ applies the NEQ predicate on the "used_by" field.
|
||||||
|
func UsedByNEQ(v int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldUsedBy, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedByIn applies the In predicate on the "used_by" field.
|
||||||
|
func UsedByIn(vs ...int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldUsedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedByNotIn applies the NotIn predicate on the "used_by" field.
|
||||||
|
func UsedByNotIn(vs ...int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldUsedBy, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedByIsNil applies the IsNil predicate on the "used_by" field.
|
||||||
|
func UsedByIsNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIsNull(FieldUsedBy))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedByNotNil applies the NotNil predicate on the "used_by" field.
|
||||||
|
func UsedByNotNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotNull(FieldUsedBy))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtEQ applies the EQ predicate on the "used_at" field.
|
||||||
|
func UsedAtEQ(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtNEQ applies the NEQ predicate on the "used_at" field.
|
||||||
|
func UsedAtNEQ(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtIn applies the In predicate on the "used_at" field.
|
||||||
|
func UsedAtIn(vs ...time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldUsedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtNotIn applies the NotIn predicate on the "used_at" field.
|
||||||
|
func UsedAtNotIn(vs ...time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldUsedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtGT applies the GT predicate on the "used_at" field.
|
||||||
|
func UsedAtGT(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtGTE applies the GTE predicate on the "used_at" field.
|
||||||
|
func UsedAtGTE(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtLT applies the LT predicate on the "used_at" field.
|
||||||
|
func UsedAtLT(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtLTE applies the LTE predicate on the "used_at" field.
|
||||||
|
func UsedAtLTE(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldUsedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtIsNil applies the IsNil predicate on the "used_at" field.
|
||||||
|
func UsedAtIsNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIsNull(FieldUsedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// UsedAtNotNil applies the NotNil predicate on the "used_at" field.
|
||||||
|
func UsedAtNotNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotNull(FieldUsedAt))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesEQ applies the EQ predicate on the "notes" field.
|
||||||
|
func NotesEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNEQ applies the NEQ predicate on the "notes" field.
|
||||||
|
func NotesNEQ(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesIn applies the In predicate on the "notes" field.
|
||||||
|
func NotesIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldNotes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNotIn applies the NotIn predicate on the "notes" field.
|
||||||
|
func NotesNotIn(vs ...string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldNotes, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesGT applies the GT predicate on the "notes" field.
|
||||||
|
func NotesGT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesGTE applies the GTE predicate on the "notes" field.
|
||||||
|
func NotesGTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesLT applies the LT predicate on the "notes" field.
|
||||||
|
func NotesLT(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesLTE applies the LTE predicate on the "notes" field.
|
||||||
|
func NotesLTE(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesContains applies the Contains predicate on the "notes" field.
|
||||||
|
func NotesContains(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContains(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesHasPrefix applies the HasPrefix predicate on the "notes" field.
|
||||||
|
func NotesHasPrefix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasPrefix(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesHasSuffix applies the HasSuffix predicate on the "notes" field.
|
||||||
|
func NotesHasSuffix(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldHasSuffix(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesIsNil applies the IsNil predicate on the "notes" field.
|
||||||
|
func NotesIsNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIsNull(FieldNotes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesNotNil applies the NotNil predicate on the "notes" field.
|
||||||
|
func NotesNotNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotNull(FieldNotes))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesEqualFold applies the EqualFold predicate on the "notes" field.
|
||||||
|
func NotesEqualFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEqualFold(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// NotesContainsFold applies the ContainsFold predicate on the "notes" field.
|
||||||
|
func NotesContainsFold(v string) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldContainsFold(FieldNotes, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtEQ(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||||
|
func CreatedAtNEQ(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||||
|
func CreatedAtIn(vs ...time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||||
|
func CreatedAtNotIn(vs ...time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||||
|
func CreatedAtGT(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtGTE(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||||
|
func CreatedAtLT(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||||
|
func CreatedAtLTE(v time.Time) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldCreatedAt, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDEQ applies the EQ predicate on the "group_id" field.
|
||||||
|
func GroupIDEQ(v int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldGroupID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
|
||||||
|
func GroupIDNEQ(v int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldGroupID, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDIn applies the In predicate on the "group_id" field.
|
||||||
|
func GroupIDIn(vs ...int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldGroupID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
|
||||||
|
func GroupIDNotIn(vs ...int64) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldGroupID, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDIsNil applies the IsNil predicate on the "group_id" field.
|
||||||
|
func GroupIDIsNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIsNull(FieldGroupID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupIDNotNil applies the NotNil predicate on the "group_id" field.
|
||||||
|
func GroupIDNotNil() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotNull(FieldGroupID))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysEQ applies the EQ predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysEQ(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldEQ(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysNEQ applies the NEQ predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysNEQ(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNEQ(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysIn applies the In predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysIn(vs ...int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldIn(FieldValidityDays, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysNotIn applies the NotIn predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysNotIn(vs ...int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldNotIn(FieldValidityDays, vs...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysGT applies the GT predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysGT(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGT(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysGTE applies the GTE predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysGTE(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldGTE(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysLT applies the LT predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysLT(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLT(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// ValidityDaysLTE applies the LTE predicate on the "validity_days" field.
|
||||||
|
func ValidityDaysLTE(v int) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.FieldLTE(FieldValidityDays, v))
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||||
|
func HasUser() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
|
||||||
|
func HasUserWith(preds ...predicate.User) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(func(s *sql.Selector) {
|
||||||
|
step := newUserStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasGroup applies the HasEdge predicate on the "group" edge.
|
||||||
|
func HasGroup() predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(func(s *sql.Selector) {
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(Table, FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||||
|
)
|
||||||
|
sqlgraph.HasNeighbors(s, step)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||||
|
func HasGroupWith(preds ...predicate.Group) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(func(s *sql.Selector) {
|
||||||
|
step := newGroupStep()
|
||||||
|
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||||
|
for _, p := range preds {
|
||||||
|
p(s)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// And groups predicates with the AND operator between them.
|
||||||
|
func And(predicates ...predicate.RedeemCode) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.AndPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Or groups predicates with the OR operator between them.
|
||||||
|
func Or(predicates ...predicate.RedeemCode) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.OrPredicates(predicates...))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Not applies the not operator on the given predicate.
|
||||||
|
func Not(p predicate.RedeemCode) predicate.RedeemCode {
|
||||||
|
return predicate.RedeemCode(sql.NotPredicates(p))
|
||||||
|
}
|
||||||
1177
backend/ent/redeemcode_create.go
Normal file
1177
backend/ent/redeemcode_create.go
Normal file
File diff suppressed because it is too large
Load Diff
88
backend/ent/redeemcode_delete.go
Normal file
88
backend/ent/redeemcode_delete.go
Normal file
@@ -0,0 +1,88 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RedeemCodeDelete is the builder for deleting a RedeemCode entity.
|
||||||
|
type RedeemCodeDelete struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *RedeemCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the RedeemCodeDelete builder.
|
||||||
|
func (_d *RedeemCodeDelete) Where(ps ...predicate.RedeemCode) *RedeemCodeDelete {
|
||||||
|
_d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||||
|
func (_d *RedeemCodeDelete) Exec(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *RedeemCodeDelete) ExecX(ctx context.Context) int {
|
||||||
|
n, err := _d.Exec(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_d *RedeemCodeDelete) sqlExec(ctx context.Context) (int, error) {
|
||||||
|
_spec := sqlgraph.NewDeleteSpec(redeemcode.Table, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64))
|
||||||
|
if ps := _d.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec)
|
||||||
|
if err != nil && sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
_d.mutation.done = true
|
||||||
|
return affected, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodeDeleteOne is the builder for deleting a single RedeemCode entity.
|
||||||
|
type RedeemCodeDeleteOne struct {
|
||||||
|
_d *RedeemCodeDelete
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the RedeemCodeDelete builder.
|
||||||
|
func (_d *RedeemCodeDeleteOne) Where(ps ...predicate.RedeemCode) *RedeemCodeDeleteOne {
|
||||||
|
_d._d.mutation.Where(ps...)
|
||||||
|
return _d
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the deletion query.
|
||||||
|
func (_d *RedeemCodeDeleteOne) Exec(ctx context.Context) error {
|
||||||
|
n, err := _d._d.Exec(ctx)
|
||||||
|
switch {
|
||||||
|
case err != nil:
|
||||||
|
return err
|
||||||
|
case n == 0:
|
||||||
|
return &NotFoundError{redeemcode.Label}
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_d *RedeemCodeDeleteOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _d.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
724
backend/ent/redeemcode_query.go
Normal file
724
backend/ent/redeemcode_query.go
Normal file
@@ -0,0 +1,724 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RedeemCodeQuery is the builder for querying RedeemCode entities.
|
||||||
|
type RedeemCodeQuery struct {
|
||||||
|
config
|
||||||
|
ctx *QueryContext
|
||||||
|
order []redeemcode.OrderOption
|
||||||
|
inters []Interceptor
|
||||||
|
predicates []predicate.RedeemCode
|
||||||
|
withUser *UserQuery
|
||||||
|
withGroup *GroupQuery
|
||||||
|
modifiers []func(*sql.Selector)
|
||||||
|
// intermediate query (i.e. traversal path).
|
||||||
|
sql *sql.Selector
|
||||||
|
path func(context.Context) (*sql.Selector, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where adds a new predicate for the RedeemCodeQuery builder.
|
||||||
|
func (_q *RedeemCodeQuery) Where(ps ...predicate.RedeemCode) *RedeemCodeQuery {
|
||||||
|
_q.predicates = append(_q.predicates, ps...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Limit the number of records to be returned by this query.
|
||||||
|
func (_q *RedeemCodeQuery) Limit(limit int) *RedeemCodeQuery {
|
||||||
|
_q.ctx.Limit = &limit
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offset to start from.
|
||||||
|
func (_q *RedeemCodeQuery) Offset(offset int) *RedeemCodeQuery {
|
||||||
|
_q.ctx.Offset = &offset
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unique configures the query builder to filter duplicate records on query.
|
||||||
|
// By default, unique is set to true, and can be disabled using this method.
|
||||||
|
func (_q *RedeemCodeQuery) Unique(unique bool) *RedeemCodeQuery {
|
||||||
|
_q.ctx.Unique = &unique
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// Order specifies how the records should be ordered.
|
||||||
|
func (_q *RedeemCodeQuery) Order(o ...redeemcode.OrderOption) *RedeemCodeQuery {
|
||||||
|
_q.order = append(_q.order, o...)
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryUser chains the current query on the "user" edge.
|
||||||
|
func (_q *RedeemCodeQuery) QueryUser() *UserQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(redeemcode.Table, redeemcode.FieldID, selector),
|
||||||
|
sqlgraph.To(user.Table, user.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.UserTable, redeemcode.UserColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// QueryGroup chains the current query on the "group" edge.
|
||||||
|
func (_q *RedeemCodeQuery) QueryGroup() *GroupQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
selector := _q.sqlQuery(ctx)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
step := sqlgraph.NewStep(
|
||||||
|
sqlgraph.From(redeemcode.Table, redeemcode.FieldID, selector),
|
||||||
|
sqlgraph.To(group.Table, group.FieldID),
|
||||||
|
sqlgraph.Edge(sqlgraph.M2O, true, redeemcode.GroupTable, redeemcode.GroupColumn),
|
||||||
|
)
|
||||||
|
fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step)
|
||||||
|
return fromU, nil
|
||||||
|
}
|
||||||
|
return query
|
||||||
|
}
|
||||||
|
|
||||||
|
// First returns the first RedeemCode entity from the query.
|
||||||
|
// Returns a *NotFoundError when no RedeemCode was found.
|
||||||
|
func (_q *RedeemCodeQuery) First(ctx context.Context) (*RedeemCode, error) {
|
||||||
|
nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nil, &NotFoundError{redeemcode.Label}
|
||||||
|
}
|
||||||
|
return nodes[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstX is like First, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) FirstX(ctx context.Context) *RedeemCode {
|
||||||
|
node, err := _q.First(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstID returns the first RedeemCode ID from the query.
|
||||||
|
// Returns a *NotFoundError when no RedeemCode ID was found.
|
||||||
|
func (_q *RedeemCodeQuery) FirstID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
err = &NotFoundError{redeemcode.Label}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return ids[0], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) FirstIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.FirstID(ctx)
|
||||||
|
if err != nil && !IsNotFound(err) {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// Only returns a single RedeemCode entity found by the query, ensuring it only returns one.
|
||||||
|
// Returns a *NotSingularError when more than one RedeemCode entity is found.
|
||||||
|
// Returns a *NotFoundError when no RedeemCode entities are found.
|
||||||
|
func (_q *RedeemCodeQuery) Only(ctx context.Context) (*RedeemCode, error) {
|
||||||
|
nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
switch len(nodes) {
|
||||||
|
case 1:
|
||||||
|
return nodes[0], nil
|
||||||
|
case 0:
|
||||||
|
return nil, &NotFoundError{redeemcode.Label}
|
||||||
|
default:
|
||||||
|
return nil, &NotSingularError{redeemcode.Label}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyX is like Only, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) OnlyX(ctx context.Context) *RedeemCode {
|
||||||
|
node, err := _q.Only(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyID is like Only, but returns the only RedeemCode ID in the query.
|
||||||
|
// Returns a *NotSingularError when more than one RedeemCode ID is found.
|
||||||
|
// Returns a *NotFoundError when no entities are found.
|
||||||
|
func (_q *RedeemCodeQuery) OnlyID(ctx context.Context) (id int64, err error) {
|
||||||
|
var ids []int64
|
||||||
|
if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
switch len(ids) {
|
||||||
|
case 1:
|
||||||
|
id = ids[0]
|
||||||
|
case 0:
|
||||||
|
err = &NotFoundError{redeemcode.Label}
|
||||||
|
default:
|
||||||
|
err = &NotSingularError{redeemcode.Label}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) OnlyIDX(ctx context.Context) int64 {
|
||||||
|
id, err := _q.OnlyID(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return id
|
||||||
|
}
|
||||||
|
|
||||||
|
// All executes the query and returns a list of RedeemCodes.
|
||||||
|
func (_q *RedeemCodeQuery) All(ctx context.Context) ([]*RedeemCode, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
qr := querierAll[[]*RedeemCode, *RedeemCodeQuery]()
|
||||||
|
return withInterceptors[[]*RedeemCode](ctx, _q, qr, _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AllX is like All, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) AllX(ctx context.Context) []*RedeemCode {
|
||||||
|
nodes, err := _q.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return nodes
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDs executes the query and returns a list of RedeemCode IDs.
|
||||||
|
func (_q *RedeemCodeQuery) IDs(ctx context.Context) (ids []int64, err error) {
|
||||||
|
if _q.ctx.Unique == nil && _q.path != nil {
|
||||||
|
_q.Unique(true)
|
||||||
|
}
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs)
|
||||||
|
if err = _q.Select(redeemcode.FieldID).Scan(ctx, &ids); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IDsX is like IDs, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) IDsX(ctx context.Context) []int64 {
|
||||||
|
ids, err := _q.IDs(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return ids
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count returns the count of the given query.
|
||||||
|
func (_q *RedeemCodeQuery) Count(ctx context.Context) (int, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount)
|
||||||
|
if err := _q.prepareQuery(ctx); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return withInterceptors[int](ctx, _q, querierCount[*RedeemCodeQuery](), _q.inters)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CountX is like Count, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) CountX(ctx context.Context) int {
|
||||||
|
count, err := _q.Count(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return count
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exist returns true if the query has elements in the graph.
|
||||||
|
func (_q *RedeemCodeQuery) Exist(ctx context.Context) (bool, error) {
|
||||||
|
ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist)
|
||||||
|
switch _, err := _q.FirstID(ctx); {
|
||||||
|
case IsNotFound(err):
|
||||||
|
return false, nil
|
||||||
|
case err != nil:
|
||||||
|
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||||
|
default:
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExistX is like Exist, but panics if an error occurs.
|
||||||
|
func (_q *RedeemCodeQuery) ExistX(ctx context.Context) bool {
|
||||||
|
exist, err := _q.Exist(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return exist
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone returns a duplicate of the RedeemCodeQuery builder, including all associated steps. It can be
|
||||||
|
// used to prepare common query builders and use them differently after the clone is made.
|
||||||
|
func (_q *RedeemCodeQuery) Clone() *RedeemCodeQuery {
|
||||||
|
if _q == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &RedeemCodeQuery{
|
||||||
|
config: _q.config,
|
||||||
|
ctx: _q.ctx.Clone(),
|
||||||
|
order: append([]redeemcode.OrderOption{}, _q.order...),
|
||||||
|
inters: append([]Interceptor{}, _q.inters...),
|
||||||
|
predicates: append([]predicate.RedeemCode{}, _q.predicates...),
|
||||||
|
withUser: _q.withUser.Clone(),
|
||||||
|
withGroup: _q.withGroup.Clone(),
|
||||||
|
// clone intermediate query.
|
||||||
|
sql: _q.sql.Clone(),
|
||||||
|
path: _q.path,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *RedeemCodeQuery) WithUser(opts ...func(*UserQuery)) *RedeemCodeQuery {
|
||||||
|
query := (&UserClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withUser = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||||
|
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||||
|
func (_q *RedeemCodeQuery) WithGroup(opts ...func(*GroupQuery)) *RedeemCodeQuery {
|
||||||
|
query := (&GroupClient{config: _q.config}).Query()
|
||||||
|
for _, opt := range opts {
|
||||||
|
opt(query)
|
||||||
|
}
|
||||||
|
_q.withGroup = query
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// GroupBy is used to group vertices by one or more fields/columns.
|
||||||
|
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Code string `json:"code,omitempty"`
|
||||||
|
// Count int `json:"count,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.RedeemCode.Query().
|
||||||
|
// GroupBy(redeemcode.FieldCode).
|
||||||
|
// Aggregate(ent.Count()).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *RedeemCodeQuery) GroupBy(field string, fields ...string) *RedeemCodeGroupBy {
|
||||||
|
_q.ctx.Fields = append([]string{field}, fields...)
|
||||||
|
grbuild := &RedeemCodeGroupBy{build: _q}
|
||||||
|
grbuild.flds = &_q.ctx.Fields
|
||||||
|
grbuild.label = redeemcode.Label
|
||||||
|
grbuild.scan = grbuild.Scan
|
||||||
|
return grbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows the selection one or more fields/columns for the given query,
|
||||||
|
// instead of selecting all fields in the entity.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var v []struct {
|
||||||
|
// Code string `json:"code,omitempty"`
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// client.RedeemCode.Query().
|
||||||
|
// Select(redeemcode.FieldCode).
|
||||||
|
// Scan(ctx, &v)
|
||||||
|
func (_q *RedeemCodeQuery) Select(fields ...string) *RedeemCodeSelect {
|
||||||
|
_q.ctx.Fields = append(_q.ctx.Fields, fields...)
|
||||||
|
sbuild := &RedeemCodeSelect{RedeemCodeQuery: _q}
|
||||||
|
sbuild.label = redeemcode.Label
|
||||||
|
sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan
|
||||||
|
return sbuild
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate returns a RedeemCodeSelect configured with the given aggregations.
|
||||||
|
func (_q *RedeemCodeQuery) Aggregate(fns ...AggregateFunc) *RedeemCodeSelect {
|
||||||
|
return _q.Select().Aggregate(fns...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *RedeemCodeQuery) prepareQuery(ctx context.Context) error {
|
||||||
|
for _, inter := range _q.inters {
|
||||||
|
if inter == nil {
|
||||||
|
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||||
|
}
|
||||||
|
if trv, ok := inter.(Traverser); ok {
|
||||||
|
if err := trv.Traverse(ctx, _q); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, f := range _q.ctx.Fields {
|
||||||
|
if !redeemcode.ValidColumn(f) {
|
||||||
|
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.path != nil {
|
||||||
|
prev, err := _q.path(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_q.sql = prev
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *RedeemCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*RedeemCode, error) {
|
||||||
|
var (
|
||||||
|
nodes = []*RedeemCode{}
|
||||||
|
_spec = _q.querySpec()
|
||||||
|
loadedTypes = [2]bool{
|
||||||
|
_q.withUser != nil,
|
||||||
|
_q.withGroup != nil,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||||
|
return (*RedeemCode).scanValues(nil, columns)
|
||||||
|
}
|
||||||
|
_spec.Assign = func(columns []string, values []any) error {
|
||||||
|
node := &RedeemCode{config: _q.config}
|
||||||
|
nodes = append(nodes, node)
|
||||||
|
node.Edges.loadedTypes = loadedTypes
|
||||||
|
return node.assignValues(columns, values)
|
||||||
|
}
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
for i := range hooks {
|
||||||
|
hooks[i](ctx, _spec)
|
||||||
|
}
|
||||||
|
if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if len(nodes) == 0 {
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
if query := _q.withUser; query != nil {
|
||||||
|
if err := _q.loadUser(ctx, query, nodes, nil,
|
||||||
|
func(n *RedeemCode, e *User) { n.Edges.User = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if query := _q.withGroup; query != nil {
|
||||||
|
if err := _q.loadGroup(ctx, query, nodes, nil,
|
||||||
|
func(n *RedeemCode, e *Group) { n.Edges.Group = e }); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nodes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *RedeemCodeQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*RedeemCode, init func(*RedeemCode), assign func(*RedeemCode, *User)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*RedeemCode)
|
||||||
|
for i := range nodes {
|
||||||
|
if nodes[i].UsedBy == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fk := *nodes[i].UsedBy
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(user.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "used_by" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (_q *RedeemCodeQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*RedeemCode, init func(*RedeemCode), assign func(*RedeemCode, *Group)) error {
|
||||||
|
ids := make([]int64, 0, len(nodes))
|
||||||
|
nodeids := make(map[int64][]*RedeemCode)
|
||||||
|
for i := range nodes {
|
||||||
|
if nodes[i].GroupID == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fk := *nodes[i].GroupID
|
||||||
|
if _, ok := nodeids[fk]; !ok {
|
||||||
|
ids = append(ids, fk)
|
||||||
|
}
|
||||||
|
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||||
|
}
|
||||||
|
if len(ids) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
query.Where(group.IDIn(ids...))
|
||||||
|
neighbors, err := query.All(ctx)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, n := range neighbors {
|
||||||
|
nodes, ok := nodeids[n.ID]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
|
||||||
|
}
|
||||||
|
for i := range nodes {
|
||||||
|
assign(nodes[i], n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *RedeemCodeQuery) sqlCount(ctx context.Context) (int, error) {
|
||||||
|
_spec := _q.querySpec()
|
||||||
|
if len(_q.modifiers) > 0 {
|
||||||
|
_spec.Modifiers = _q.modifiers
|
||||||
|
}
|
||||||
|
_spec.Node.Columns = _q.ctx.Fields
|
||||||
|
if len(_q.ctx.Fields) > 0 {
|
||||||
|
_spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique
|
||||||
|
}
|
||||||
|
return sqlgraph.CountNodes(ctx, _q.driver, _spec)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *RedeemCodeQuery) querySpec() *sqlgraph.QuerySpec {
|
||||||
|
_spec := sqlgraph.NewQuerySpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64))
|
||||||
|
_spec.From = _q.sql
|
||||||
|
if unique := _q.ctx.Unique; unique != nil {
|
||||||
|
_spec.Unique = *unique
|
||||||
|
} else if _q.path != nil {
|
||||||
|
_spec.Unique = true
|
||||||
|
}
|
||||||
|
if fields := _q.ctx.Fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, redeemcode.FieldID)
|
||||||
|
for i := range fields {
|
||||||
|
if fields[i] != redeemcode.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _q.withUser != nil {
|
||||||
|
_spec.Node.AddColumnOnce(redeemcode.FieldUsedBy)
|
||||||
|
}
|
||||||
|
if _q.withGroup != nil {
|
||||||
|
_spec.Node.AddColumnOnce(redeemcode.FieldGroupID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _q.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
_spec.Limit = *limit
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
_spec.Offset = *offset
|
||||||
|
}
|
||||||
|
if ps := _q.order; len(ps) > 0 {
|
||||||
|
_spec.Order = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return _spec
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||||
|
builder := sql.Dialect(_q.driver.Dialect())
|
||||||
|
t1 := builder.Table(redeemcode.Table)
|
||||||
|
columns := _q.ctx.Fields
|
||||||
|
if len(columns) == 0 {
|
||||||
|
columns = redeemcode.Columns
|
||||||
|
}
|
||||||
|
selector := builder.Select(t1.Columns(columns...)...).From(t1)
|
||||||
|
if _q.sql != nil {
|
||||||
|
selector = _q.sql
|
||||||
|
selector.Select(selector.Columns(columns...)...)
|
||||||
|
}
|
||||||
|
if _q.ctx.Unique != nil && *_q.ctx.Unique {
|
||||||
|
selector.Distinct()
|
||||||
|
}
|
||||||
|
for _, m := range _q.modifiers {
|
||||||
|
m(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.predicates {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
for _, p := range _q.order {
|
||||||
|
p(selector)
|
||||||
|
}
|
||||||
|
if offset := _q.ctx.Offset; offset != nil {
|
||||||
|
// limit is mandatory for offset clause. We start
|
||||||
|
// with default value, and override it below if needed.
|
||||||
|
selector.Offset(*offset).Limit(math.MaxInt32)
|
||||||
|
}
|
||||||
|
if limit := _q.ctx.Limit; limit != nil {
|
||||||
|
selector.Limit(*limit)
|
||||||
|
}
|
||||||
|
return selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForUpdate locks the selected rows against concurrent updates, and prevent them from being
|
||||||
|
// updated, deleted or "selected ... for update" by other sessions, until the transaction is
|
||||||
|
// either committed or rolled-back.
|
||||||
|
func (_q *RedeemCodeQuery) ForUpdate(opts ...sql.LockOption) *RedeemCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForUpdate(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock
|
||||||
|
// on any rows that are read. Other sessions can read the rows, but cannot modify them
|
||||||
|
// until your transaction commits.
|
||||||
|
func (_q *RedeemCodeQuery) ForShare(opts ...sql.LockOption) *RedeemCodeQuery {
|
||||||
|
if _q.driver.Dialect() == dialect.Postgres {
|
||||||
|
_q.Unique(false)
|
||||||
|
}
|
||||||
|
_q.modifiers = append(_q.modifiers, func(s *sql.Selector) {
|
||||||
|
s.ForShare(opts...)
|
||||||
|
})
|
||||||
|
return _q
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodeGroupBy is the group-by builder for RedeemCode entities.
|
||||||
|
type RedeemCodeGroupBy struct {
|
||||||
|
selector
|
||||||
|
build *RedeemCodeQuery
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the group-by query.
|
||||||
|
func (_g *RedeemCodeGroupBy) Aggregate(fns ...AggregateFunc) *RedeemCodeGroupBy {
|
||||||
|
_g.fns = append(_g.fns, fns...)
|
||||||
|
return _g
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_g *RedeemCodeGroupBy) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy)
|
||||||
|
if err := _g.build.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*RedeemCodeQuery, *RedeemCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_g *RedeemCodeGroupBy) sqlScan(ctx context.Context, root *RedeemCodeQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx).Select()
|
||||||
|
aggregation := make([]string, 0, len(_g.fns))
|
||||||
|
for _, fn := range _g.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
if len(selector.SelectedColumns()) == 0 {
|
||||||
|
columns := make([]string, 0, len(*_g.flds)+len(_g.fns))
|
||||||
|
for _, f := range *_g.flds {
|
||||||
|
columns = append(columns, selector.C(f))
|
||||||
|
}
|
||||||
|
columns = append(columns, aggregation...)
|
||||||
|
selector.Select(columns...)
|
||||||
|
}
|
||||||
|
selector.GroupBy(selector.Columns(*_g.flds...)...)
|
||||||
|
if err := selector.Err(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _g.build.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodeSelect is the builder for selecting fields of RedeemCode entities.
|
||||||
|
type RedeemCodeSelect struct {
|
||||||
|
*RedeemCodeQuery
|
||||||
|
selector
|
||||||
|
}
|
||||||
|
|
||||||
|
// Aggregate adds the given aggregation functions to the selector query.
|
||||||
|
func (_s *RedeemCodeSelect) Aggregate(fns ...AggregateFunc) *RedeemCodeSelect {
|
||||||
|
_s.fns = append(_s.fns, fns...)
|
||||||
|
return _s
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan applies the selector query and scans the result into the given value.
|
||||||
|
func (_s *RedeemCodeSelect) Scan(ctx context.Context, v any) error {
|
||||||
|
ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect)
|
||||||
|
if err := _s.prepareQuery(ctx); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return scanWithInterceptors[*RedeemCodeQuery, *RedeemCodeSelect](ctx, _s.RedeemCodeQuery, _s, _s.inters, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_s *RedeemCodeSelect) sqlScan(ctx context.Context, root *RedeemCodeQuery, v any) error {
|
||||||
|
selector := root.sqlQuery(ctx)
|
||||||
|
aggregation := make([]string, 0, len(_s.fns))
|
||||||
|
for _, fn := range _s.fns {
|
||||||
|
aggregation = append(aggregation, fn(selector))
|
||||||
|
}
|
||||||
|
switch n := len(*_s.selector.flds); {
|
||||||
|
case n == 0 && len(aggregation) > 0:
|
||||||
|
selector.Select(aggregation...)
|
||||||
|
case n != 0 && len(aggregation) > 0:
|
||||||
|
selector.AppendSelect(aggregation...)
|
||||||
|
}
|
||||||
|
rows := &sql.Rows{}
|
||||||
|
query, args := selector.Query()
|
||||||
|
if err := _s.driver.Query(ctx, query, args, rows); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer rows.Close()
|
||||||
|
return sql.ScanSlice(rows, v)
|
||||||
|
}
|
||||||
806
backend/ent/redeemcode_update.go
Normal file
806
backend/ent/redeemcode_update.go
Normal file
@@ -0,0 +1,806 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/predicate"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RedeemCodeUpdate is the builder for updating RedeemCode entities.
|
||||||
|
type RedeemCodeUpdate struct {
|
||||||
|
config
|
||||||
|
hooks []Hook
|
||||||
|
mutation *RedeemCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the RedeemCodeUpdate builder.
|
||||||
|
func (_u *RedeemCodeUpdate) Where(ps ...predicate.RedeemCode) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCode sets the "code" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetCode(v string) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetCode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCode sets the "code" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableCode(v *string) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetType sets the "type" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetType(v string) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetType(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableType sets the "type" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableType(v *string) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetType(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetValue(v float64) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ResetValue()
|
||||||
|
_u.mutation.SetValue(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableValue sets the "value" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableValue(v *float64) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetValue(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddValue adds value to the "value" field.
|
||||||
|
func (_u *RedeemCodeUpdate) AddValue(v float64) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.AddValue(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetStatus(v string) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableStatus(v *string) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedBy sets the "used_by" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetUsedBy(v int64) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetUsedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedBy sets the "used_by" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableUsedBy(v *int64) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsedBy(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsedBy clears the value of the "used_by" field.
|
||||||
|
func (_u *RedeemCodeUpdate) ClearUsedBy() *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ClearUsedBy()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetUsedAt(v time.Time) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetUsedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableUsedAt(v *time.Time) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsedAt clears the value of the "used_at" field.
|
||||||
|
func (_u *RedeemCodeUpdate) ClearUsedAt() *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ClearUsedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNotes sets the "notes" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNotes(v string) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetNotes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotes sets the "notes" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableNotes(v *string) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearNotes clears the value of the "notes" field.
|
||||||
|
func (_u *RedeemCodeUpdate) ClearNotes() *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ClearNotes()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetGroupID(v int64) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableGroupID(v *int64) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroupID clears the value of the "group_id" field.
|
||||||
|
func (_u *RedeemCodeUpdate) ClearGroupID() *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ClearGroupID()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValidityDays sets the "validity_days" field.
|
||||||
|
func (_u *RedeemCodeUpdate) SetValidityDays(v int) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ResetValidityDays()
|
||||||
|
_u.mutation.SetValidityDays(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableValidityDays sets the "validity_days" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableValidityDays(v *int) *RedeemCodeUpdate {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetValidityDays(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddValidityDays adds value to the "validity_days" field.
|
||||||
|
func (_u *RedeemCodeUpdate) AddValidityDays(v int) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.AddValidityDays(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user" edge to the User entity by ID.
|
||||||
|
func (_u *RedeemCodeUpdate) SetUserID(id int64) *RedeemCodeUpdate {
|
||||||
|
_u.mutation.SetUserID(id)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdate) SetNillableUserID(id *int64) *RedeemCodeUpdate {
|
||||||
|
if id != nil {
|
||||||
|
_u = _u.SetUserID(*id)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_u *RedeemCodeUpdate) SetUser(v *User) *RedeemCodeUpdate {
|
||||||
|
return _u.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *RedeemCodeUpdate) SetGroup(v *Group) *RedeemCodeUpdate {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the RedeemCodeMutation object of the builder.
|
||||||
|
func (_u *RedeemCodeUpdate) Mutation() *RedeemCodeMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *RedeemCodeUpdate) ClearUser() *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *RedeemCodeUpdate) ClearGroup() *RedeemCodeUpdate {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||||
|
func (_u *RedeemCodeUpdate) Save(ctx context.Context) (int, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *RedeemCodeUpdate) SaveX(ctx context.Context) int {
|
||||||
|
affected, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return affected
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query.
|
||||||
|
func (_u *RedeemCodeUpdate) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *RedeemCodeUpdate) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *RedeemCodeUpdate) check() error {
|
||||||
|
if v, ok := _u.mutation.Code(); ok {
|
||||||
|
if err := redeemcode.CodeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.GetType(); ok {
|
||||||
|
if err := redeemcode.TypeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := redeemcode.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *RedeemCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64))
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Code(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldCode, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.GetType(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldType, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Value(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedValue(); ok {
|
||||||
|
_spec.AddField(redeemcode.FieldValue, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UsedAt(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsedAtCleared() {
|
||||||
|
_spec.ClearField(redeemcode.FieldUsedAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Notes(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldNotes, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.NotesCleared() {
|
||||||
|
_spec.ClearField(redeemcode.FieldNotes, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ValidityDays(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedValidityDays(); ok {
|
||||||
|
_spec.AddField(redeemcode.FieldValidityDays, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.UserTable,
|
||||||
|
Columns: []string{redeemcode.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.UserTable,
|
||||||
|
Columns: []string{redeemcode.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.GroupTable,
|
||||||
|
Columns: []string{redeemcode.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.GroupTable,
|
||||||
|
Columns: []string{redeemcode.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{redeemcode.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RedeemCodeUpdateOne is the builder for updating a single RedeemCode entity.
|
||||||
|
type RedeemCodeUpdateOne struct {
|
||||||
|
config
|
||||||
|
fields []string
|
||||||
|
hooks []Hook
|
||||||
|
mutation *RedeemCodeMutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetCode sets the "code" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetCode(v string) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetCode(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableCode sets the "code" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableCode(v *string) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetCode(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetType sets the "type" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetType(v string) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetType(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableType sets the "type" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableType(v *string) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetType(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValue sets the "value" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetValue(v float64) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ResetValue()
|
||||||
|
_u.mutation.SetValue(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableValue sets the "value" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableValue(v *float64) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetValue(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddValue adds value to the "value" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) AddValue(v float64) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.AddValue(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetStatus sets the "status" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetStatus(v string) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetStatus(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableStatus sets the "status" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableStatus(v *string) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetStatus(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedBy sets the "used_by" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetUsedBy(v int64) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetUsedBy(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedBy sets the "used_by" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableUsedBy(v *int64) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsedBy(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsedBy clears the value of the "used_by" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ClearUsedBy() *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ClearUsedBy()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUsedAt sets the "used_at" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetUsedAt(v time.Time) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetUsedAt(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUsedAt sets the "used_at" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableUsedAt(v *time.Time) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetUsedAt(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUsedAt clears the value of the "used_at" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ClearUsedAt() *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ClearUsedAt()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNotes sets the "notes" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNotes(v string) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetNotes(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableNotes sets the "notes" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableNotes(v *string) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetNotes(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearNotes clears the value of the "notes" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ClearNotes() *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ClearNotes()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroupID sets the "group_id" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetGroupID(v int64) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetGroupID(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableGroupID sets the "group_id" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableGroupID(v *int64) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetGroupID(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroupID clears the value of the "group_id" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ClearGroupID() *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ClearGroupID()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetValidityDays sets the "validity_days" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetValidityDays(v int) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ResetValidityDays()
|
||||||
|
_u.mutation.SetValidityDays(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableValidityDays sets the "validity_days" field if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableValidityDays(v *int) *RedeemCodeUpdateOne {
|
||||||
|
if v != nil {
|
||||||
|
_u.SetValidityDays(*v)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddValidityDays adds value to the "validity_days" field.
|
||||||
|
func (_u *RedeemCodeUpdateOne) AddValidityDays(v int) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.AddValidityDays(v)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUserID sets the "user" edge to the User entity by ID.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetUserID(id int64) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.SetUserID(id)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetNillableUserID sets the "user" edge to the User entity by ID if the given value is not nil.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetNillableUserID(id *int64) *RedeemCodeUpdateOne {
|
||||||
|
if id != nil {
|
||||||
|
_u = _u.SetUserID(*id)
|
||||||
|
}
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetUser sets the "user" edge to the User entity.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetUser(v *User) *RedeemCodeUpdateOne {
|
||||||
|
return _u.SetUserID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetGroup sets the "group" edge to the Group entity.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SetGroup(v *Group) *RedeemCodeUpdateOne {
|
||||||
|
return _u.SetGroupID(v.ID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mutation returns the RedeemCodeMutation object of the builder.
|
||||||
|
func (_u *RedeemCodeUpdateOne) Mutation() *RedeemCodeMutation {
|
||||||
|
return _u.mutation
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearUser clears the "user" edge to the User entity.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ClearUser() *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ClearUser()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearGroup clears the "group" edge to the Group entity.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ClearGroup() *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.ClearGroup()
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where appends a list predicates to the RedeemCodeUpdate builder.
|
||||||
|
func (_u *RedeemCodeUpdateOne) Where(ps ...predicate.RedeemCode) *RedeemCodeUpdateOne {
|
||||||
|
_u.mutation.Where(ps...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Select allows selecting one or more fields (columns) of the returned entity.
|
||||||
|
// The default is selecting all fields defined in the entity schema.
|
||||||
|
func (_u *RedeemCodeUpdateOne) Select(field string, fields ...string) *RedeemCodeUpdateOne {
|
||||||
|
_u.fields = append([]string{field}, fields...)
|
||||||
|
return _u
|
||||||
|
}
|
||||||
|
|
||||||
|
// Save executes the query and returns the updated RedeemCode entity.
|
||||||
|
func (_u *RedeemCodeUpdateOne) Save(ctx context.Context) (*RedeemCode, error) {
|
||||||
|
return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks)
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveX is like Save, but panics if an error occurs.
|
||||||
|
func (_u *RedeemCodeUpdateOne) SaveX(ctx context.Context) *RedeemCode {
|
||||||
|
node, err := _u.Save(ctx)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return node
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exec executes the query on the entity.
|
||||||
|
func (_u *RedeemCodeUpdateOne) Exec(ctx context.Context) error {
|
||||||
|
_, err := _u.Save(ctx)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExecX is like Exec, but panics if an error occurs.
|
||||||
|
func (_u *RedeemCodeUpdateOne) ExecX(ctx context.Context) {
|
||||||
|
if err := _u.Exec(ctx); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check runs all checks and user-defined validators on the builder.
|
||||||
|
func (_u *RedeemCodeUpdateOne) check() error {
|
||||||
|
if v, ok := _u.mutation.Code(); ok {
|
||||||
|
if err := redeemcode.CodeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.code": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.GetType(); ok {
|
||||||
|
if err := redeemcode.TypeValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.type": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if v, ok := _u.mutation.Status(); ok {
|
||||||
|
if err := redeemcode.StatusValidator(v); err != nil {
|
||||||
|
return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "RedeemCode.status": %w`, err)}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (_u *RedeemCodeUpdateOne) sqlSave(ctx context.Context) (_node *RedeemCode, err error) {
|
||||||
|
if err := _u.check(); err != nil {
|
||||||
|
return _node, err
|
||||||
|
}
|
||||||
|
_spec := sqlgraph.NewUpdateSpec(redeemcode.Table, redeemcode.Columns, sqlgraph.NewFieldSpec(redeemcode.FieldID, field.TypeInt64))
|
||||||
|
id, ok := _u.mutation.ID()
|
||||||
|
if !ok {
|
||||||
|
return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "RedeemCode.id" for update`)}
|
||||||
|
}
|
||||||
|
_spec.Node.ID.Value = id
|
||||||
|
if fields := _u.fields; len(fields) > 0 {
|
||||||
|
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, redeemcode.FieldID)
|
||||||
|
for _, f := range fields {
|
||||||
|
if !redeemcode.ValidColumn(f) {
|
||||||
|
return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||||
|
}
|
||||||
|
if f != redeemcode.FieldID {
|
||||||
|
_spec.Node.Columns = append(_spec.Node.Columns, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ps := _u.mutation.predicates; len(ps) > 0 {
|
||||||
|
_spec.Predicate = func(selector *sql.Selector) {
|
||||||
|
for i := range ps {
|
||||||
|
ps[i](selector)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Code(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldCode, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.GetType(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldType, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Value(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldValue, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedValue(); ok {
|
||||||
|
_spec.AddField(redeemcode.FieldValue, field.TypeFloat64, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Status(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldStatus, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.UsedAt(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldUsedAt, field.TypeTime, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UsedAtCleared() {
|
||||||
|
_spec.ClearField(redeemcode.FieldUsedAt, field.TypeTime)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.Notes(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldNotes, field.TypeString, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.NotesCleared() {
|
||||||
|
_spec.ClearField(redeemcode.FieldNotes, field.TypeString)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.ValidityDays(); ok {
|
||||||
|
_spec.SetField(redeemcode.FieldValidityDays, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if value, ok := _u.mutation.AddedValidityDays(); ok {
|
||||||
|
_spec.AddField(redeemcode.FieldValidityDays, field.TypeInt, value)
|
||||||
|
}
|
||||||
|
if _u.mutation.UserCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.UserTable,
|
||||||
|
Columns: []string{redeemcode.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.UserIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.UserTable,
|
||||||
|
Columns: []string{redeemcode.UserColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
if _u.mutation.GroupCleared() {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.GroupTable,
|
||||||
|
Columns: []string{redeemcode.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||||
|
}
|
||||||
|
if nodes := _u.mutation.GroupIDs(); len(nodes) > 0 {
|
||||||
|
edge := &sqlgraph.EdgeSpec{
|
||||||
|
Rel: sqlgraph.M2O,
|
||||||
|
Inverse: true,
|
||||||
|
Table: redeemcode.GroupTable,
|
||||||
|
Columns: []string{redeemcode.GroupColumn},
|
||||||
|
Bidi: false,
|
||||||
|
Target: &sqlgraph.EdgeTarget{
|
||||||
|
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeInt64),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, k := range nodes {
|
||||||
|
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||||
|
}
|
||||||
|
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||||
|
}
|
||||||
|
_node = &RedeemCode{config: _u.config}
|
||||||
|
_spec.Assign = _node.assignValues
|
||||||
|
_spec.ScanValues = _node.scanValues
|
||||||
|
if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil {
|
||||||
|
if _, ok := err.(*sqlgraph.NotFoundError); ok {
|
||||||
|
err = &NotFoundError{redeemcode.Label}
|
||||||
|
} else if sqlgraph.IsConstraintError(err) {
|
||||||
|
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
_u.mutation.done = true
|
||||||
|
return _node, nil
|
||||||
|
}
|
||||||
5
backend/ent/runtime.go
Normal file
5
backend/ent/runtime.go
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package ent
|
||||||
|
|
||||||
|
// The schema-stitching logic is generated in github.com/Wei-Shaw/sub2api/ent/runtime/runtime.go
|
||||||
871
backend/ent/runtime/runtime.go
Normal file
871
backend/ent/runtime/runtime.go
Normal file
@@ -0,0 +1,871 @@
|
|||||||
|
// Code generated by ent, DO NOT EDIT.
|
||||||
|
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/account"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/accountgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/apikey"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/group"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/promocodeusage"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/proxy"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/redeemcode"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/schema"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/setting"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usagelog"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/user"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userallowedgroup"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userattributedefinition"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/userattributevalue"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/usersubscription"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The init function reads all schema descriptors with runtime code
|
||||||
|
// (default values, validators, hooks and policies) and stitches it
|
||||||
|
// to their package variables.
|
||||||
|
func init() {
|
||||||
|
apikeyMixin := schema.APIKey{}.Mixin()
|
||||||
|
apikeyMixinHooks1 := apikeyMixin[1].Hooks()
|
||||||
|
apikey.Hooks[0] = apikeyMixinHooks1[0]
|
||||||
|
apikeyMixinInters1 := apikeyMixin[1].Interceptors()
|
||||||
|
apikey.Interceptors[0] = apikeyMixinInters1[0]
|
||||||
|
apikeyMixinFields0 := apikeyMixin[0].Fields()
|
||||||
|
_ = apikeyMixinFields0
|
||||||
|
apikeyFields := schema.APIKey{}.Fields()
|
||||||
|
_ = apikeyFields
|
||||||
|
// apikeyDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
apikeyDescCreatedAt := apikeyMixinFields0[0].Descriptor()
|
||||||
|
// apikey.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
apikey.DefaultCreatedAt = apikeyDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// apikeyDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
apikeyDescUpdatedAt := apikeyMixinFields0[1].Descriptor()
|
||||||
|
// apikey.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
apikey.DefaultUpdatedAt = apikeyDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// apikey.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
apikey.UpdateDefaultUpdatedAt = apikeyDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// apikeyDescKey is the schema descriptor for key field.
|
||||||
|
apikeyDescKey := apikeyFields[1].Descriptor()
|
||||||
|
// apikey.KeyValidator is a validator for the "key" field. It is called by the builders before save.
|
||||||
|
apikey.KeyValidator = func() func(string) error {
|
||||||
|
validators := apikeyDescKey.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(key string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(key); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// apikeyDescName is the schema descriptor for name field.
|
||||||
|
apikeyDescName := apikeyFields[2].Descriptor()
|
||||||
|
// apikey.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
apikey.NameValidator = func() func(string) error {
|
||||||
|
validators := apikeyDescName.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(name string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// apikeyDescStatus is the schema descriptor for status field.
|
||||||
|
apikeyDescStatus := apikeyFields[4].Descriptor()
|
||||||
|
// apikey.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
apikey.DefaultStatus = apikeyDescStatus.Default.(string)
|
||||||
|
// apikey.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
apikey.StatusValidator = apikeyDescStatus.Validators[0].(func(string) error)
|
||||||
|
accountMixin := schema.Account{}.Mixin()
|
||||||
|
accountMixinHooks1 := accountMixin[1].Hooks()
|
||||||
|
account.Hooks[0] = accountMixinHooks1[0]
|
||||||
|
accountMixinInters1 := accountMixin[1].Interceptors()
|
||||||
|
account.Interceptors[0] = accountMixinInters1[0]
|
||||||
|
accountMixinFields0 := accountMixin[0].Fields()
|
||||||
|
_ = accountMixinFields0
|
||||||
|
accountFields := schema.Account{}.Fields()
|
||||||
|
_ = accountFields
|
||||||
|
// accountDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
accountDescCreatedAt := accountMixinFields0[0].Descriptor()
|
||||||
|
// account.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
account.DefaultCreatedAt = accountDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// accountDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
accountDescUpdatedAt := accountMixinFields0[1].Descriptor()
|
||||||
|
// account.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
account.DefaultUpdatedAt = accountDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// account.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
account.UpdateDefaultUpdatedAt = accountDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// accountDescName is the schema descriptor for name field.
|
||||||
|
accountDescName := accountFields[0].Descriptor()
|
||||||
|
// account.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
account.NameValidator = func() func(string) error {
|
||||||
|
validators := accountDescName.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(name string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// accountDescPlatform is the schema descriptor for platform field.
|
||||||
|
accountDescPlatform := accountFields[2].Descriptor()
|
||||||
|
// account.PlatformValidator is a validator for the "platform" field. It is called by the builders before save.
|
||||||
|
account.PlatformValidator = func() func(string) error {
|
||||||
|
validators := accountDescPlatform.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(platform string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(platform); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// accountDescType is the schema descriptor for type field.
|
||||||
|
accountDescType := accountFields[3].Descriptor()
|
||||||
|
// account.TypeValidator is a validator for the "type" field. It is called by the builders before save.
|
||||||
|
account.TypeValidator = func() func(string) error {
|
||||||
|
validators := accountDescType.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(_type string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(_type); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// accountDescCredentials is the schema descriptor for credentials field.
|
||||||
|
accountDescCredentials := accountFields[4].Descriptor()
|
||||||
|
// account.DefaultCredentials holds the default value on creation for the credentials field.
|
||||||
|
account.DefaultCredentials = accountDescCredentials.Default.(func() map[string]interface{})
|
||||||
|
// accountDescExtra is the schema descriptor for extra field.
|
||||||
|
accountDescExtra := accountFields[5].Descriptor()
|
||||||
|
// account.DefaultExtra holds the default value on creation for the extra field.
|
||||||
|
account.DefaultExtra = accountDescExtra.Default.(func() map[string]interface{})
|
||||||
|
// accountDescConcurrency is the schema descriptor for concurrency field.
|
||||||
|
accountDescConcurrency := accountFields[7].Descriptor()
|
||||||
|
// account.DefaultConcurrency holds the default value on creation for the concurrency field.
|
||||||
|
account.DefaultConcurrency = accountDescConcurrency.Default.(int)
|
||||||
|
// accountDescPriority is the schema descriptor for priority field.
|
||||||
|
accountDescPriority := accountFields[8].Descriptor()
|
||||||
|
// account.DefaultPriority holds the default value on creation for the priority field.
|
||||||
|
account.DefaultPriority = accountDescPriority.Default.(int)
|
||||||
|
// accountDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
||||||
|
accountDescRateMultiplier := accountFields[9].Descriptor()
|
||||||
|
// account.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
||||||
|
account.DefaultRateMultiplier = accountDescRateMultiplier.Default.(float64)
|
||||||
|
// accountDescStatus is the schema descriptor for status field.
|
||||||
|
accountDescStatus := accountFields[10].Descriptor()
|
||||||
|
// account.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
account.DefaultStatus = accountDescStatus.Default.(string)
|
||||||
|
// account.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
account.StatusValidator = accountDescStatus.Validators[0].(func(string) error)
|
||||||
|
// accountDescAutoPauseOnExpired is the schema descriptor for auto_pause_on_expired field.
|
||||||
|
accountDescAutoPauseOnExpired := accountFields[14].Descriptor()
|
||||||
|
// account.DefaultAutoPauseOnExpired holds the default value on creation for the auto_pause_on_expired field.
|
||||||
|
account.DefaultAutoPauseOnExpired = accountDescAutoPauseOnExpired.Default.(bool)
|
||||||
|
// accountDescSchedulable is the schema descriptor for schedulable field.
|
||||||
|
accountDescSchedulable := accountFields[15].Descriptor()
|
||||||
|
// account.DefaultSchedulable holds the default value on creation for the schedulable field.
|
||||||
|
account.DefaultSchedulable = accountDescSchedulable.Default.(bool)
|
||||||
|
// accountDescSessionWindowStatus is the schema descriptor for session_window_status field.
|
||||||
|
accountDescSessionWindowStatus := accountFields[21].Descriptor()
|
||||||
|
// account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
|
||||||
|
account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error)
|
||||||
|
accountgroupFields := schema.AccountGroup{}.Fields()
|
||||||
|
_ = accountgroupFields
|
||||||
|
// accountgroupDescPriority is the schema descriptor for priority field.
|
||||||
|
accountgroupDescPriority := accountgroupFields[2].Descriptor()
|
||||||
|
// accountgroup.DefaultPriority holds the default value on creation for the priority field.
|
||||||
|
accountgroup.DefaultPriority = accountgroupDescPriority.Default.(int)
|
||||||
|
// accountgroupDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
accountgroupDescCreatedAt := accountgroupFields[3].Descriptor()
|
||||||
|
// accountgroup.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
accountgroup.DefaultCreatedAt = accountgroupDescCreatedAt.Default.(func() time.Time)
|
||||||
|
groupMixin := schema.Group{}.Mixin()
|
||||||
|
groupMixinHooks1 := groupMixin[1].Hooks()
|
||||||
|
group.Hooks[0] = groupMixinHooks1[0]
|
||||||
|
groupMixinInters1 := groupMixin[1].Interceptors()
|
||||||
|
group.Interceptors[0] = groupMixinInters1[0]
|
||||||
|
groupMixinFields0 := groupMixin[0].Fields()
|
||||||
|
_ = groupMixinFields0
|
||||||
|
groupFields := schema.Group{}.Fields()
|
||||||
|
_ = groupFields
|
||||||
|
// groupDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
groupDescCreatedAt := groupMixinFields0[0].Descriptor()
|
||||||
|
// group.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
group.DefaultCreatedAt = groupDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// groupDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
groupDescUpdatedAt := groupMixinFields0[1].Descriptor()
|
||||||
|
// group.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
group.DefaultUpdatedAt = groupDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// group.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
group.UpdateDefaultUpdatedAt = groupDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// groupDescName is the schema descriptor for name field.
|
||||||
|
groupDescName := groupFields[0].Descriptor()
|
||||||
|
// group.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
group.NameValidator = func() func(string) error {
|
||||||
|
validators := groupDescName.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(name string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// groupDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
||||||
|
groupDescRateMultiplier := groupFields[2].Descriptor()
|
||||||
|
// group.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
||||||
|
group.DefaultRateMultiplier = groupDescRateMultiplier.Default.(float64)
|
||||||
|
// groupDescIsExclusive is the schema descriptor for is_exclusive field.
|
||||||
|
groupDescIsExclusive := groupFields[3].Descriptor()
|
||||||
|
// group.DefaultIsExclusive holds the default value on creation for the is_exclusive field.
|
||||||
|
group.DefaultIsExclusive = groupDescIsExclusive.Default.(bool)
|
||||||
|
// groupDescStatus is the schema descriptor for status field.
|
||||||
|
groupDescStatus := groupFields[4].Descriptor()
|
||||||
|
// group.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
group.DefaultStatus = groupDescStatus.Default.(string)
|
||||||
|
// group.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
group.StatusValidator = groupDescStatus.Validators[0].(func(string) error)
|
||||||
|
// groupDescPlatform is the schema descriptor for platform field.
|
||||||
|
groupDescPlatform := groupFields[5].Descriptor()
|
||||||
|
// group.DefaultPlatform holds the default value on creation for the platform field.
|
||||||
|
group.DefaultPlatform = groupDescPlatform.Default.(string)
|
||||||
|
// group.PlatformValidator is a validator for the "platform" field. It is called by the builders before save.
|
||||||
|
group.PlatformValidator = groupDescPlatform.Validators[0].(func(string) error)
|
||||||
|
// groupDescSubscriptionType is the schema descriptor for subscription_type field.
|
||||||
|
groupDescSubscriptionType := groupFields[6].Descriptor()
|
||||||
|
// group.DefaultSubscriptionType holds the default value on creation for the subscription_type field.
|
||||||
|
group.DefaultSubscriptionType = groupDescSubscriptionType.Default.(string)
|
||||||
|
// group.SubscriptionTypeValidator is a validator for the "subscription_type" field. It is called by the builders before save.
|
||||||
|
group.SubscriptionTypeValidator = groupDescSubscriptionType.Validators[0].(func(string) error)
|
||||||
|
// groupDescDefaultValidityDays is the schema descriptor for default_validity_days field.
|
||||||
|
groupDescDefaultValidityDays := groupFields[10].Descriptor()
|
||||||
|
// group.DefaultDefaultValidityDays holds the default value on creation for the default_validity_days field.
|
||||||
|
group.DefaultDefaultValidityDays = groupDescDefaultValidityDays.Default.(int)
|
||||||
|
// groupDescClaudeCodeOnly is the schema descriptor for claude_code_only field.
|
||||||
|
groupDescClaudeCodeOnly := groupFields[14].Descriptor()
|
||||||
|
// group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field.
|
||||||
|
group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool)
|
||||||
|
promocodeFields := schema.PromoCode{}.Fields()
|
||||||
|
_ = promocodeFields
|
||||||
|
// promocodeDescCode is the schema descriptor for code field.
|
||||||
|
promocodeDescCode := promocodeFields[0].Descriptor()
|
||||||
|
// promocode.CodeValidator is a validator for the "code" field. It is called by the builders before save.
|
||||||
|
promocode.CodeValidator = func() func(string) error {
|
||||||
|
validators := promocodeDescCode.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(code string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(code); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// promocodeDescBonusAmount is the schema descriptor for bonus_amount field.
|
||||||
|
promocodeDescBonusAmount := promocodeFields[1].Descriptor()
|
||||||
|
// promocode.DefaultBonusAmount holds the default value on creation for the bonus_amount field.
|
||||||
|
promocode.DefaultBonusAmount = promocodeDescBonusAmount.Default.(float64)
|
||||||
|
// promocodeDescMaxUses is the schema descriptor for max_uses field.
|
||||||
|
promocodeDescMaxUses := promocodeFields[2].Descriptor()
|
||||||
|
// promocode.DefaultMaxUses holds the default value on creation for the max_uses field.
|
||||||
|
promocode.DefaultMaxUses = promocodeDescMaxUses.Default.(int)
|
||||||
|
// promocodeDescUsedCount is the schema descriptor for used_count field.
|
||||||
|
promocodeDescUsedCount := promocodeFields[3].Descriptor()
|
||||||
|
// promocode.DefaultUsedCount holds the default value on creation for the used_count field.
|
||||||
|
promocode.DefaultUsedCount = promocodeDescUsedCount.Default.(int)
|
||||||
|
// promocodeDescStatus is the schema descriptor for status field.
|
||||||
|
promocodeDescStatus := promocodeFields[4].Descriptor()
|
||||||
|
// promocode.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
promocode.DefaultStatus = promocodeDescStatus.Default.(string)
|
||||||
|
// promocode.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
promocode.StatusValidator = promocodeDescStatus.Validators[0].(func(string) error)
|
||||||
|
// promocodeDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
promocodeDescCreatedAt := promocodeFields[7].Descriptor()
|
||||||
|
// promocode.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
promocode.DefaultCreatedAt = promocodeDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// promocodeDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
promocodeDescUpdatedAt := promocodeFields[8].Descriptor()
|
||||||
|
// promocode.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
promocode.DefaultUpdatedAt = promocodeDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// promocode.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
promocode.UpdateDefaultUpdatedAt = promocodeDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
promocodeusageFields := schema.PromoCodeUsage{}.Fields()
|
||||||
|
_ = promocodeusageFields
|
||||||
|
// promocodeusageDescUsedAt is the schema descriptor for used_at field.
|
||||||
|
promocodeusageDescUsedAt := promocodeusageFields[3].Descriptor()
|
||||||
|
// promocodeusage.DefaultUsedAt holds the default value on creation for the used_at field.
|
||||||
|
promocodeusage.DefaultUsedAt = promocodeusageDescUsedAt.Default.(func() time.Time)
|
||||||
|
proxyMixin := schema.Proxy{}.Mixin()
|
||||||
|
proxyMixinHooks1 := proxyMixin[1].Hooks()
|
||||||
|
proxy.Hooks[0] = proxyMixinHooks1[0]
|
||||||
|
proxyMixinInters1 := proxyMixin[1].Interceptors()
|
||||||
|
proxy.Interceptors[0] = proxyMixinInters1[0]
|
||||||
|
proxyMixinFields0 := proxyMixin[0].Fields()
|
||||||
|
_ = proxyMixinFields0
|
||||||
|
proxyFields := schema.Proxy{}.Fields()
|
||||||
|
_ = proxyFields
|
||||||
|
// proxyDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
proxyDescCreatedAt := proxyMixinFields0[0].Descriptor()
|
||||||
|
// proxy.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
proxy.DefaultCreatedAt = proxyDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// proxyDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
proxyDescUpdatedAt := proxyMixinFields0[1].Descriptor()
|
||||||
|
// proxy.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
proxy.DefaultUpdatedAt = proxyDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// proxy.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
proxy.UpdateDefaultUpdatedAt = proxyDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// proxyDescName is the schema descriptor for name field.
|
||||||
|
proxyDescName := proxyFields[0].Descriptor()
|
||||||
|
// proxy.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
proxy.NameValidator = func() func(string) error {
|
||||||
|
validators := proxyDescName.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(name string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// proxyDescProtocol is the schema descriptor for protocol field.
|
||||||
|
proxyDescProtocol := proxyFields[1].Descriptor()
|
||||||
|
// proxy.ProtocolValidator is a validator for the "protocol" field. It is called by the builders before save.
|
||||||
|
proxy.ProtocolValidator = func() func(string) error {
|
||||||
|
validators := proxyDescProtocol.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(protocol string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(protocol); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// proxyDescHost is the schema descriptor for host field.
|
||||||
|
proxyDescHost := proxyFields[2].Descriptor()
|
||||||
|
// proxy.HostValidator is a validator for the "host" field. It is called by the builders before save.
|
||||||
|
proxy.HostValidator = func() func(string) error {
|
||||||
|
validators := proxyDescHost.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(host string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(host); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// proxyDescUsername is the schema descriptor for username field.
|
||||||
|
proxyDescUsername := proxyFields[4].Descriptor()
|
||||||
|
// proxy.UsernameValidator is a validator for the "username" field. It is called by the builders before save.
|
||||||
|
proxy.UsernameValidator = proxyDescUsername.Validators[0].(func(string) error)
|
||||||
|
// proxyDescPassword is the schema descriptor for password field.
|
||||||
|
proxyDescPassword := proxyFields[5].Descriptor()
|
||||||
|
// proxy.PasswordValidator is a validator for the "password" field. It is called by the builders before save.
|
||||||
|
proxy.PasswordValidator = proxyDescPassword.Validators[0].(func(string) error)
|
||||||
|
// proxyDescStatus is the schema descriptor for status field.
|
||||||
|
proxyDescStatus := proxyFields[6].Descriptor()
|
||||||
|
// proxy.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
proxy.DefaultStatus = proxyDescStatus.Default.(string)
|
||||||
|
// proxy.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
proxy.StatusValidator = proxyDescStatus.Validators[0].(func(string) error)
|
||||||
|
redeemcodeFields := schema.RedeemCode{}.Fields()
|
||||||
|
_ = redeemcodeFields
|
||||||
|
// redeemcodeDescCode is the schema descriptor for code field.
|
||||||
|
redeemcodeDescCode := redeemcodeFields[0].Descriptor()
|
||||||
|
// redeemcode.CodeValidator is a validator for the "code" field. It is called by the builders before save.
|
||||||
|
redeemcode.CodeValidator = func() func(string) error {
|
||||||
|
validators := redeemcodeDescCode.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(code string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(code); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// redeemcodeDescType is the schema descriptor for type field.
|
||||||
|
redeemcodeDescType := redeemcodeFields[1].Descriptor()
|
||||||
|
// redeemcode.DefaultType holds the default value on creation for the type field.
|
||||||
|
redeemcode.DefaultType = redeemcodeDescType.Default.(string)
|
||||||
|
// redeemcode.TypeValidator is a validator for the "type" field. It is called by the builders before save.
|
||||||
|
redeemcode.TypeValidator = redeemcodeDescType.Validators[0].(func(string) error)
|
||||||
|
// redeemcodeDescValue is the schema descriptor for value field.
|
||||||
|
redeemcodeDescValue := redeemcodeFields[2].Descriptor()
|
||||||
|
// redeemcode.DefaultValue holds the default value on creation for the value field.
|
||||||
|
redeemcode.DefaultValue = redeemcodeDescValue.Default.(float64)
|
||||||
|
// redeemcodeDescStatus is the schema descriptor for status field.
|
||||||
|
redeemcodeDescStatus := redeemcodeFields[3].Descriptor()
|
||||||
|
// redeemcode.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
redeemcode.DefaultStatus = redeemcodeDescStatus.Default.(string)
|
||||||
|
// redeemcode.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
redeemcode.StatusValidator = redeemcodeDescStatus.Validators[0].(func(string) error)
|
||||||
|
// redeemcodeDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
redeemcodeDescCreatedAt := redeemcodeFields[7].Descriptor()
|
||||||
|
// redeemcode.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
redeemcode.DefaultCreatedAt = redeemcodeDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// redeemcodeDescValidityDays is the schema descriptor for validity_days field.
|
||||||
|
redeemcodeDescValidityDays := redeemcodeFields[9].Descriptor()
|
||||||
|
// redeemcode.DefaultValidityDays holds the default value on creation for the validity_days field.
|
||||||
|
redeemcode.DefaultValidityDays = redeemcodeDescValidityDays.Default.(int)
|
||||||
|
settingFields := schema.Setting{}.Fields()
|
||||||
|
_ = settingFields
|
||||||
|
// settingDescKey is the schema descriptor for key field.
|
||||||
|
settingDescKey := settingFields[0].Descriptor()
|
||||||
|
// setting.KeyValidator is a validator for the "key" field. It is called by the builders before save.
|
||||||
|
setting.KeyValidator = func() func(string) error {
|
||||||
|
validators := settingDescKey.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(key string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(key); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// settingDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
settingDescUpdatedAt := settingFields[2].Descriptor()
|
||||||
|
// setting.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
setting.DefaultUpdatedAt = settingDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// setting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
setting.UpdateDefaultUpdatedAt = settingDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
usagelogFields := schema.UsageLog{}.Fields()
|
||||||
|
_ = usagelogFields
|
||||||
|
// usagelogDescRequestID is the schema descriptor for request_id field.
|
||||||
|
usagelogDescRequestID := usagelogFields[3].Descriptor()
|
||||||
|
// usagelog.RequestIDValidator is a validator for the "request_id" field. It is called by the builders before save.
|
||||||
|
usagelog.RequestIDValidator = func() func(string) error {
|
||||||
|
validators := usagelogDescRequestID.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(request_id string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(request_id); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// usagelogDescModel is the schema descriptor for model field.
|
||||||
|
usagelogDescModel := usagelogFields[4].Descriptor()
|
||||||
|
// usagelog.ModelValidator is a validator for the "model" field. It is called by the builders before save.
|
||||||
|
usagelog.ModelValidator = func() func(string) error {
|
||||||
|
validators := usagelogDescModel.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(model string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(model); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// usagelogDescInputTokens is the schema descriptor for input_tokens field.
|
||||||
|
usagelogDescInputTokens := usagelogFields[7].Descriptor()
|
||||||
|
// usagelog.DefaultInputTokens holds the default value on creation for the input_tokens field.
|
||||||
|
usagelog.DefaultInputTokens = usagelogDescInputTokens.Default.(int)
|
||||||
|
// usagelogDescOutputTokens is the schema descriptor for output_tokens field.
|
||||||
|
usagelogDescOutputTokens := usagelogFields[8].Descriptor()
|
||||||
|
// usagelog.DefaultOutputTokens holds the default value on creation for the output_tokens field.
|
||||||
|
usagelog.DefaultOutputTokens = usagelogDescOutputTokens.Default.(int)
|
||||||
|
// usagelogDescCacheCreationTokens is the schema descriptor for cache_creation_tokens field.
|
||||||
|
usagelogDescCacheCreationTokens := usagelogFields[9].Descriptor()
|
||||||
|
// usagelog.DefaultCacheCreationTokens holds the default value on creation for the cache_creation_tokens field.
|
||||||
|
usagelog.DefaultCacheCreationTokens = usagelogDescCacheCreationTokens.Default.(int)
|
||||||
|
// usagelogDescCacheReadTokens is the schema descriptor for cache_read_tokens field.
|
||||||
|
usagelogDescCacheReadTokens := usagelogFields[10].Descriptor()
|
||||||
|
// usagelog.DefaultCacheReadTokens holds the default value on creation for the cache_read_tokens field.
|
||||||
|
usagelog.DefaultCacheReadTokens = usagelogDescCacheReadTokens.Default.(int)
|
||||||
|
// usagelogDescCacheCreation5mTokens is the schema descriptor for cache_creation_5m_tokens field.
|
||||||
|
usagelogDescCacheCreation5mTokens := usagelogFields[11].Descriptor()
|
||||||
|
// usagelog.DefaultCacheCreation5mTokens holds the default value on creation for the cache_creation_5m_tokens field.
|
||||||
|
usagelog.DefaultCacheCreation5mTokens = usagelogDescCacheCreation5mTokens.Default.(int)
|
||||||
|
// usagelogDescCacheCreation1hTokens is the schema descriptor for cache_creation_1h_tokens field.
|
||||||
|
usagelogDescCacheCreation1hTokens := usagelogFields[12].Descriptor()
|
||||||
|
// usagelog.DefaultCacheCreation1hTokens holds the default value on creation for the cache_creation_1h_tokens field.
|
||||||
|
usagelog.DefaultCacheCreation1hTokens = usagelogDescCacheCreation1hTokens.Default.(int)
|
||||||
|
// usagelogDescInputCost is the schema descriptor for input_cost field.
|
||||||
|
usagelogDescInputCost := usagelogFields[13].Descriptor()
|
||||||
|
// usagelog.DefaultInputCost holds the default value on creation for the input_cost field.
|
||||||
|
usagelog.DefaultInputCost = usagelogDescInputCost.Default.(float64)
|
||||||
|
// usagelogDescOutputCost is the schema descriptor for output_cost field.
|
||||||
|
usagelogDescOutputCost := usagelogFields[14].Descriptor()
|
||||||
|
// usagelog.DefaultOutputCost holds the default value on creation for the output_cost field.
|
||||||
|
usagelog.DefaultOutputCost = usagelogDescOutputCost.Default.(float64)
|
||||||
|
// usagelogDescCacheCreationCost is the schema descriptor for cache_creation_cost field.
|
||||||
|
usagelogDescCacheCreationCost := usagelogFields[15].Descriptor()
|
||||||
|
// usagelog.DefaultCacheCreationCost holds the default value on creation for the cache_creation_cost field.
|
||||||
|
usagelog.DefaultCacheCreationCost = usagelogDescCacheCreationCost.Default.(float64)
|
||||||
|
// usagelogDescCacheReadCost is the schema descriptor for cache_read_cost field.
|
||||||
|
usagelogDescCacheReadCost := usagelogFields[16].Descriptor()
|
||||||
|
// usagelog.DefaultCacheReadCost holds the default value on creation for the cache_read_cost field.
|
||||||
|
usagelog.DefaultCacheReadCost = usagelogDescCacheReadCost.Default.(float64)
|
||||||
|
// usagelogDescTotalCost is the schema descriptor for total_cost field.
|
||||||
|
usagelogDescTotalCost := usagelogFields[17].Descriptor()
|
||||||
|
// usagelog.DefaultTotalCost holds the default value on creation for the total_cost field.
|
||||||
|
usagelog.DefaultTotalCost = usagelogDescTotalCost.Default.(float64)
|
||||||
|
// usagelogDescActualCost is the schema descriptor for actual_cost field.
|
||||||
|
usagelogDescActualCost := usagelogFields[18].Descriptor()
|
||||||
|
// usagelog.DefaultActualCost holds the default value on creation for the actual_cost field.
|
||||||
|
usagelog.DefaultActualCost = usagelogDescActualCost.Default.(float64)
|
||||||
|
// usagelogDescRateMultiplier is the schema descriptor for rate_multiplier field.
|
||||||
|
usagelogDescRateMultiplier := usagelogFields[19].Descriptor()
|
||||||
|
// usagelog.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
|
||||||
|
usagelog.DefaultRateMultiplier = usagelogDescRateMultiplier.Default.(float64)
|
||||||
|
// usagelogDescBillingType is the schema descriptor for billing_type field.
|
||||||
|
usagelogDescBillingType := usagelogFields[21].Descriptor()
|
||||||
|
// usagelog.DefaultBillingType holds the default value on creation for the billing_type field.
|
||||||
|
usagelog.DefaultBillingType = usagelogDescBillingType.Default.(int8)
|
||||||
|
// usagelogDescStream is the schema descriptor for stream field.
|
||||||
|
usagelogDescStream := usagelogFields[22].Descriptor()
|
||||||
|
// usagelog.DefaultStream holds the default value on creation for the stream field.
|
||||||
|
usagelog.DefaultStream = usagelogDescStream.Default.(bool)
|
||||||
|
// usagelogDescUserAgent is the schema descriptor for user_agent field.
|
||||||
|
usagelogDescUserAgent := usagelogFields[25].Descriptor()
|
||||||
|
// usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
|
||||||
|
usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error)
|
||||||
|
// usagelogDescIPAddress is the schema descriptor for ip_address field.
|
||||||
|
usagelogDescIPAddress := usagelogFields[26].Descriptor()
|
||||||
|
// usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save.
|
||||||
|
usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error)
|
||||||
|
// usagelogDescImageCount is the schema descriptor for image_count field.
|
||||||
|
usagelogDescImageCount := usagelogFields[27].Descriptor()
|
||||||
|
// usagelog.DefaultImageCount holds the default value on creation for the image_count field.
|
||||||
|
usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int)
|
||||||
|
// usagelogDescImageSize is the schema descriptor for image_size field.
|
||||||
|
usagelogDescImageSize := usagelogFields[28].Descriptor()
|
||||||
|
// usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
|
||||||
|
usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error)
|
||||||
|
// usagelogDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
usagelogDescCreatedAt := usagelogFields[29].Descriptor()
|
||||||
|
// usagelog.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time)
|
||||||
|
userMixin := schema.User{}.Mixin()
|
||||||
|
userMixinHooks1 := userMixin[1].Hooks()
|
||||||
|
user.Hooks[0] = userMixinHooks1[0]
|
||||||
|
userMixinInters1 := userMixin[1].Interceptors()
|
||||||
|
user.Interceptors[0] = userMixinInters1[0]
|
||||||
|
userMixinFields0 := userMixin[0].Fields()
|
||||||
|
_ = userMixinFields0
|
||||||
|
userFields := schema.User{}.Fields()
|
||||||
|
_ = userFields
|
||||||
|
// userDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
userDescCreatedAt := userMixinFields0[0].Descriptor()
|
||||||
|
// user.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// userDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
userDescUpdatedAt := userMixinFields0[1].Descriptor()
|
||||||
|
// user.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
user.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
user.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// userDescEmail is the schema descriptor for email field.
|
||||||
|
userDescEmail := userFields[0].Descriptor()
|
||||||
|
// user.EmailValidator is a validator for the "email" field. It is called by the builders before save.
|
||||||
|
user.EmailValidator = func() func(string) error {
|
||||||
|
validators := userDescEmail.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(email string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(email); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// userDescPasswordHash is the schema descriptor for password_hash field.
|
||||||
|
userDescPasswordHash := userFields[1].Descriptor()
|
||||||
|
// user.PasswordHashValidator is a validator for the "password_hash" field. It is called by the builders before save.
|
||||||
|
user.PasswordHashValidator = func() func(string) error {
|
||||||
|
validators := userDescPasswordHash.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(password_hash string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(password_hash); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// userDescRole is the schema descriptor for role field.
|
||||||
|
userDescRole := userFields[2].Descriptor()
|
||||||
|
// user.DefaultRole holds the default value on creation for the role field.
|
||||||
|
user.DefaultRole = userDescRole.Default.(string)
|
||||||
|
// user.RoleValidator is a validator for the "role" field. It is called by the builders before save.
|
||||||
|
user.RoleValidator = userDescRole.Validators[0].(func(string) error)
|
||||||
|
// userDescBalance is the schema descriptor for balance field.
|
||||||
|
userDescBalance := userFields[3].Descriptor()
|
||||||
|
// user.DefaultBalance holds the default value on creation for the balance field.
|
||||||
|
user.DefaultBalance = userDescBalance.Default.(float64)
|
||||||
|
// userDescConcurrency is the schema descriptor for concurrency field.
|
||||||
|
userDescConcurrency := userFields[4].Descriptor()
|
||||||
|
// user.DefaultConcurrency holds the default value on creation for the concurrency field.
|
||||||
|
user.DefaultConcurrency = userDescConcurrency.Default.(int)
|
||||||
|
// userDescStatus is the schema descriptor for status field.
|
||||||
|
userDescStatus := userFields[5].Descriptor()
|
||||||
|
// user.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
user.DefaultStatus = userDescStatus.Default.(string)
|
||||||
|
// user.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
user.StatusValidator = userDescStatus.Validators[0].(func(string) error)
|
||||||
|
// userDescUsername is the schema descriptor for username field.
|
||||||
|
userDescUsername := userFields[6].Descriptor()
|
||||||
|
// user.DefaultUsername holds the default value on creation for the username field.
|
||||||
|
user.DefaultUsername = userDescUsername.Default.(string)
|
||||||
|
// user.UsernameValidator is a validator for the "username" field. It is called by the builders before save.
|
||||||
|
user.UsernameValidator = userDescUsername.Validators[0].(func(string) error)
|
||||||
|
// userDescNotes is the schema descriptor for notes field.
|
||||||
|
userDescNotes := userFields[7].Descriptor()
|
||||||
|
// user.DefaultNotes holds the default value on creation for the notes field.
|
||||||
|
user.DefaultNotes = userDescNotes.Default.(string)
|
||||||
|
userallowedgroupFields := schema.UserAllowedGroup{}.Fields()
|
||||||
|
_ = userallowedgroupFields
|
||||||
|
// userallowedgroupDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
userallowedgroupDescCreatedAt := userallowedgroupFields[2].Descriptor()
|
||||||
|
// userallowedgroup.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
userallowedgroup.DefaultCreatedAt = userallowedgroupDescCreatedAt.Default.(func() time.Time)
|
||||||
|
userattributedefinitionMixin := schema.UserAttributeDefinition{}.Mixin()
|
||||||
|
userattributedefinitionMixinHooks1 := userattributedefinitionMixin[1].Hooks()
|
||||||
|
userattributedefinition.Hooks[0] = userattributedefinitionMixinHooks1[0]
|
||||||
|
userattributedefinitionMixinInters1 := userattributedefinitionMixin[1].Interceptors()
|
||||||
|
userattributedefinition.Interceptors[0] = userattributedefinitionMixinInters1[0]
|
||||||
|
userattributedefinitionMixinFields0 := userattributedefinitionMixin[0].Fields()
|
||||||
|
_ = userattributedefinitionMixinFields0
|
||||||
|
userattributedefinitionFields := schema.UserAttributeDefinition{}.Fields()
|
||||||
|
_ = userattributedefinitionFields
|
||||||
|
// userattributedefinitionDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
userattributedefinitionDescCreatedAt := userattributedefinitionMixinFields0[0].Descriptor()
|
||||||
|
// userattributedefinition.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
userattributedefinition.DefaultCreatedAt = userattributedefinitionDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// userattributedefinitionDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
userattributedefinitionDescUpdatedAt := userattributedefinitionMixinFields0[1].Descriptor()
|
||||||
|
// userattributedefinition.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
userattributedefinition.DefaultUpdatedAt = userattributedefinitionDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// userattributedefinition.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
userattributedefinition.UpdateDefaultUpdatedAt = userattributedefinitionDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// userattributedefinitionDescKey is the schema descriptor for key field.
|
||||||
|
userattributedefinitionDescKey := userattributedefinitionFields[0].Descriptor()
|
||||||
|
// userattributedefinition.KeyValidator is a validator for the "key" field. It is called by the builders before save.
|
||||||
|
userattributedefinition.KeyValidator = func() func(string) error {
|
||||||
|
validators := userattributedefinitionDescKey.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(key string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(key); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// userattributedefinitionDescName is the schema descriptor for name field.
|
||||||
|
userattributedefinitionDescName := userattributedefinitionFields[1].Descriptor()
|
||||||
|
// userattributedefinition.NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||||
|
userattributedefinition.NameValidator = func() func(string) error {
|
||||||
|
validators := userattributedefinitionDescName.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(name string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(name); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// userattributedefinitionDescDescription is the schema descriptor for description field.
|
||||||
|
userattributedefinitionDescDescription := userattributedefinitionFields[2].Descriptor()
|
||||||
|
// userattributedefinition.DefaultDescription holds the default value on creation for the description field.
|
||||||
|
userattributedefinition.DefaultDescription = userattributedefinitionDescDescription.Default.(string)
|
||||||
|
// userattributedefinitionDescType is the schema descriptor for type field.
|
||||||
|
userattributedefinitionDescType := userattributedefinitionFields[3].Descriptor()
|
||||||
|
// userattributedefinition.TypeValidator is a validator for the "type" field. It is called by the builders before save.
|
||||||
|
userattributedefinition.TypeValidator = func() func(string) error {
|
||||||
|
validators := userattributedefinitionDescType.Validators
|
||||||
|
fns := [...]func(string) error{
|
||||||
|
validators[0].(func(string) error),
|
||||||
|
validators[1].(func(string) error),
|
||||||
|
}
|
||||||
|
return func(_type string) error {
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := fn(_type); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
// userattributedefinitionDescOptions is the schema descriptor for options field.
|
||||||
|
userattributedefinitionDescOptions := userattributedefinitionFields[4].Descriptor()
|
||||||
|
// userattributedefinition.DefaultOptions holds the default value on creation for the options field.
|
||||||
|
userattributedefinition.DefaultOptions = userattributedefinitionDescOptions.Default.([]map[string]interface{})
|
||||||
|
// userattributedefinitionDescRequired is the schema descriptor for required field.
|
||||||
|
userattributedefinitionDescRequired := userattributedefinitionFields[5].Descriptor()
|
||||||
|
// userattributedefinition.DefaultRequired holds the default value on creation for the required field.
|
||||||
|
userattributedefinition.DefaultRequired = userattributedefinitionDescRequired.Default.(bool)
|
||||||
|
// userattributedefinitionDescValidation is the schema descriptor for validation field.
|
||||||
|
userattributedefinitionDescValidation := userattributedefinitionFields[6].Descriptor()
|
||||||
|
// userattributedefinition.DefaultValidation holds the default value on creation for the validation field.
|
||||||
|
userattributedefinition.DefaultValidation = userattributedefinitionDescValidation.Default.(map[string]interface{})
|
||||||
|
// userattributedefinitionDescPlaceholder is the schema descriptor for placeholder field.
|
||||||
|
userattributedefinitionDescPlaceholder := userattributedefinitionFields[7].Descriptor()
|
||||||
|
// userattributedefinition.DefaultPlaceholder holds the default value on creation for the placeholder field.
|
||||||
|
userattributedefinition.DefaultPlaceholder = userattributedefinitionDescPlaceholder.Default.(string)
|
||||||
|
// userattributedefinition.PlaceholderValidator is a validator for the "placeholder" field. It is called by the builders before save.
|
||||||
|
userattributedefinition.PlaceholderValidator = userattributedefinitionDescPlaceholder.Validators[0].(func(string) error)
|
||||||
|
// userattributedefinitionDescDisplayOrder is the schema descriptor for display_order field.
|
||||||
|
userattributedefinitionDescDisplayOrder := userattributedefinitionFields[8].Descriptor()
|
||||||
|
// userattributedefinition.DefaultDisplayOrder holds the default value on creation for the display_order field.
|
||||||
|
userattributedefinition.DefaultDisplayOrder = userattributedefinitionDescDisplayOrder.Default.(int)
|
||||||
|
// userattributedefinitionDescEnabled is the schema descriptor for enabled field.
|
||||||
|
userattributedefinitionDescEnabled := userattributedefinitionFields[9].Descriptor()
|
||||||
|
// userattributedefinition.DefaultEnabled holds the default value on creation for the enabled field.
|
||||||
|
userattributedefinition.DefaultEnabled = userattributedefinitionDescEnabled.Default.(bool)
|
||||||
|
userattributevalueMixin := schema.UserAttributeValue{}.Mixin()
|
||||||
|
userattributevalueMixinFields0 := userattributevalueMixin[0].Fields()
|
||||||
|
_ = userattributevalueMixinFields0
|
||||||
|
userattributevalueFields := schema.UserAttributeValue{}.Fields()
|
||||||
|
_ = userattributevalueFields
|
||||||
|
// userattributevalueDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
userattributevalueDescCreatedAt := userattributevalueMixinFields0[0].Descriptor()
|
||||||
|
// userattributevalue.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
userattributevalue.DefaultCreatedAt = userattributevalueDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// userattributevalueDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
userattributevalueDescUpdatedAt := userattributevalueMixinFields0[1].Descriptor()
|
||||||
|
// userattributevalue.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
userattributevalue.DefaultUpdatedAt = userattributevalueDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// userattributevalue.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
userattributevalue.UpdateDefaultUpdatedAt = userattributevalueDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// userattributevalueDescValue is the schema descriptor for value field.
|
||||||
|
userattributevalueDescValue := userattributevalueFields[2].Descriptor()
|
||||||
|
// userattributevalue.DefaultValue holds the default value on creation for the value field.
|
||||||
|
userattributevalue.DefaultValue = userattributevalueDescValue.Default.(string)
|
||||||
|
usersubscriptionMixin := schema.UserSubscription{}.Mixin()
|
||||||
|
usersubscriptionMixinHooks1 := usersubscriptionMixin[1].Hooks()
|
||||||
|
usersubscription.Hooks[0] = usersubscriptionMixinHooks1[0]
|
||||||
|
usersubscriptionMixinInters1 := usersubscriptionMixin[1].Interceptors()
|
||||||
|
usersubscription.Interceptors[0] = usersubscriptionMixinInters1[0]
|
||||||
|
usersubscriptionMixinFields0 := usersubscriptionMixin[0].Fields()
|
||||||
|
_ = usersubscriptionMixinFields0
|
||||||
|
usersubscriptionFields := schema.UserSubscription{}.Fields()
|
||||||
|
_ = usersubscriptionFields
|
||||||
|
// usersubscriptionDescCreatedAt is the schema descriptor for created_at field.
|
||||||
|
usersubscriptionDescCreatedAt := usersubscriptionMixinFields0[0].Descriptor()
|
||||||
|
// usersubscription.DefaultCreatedAt holds the default value on creation for the created_at field.
|
||||||
|
usersubscription.DefaultCreatedAt = usersubscriptionDescCreatedAt.Default.(func() time.Time)
|
||||||
|
// usersubscriptionDescUpdatedAt is the schema descriptor for updated_at field.
|
||||||
|
usersubscriptionDescUpdatedAt := usersubscriptionMixinFields0[1].Descriptor()
|
||||||
|
// usersubscription.DefaultUpdatedAt holds the default value on creation for the updated_at field.
|
||||||
|
usersubscription.DefaultUpdatedAt = usersubscriptionDescUpdatedAt.Default.(func() time.Time)
|
||||||
|
// usersubscription.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
|
||||||
|
usersubscription.UpdateDefaultUpdatedAt = usersubscriptionDescUpdatedAt.UpdateDefault.(func() time.Time)
|
||||||
|
// usersubscriptionDescStatus is the schema descriptor for status field.
|
||||||
|
usersubscriptionDescStatus := usersubscriptionFields[4].Descriptor()
|
||||||
|
// usersubscription.DefaultStatus holds the default value on creation for the status field.
|
||||||
|
usersubscription.DefaultStatus = usersubscriptionDescStatus.Default.(string)
|
||||||
|
// usersubscription.StatusValidator is a validator for the "status" field. It is called by the builders before save.
|
||||||
|
usersubscription.StatusValidator = usersubscriptionDescStatus.Validators[0].(func(string) error)
|
||||||
|
// usersubscriptionDescDailyUsageUsd is the schema descriptor for daily_usage_usd field.
|
||||||
|
usersubscriptionDescDailyUsageUsd := usersubscriptionFields[8].Descriptor()
|
||||||
|
// usersubscription.DefaultDailyUsageUsd holds the default value on creation for the daily_usage_usd field.
|
||||||
|
usersubscription.DefaultDailyUsageUsd = usersubscriptionDescDailyUsageUsd.Default.(float64)
|
||||||
|
// usersubscriptionDescWeeklyUsageUsd is the schema descriptor for weekly_usage_usd field.
|
||||||
|
usersubscriptionDescWeeklyUsageUsd := usersubscriptionFields[9].Descriptor()
|
||||||
|
// usersubscription.DefaultWeeklyUsageUsd holds the default value on creation for the weekly_usage_usd field.
|
||||||
|
usersubscription.DefaultWeeklyUsageUsd = usersubscriptionDescWeeklyUsageUsd.Default.(float64)
|
||||||
|
// usersubscriptionDescMonthlyUsageUsd is the schema descriptor for monthly_usage_usd field.
|
||||||
|
usersubscriptionDescMonthlyUsageUsd := usersubscriptionFields[10].Descriptor()
|
||||||
|
// usersubscription.DefaultMonthlyUsageUsd holds the default value on creation for the monthly_usage_usd field.
|
||||||
|
usersubscription.DefaultMonthlyUsageUsd = usersubscriptionDescMonthlyUsageUsd.Default.(float64)
|
||||||
|
// usersubscriptionDescAssignedAt is the schema descriptor for assigned_at field.
|
||||||
|
usersubscriptionDescAssignedAt := usersubscriptionFields[12].Descriptor()
|
||||||
|
// usersubscription.DefaultAssignedAt holds the default value on creation for the assigned_at field.
|
||||||
|
usersubscription.DefaultAssignedAt = usersubscriptionDescAssignedAt.Default.(func() time.Time)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Version and Sum pin the exact ent codegen release that produced this
// file; ent uses them to detect codegen/runtime version drift.
// Generated code — do not edit by hand.
const (
	Version = "v0.14.5" // Version of ent codegen.
	Sum     = "h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=" // Sum of ent codegen.
)
|
||||||
218
backend/ent/schema/account.go
Normal file
218
backend/ent/schema/account.go
Normal file
@@ -0,0 +1,218 @@
|
|||||||
|
// Package schema 定义 Ent ORM 的数据库 schema。
|
||||||
|
// 每个文件对应一个数据库实体(表),定义其字段、边(关联)和索引。
|
||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/edge"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Account defines the schema for the AI API account entity.
//
// An account is the system's core resource: a credential usable for
// calling an AI API — e.g. a Claude API account or a Gemini OAuth account.
//
// Responsibilities:
//   - store API credentials for different platforms (Claude, Gemini, OpenAI, ...)
//   - support multiple auth types (api_key, oauth, cookie, ...)
//   - track scheduling state (schedulable, rate-limited, overloaded, ...)
//   - allow flexible assignment of accounts through groups
type Account struct {
	ent.Schema
}

// Annotations returns the schema annotations.
// The entity is mapped to the "accounts" table.
func (Account) Annotations() []schema.Annotation {
	return []schema.Annotation{
		entsql.Annotation{Table: "accounts"},
	}
}

// Mixin returns the mixins applied to this schema:
//   - TimeMixin: maintains the created_at and updated_at timestamps
//   - SoftDeleteMixin: provides soft deletion via deleted_at
func (Account) Mixin() []ent.Mixin {
	return []ent.Mixin{
		mixins.TimeMixin{},
		mixins.SoftDeleteMixin{},
	}
}

// Fields declares every field of the account entity.
// NOTE(review): the generated runtime appears to index this slice by
// position, so avoid reordering entries — confirm against ent/runtime.go.
func (Account) Fields() []ent.Field {
	return []ent.Field{
		// name: display name used to identify the account in the UI.
		field.String("name").
			MaxLen(100).
			NotEmpty(),
		// notes: free-form admin notes (nullable).
		field.String("notes").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "text"}),

		// platform: owning platform, e.g. "claude", "gemini", "openai".
		field.String("platform").
			MaxLen(50).
			NotEmpty(),

		// type: auth type, e.g. "api_key", "oauth", "cookie".
		// The type determines the structure stored in credentials.
		field.String("type").
			MaxLen(20).
			NotEmpty(),

		// credentials: auth credentials stored as JSONB.
		// Shape depends on the type field:
		//   - api_key: {"api_key": "sk-xxx"}
		//   - oauth:   {"access_token": "...", "refresh_token": "...", "expires_at": "..."}
		//   - cookie:  {"session_key": "..."}
		field.JSON("credentials", map[string]any{}).
			Default(func() map[string]any { return map[string]any{} }).
			SchemaType(map[string]string{dialect.Postgres: "jsonb"}),

		// extra: platform-specific extension data,
		// e.g. a CRS account's crs_account_id or organization info.
		field.JSON("extra", map[string]any{}).
			Default(func() map[string]any { return map[string]any{} }).
			SchemaType(map[string]string{dialect.Postgres: "jsonb"}),

		// proxy_id: optional ID of an associated proxy configuration,
		// for accounts that must reach the API through a specific proxy.
		field.Int64("proxy_id").
			Optional().
			Nillable(),

		// concurrency: maximum number of concurrent requests allowed
		// against this account at any one time.
		field.Int("concurrency").
			Default(3),

		// priority: scheduling priority; lower values are preferred
		// by the scheduler.
		field.Int("priority").
			Default(50),

		// rate_multiplier: billing multiplier for this account (>= 0;
		// 0 means the account bills at zero). Affects account-level
		// billing only — user/API-key charges use the group multiplier.
		field.Float("rate_multiplier").
			SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}).
			Default(1.0),

		// status: account status, e.g. "active", "error", "disabled".
		field.String("status").
			MaxLen(20).
			Default(service.StatusActive),

		// error_message: details recorded when the account enters an
		// error state (nullable).
		field.String("error_message").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "text"}),

		// last_used_at: last time the account was used; informs
		// statistics and scheduling.
		field.Time("last_used_at").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
		// expires_at: account expiration time (nullable).
		field.Time("expires_at").
			Optional().
			Nillable().
			Comment("Account expiration time (NULL means no expiration).").
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
		// auto_pause_on_expired: pause scheduling automatically once
		// the account has expired.
		field.Bool("auto_pause_on_expired").
			Default(true).
			Comment("Auto pause scheduling when account expires."),

		// ========== Scheduling and rate-limit fields ==========
		// Added in migrations/005_schema_parity.sql.

		// schedulable: whether the scheduler may pick this account.
		// false temporarily removes the account from request assignment
		// (e.g. while a token refresh is in flight).
		field.Bool("schedulable").
			Default(true),

		// rate_limited_at: when a rate limit was triggered
		// (recorded on a 429 response).
		field.Time("rate_limited_at").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),

		// rate_limit_reset_at: when the rate limit is expected to lift;
		// the scheduler avoids this account until then.
		field.Time("rate_limit_reset_at").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),

		// overload_until: when the overload state clears
		// (set on a 529 "API overloaded" response).
		field.Time("overload_until").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),

		// session_window_*: session-window bookkeeping for APIs that
		// enforce session time windows (e.g. Claude Pro).
		field.Time("session_window_start").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
		field.Time("session_window_end").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
		field.String("session_window_status").
			Optional().
			Nillable().
			MaxLen(20),
	}
}

// Edges declares the account entity's relationships.
func (Account) Edges() []ent.Edge {
	return []ent.Edge{
		// groups: groups this account belongs to (many-to-many via the
		// account_groups join table). An account may belong to several
		// groups and a group may contain several accounts.
		edge.To("groups", Group.Type).
			Through("account_groups", AccountGroup.Type),
		// proxy: optional proxy configuration used by the account,
		// backed by the existing proxy_id foreign-key field.
		edge.To("proxy", Proxy.Type).
			Field("proxy_id").
			Unique(),
		// usage_logs: usage log entries produced by this account.
		edge.To("usage_logs", UsageLog.Type),
	}
}

// Indexes declares database indexes for common query paths.
// Each index backs a frequent filter or ordering condition.
func (Account) Indexes() []ent.Index {
	return []ent.Index{
		index.Fields("platform"),            // filter by platform
		index.Fields("type"),                // filter by auth type
		index.Fields("status"),              // filter by status
		index.Fields("proxy_id"),            // filter by proxy
		index.Fields("priority"),            // order by priority
		index.Fields("last_used_at"),        // order by last use
		index.Fields("schedulable"),         // filter schedulable accounts
		index.Fields("rate_limited_at"),     // filter rate-limited accounts
		index.Fields("rate_limit_reset_at"), // filter by rate-limit reset time
		index.Fields("overload_until"),      // filter overloaded accounts
		index.Fields("deleted_at"),          // speed up soft-delete filtering
	}
}
|
||||||
60
backend/ent/schema/account_group.go
Normal file
60
backend/ent/schema/account_group.go
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/edge"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AccountGroup holds the edge schema definition for the account_groups relationship.
// It stores extra fields (priority, created_at) and uses a composite primary key.
type AccountGroup struct {
	ent.Schema
}

// Annotations maps the edge schema to the "account_groups" table and
// declares its composite primary key.
func (AccountGroup) Annotations() []schema.Annotation {
	return []schema.Annotation{
		entsql.Annotation{Table: "account_groups"},
		// Composite primary key: (account_id, group_id).
		field.ID("account_id", "group_id"),
	}
}

// Fields declares the join-table columns.
func (AccountGroup) Fields() []ent.Field {
	return []ent.Field{
		// account_id / group_id: the two halves of the composite key.
		field.Int64("account_id"),
		field.Int64("group_id"),
		// priority: priority of the account within this group.
		field.Int("priority").
			Default(50),
		// created_at: when the association was created (immutable).
		field.Time("created_at").
			Immutable().
			Default(time.Now).
			SchemaType(map[string]string{dialect.Postgres: "timestamptz"}),
	}
}

// Edges binds the join table to its two endpoints; both sides are
// required and unique per row.
func (AccountGroup) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("account", Account.Type).
			Unique().
			Required().
			Field("account_id"),
		edge.To("group", Group.Type).
			Unique().
			Required().
			Field("group_id"),
	}
}

// Indexes declares secondary indexes on the join table.
func (AccountGroup) Indexes() []ent.Index {
	return []ent.Index{
		index.Fields("group_id"),
		index.Fields("priority"),
	}
}
|
||||||
81
backend/ent/schema/api_key.go
Normal file
81
backend/ent/schema/api_key.go
Normal file
@@ -0,0 +1,81 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/edge"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// APIKey holds the schema definition for the APIKey entity.
type APIKey struct {
	ent.Schema
}

// Annotations maps the entity to the "api_keys" table.
func (APIKey) Annotations() []schema.Annotation {
	return []schema.Annotation{
		entsql.Annotation{Table: "api_keys"},
	}
}

// Mixin adds created_at/updated_at timestamps and soft deletion.
func (APIKey) Mixin() []ent.Mixin {
	return []ent.Mixin{
		mixins.TimeMixin{},
		mixins.SoftDeleteMixin{},
	}
}

// Fields declares the API key entity's fields.
func (APIKey) Fields() []ent.Field {
	return []ent.Field{
		// user_id: owner of the key.
		field.Int64("user_id"),
		// key: the API key value itself; unique across all keys.
		field.String("key").
			MaxLen(128).
			NotEmpty().
			Unique(),
		// name: human-readable label for the key.
		field.String("name").
			MaxLen(100).
			NotEmpty(),
		// group_id: optional group the key is bound to.
		field.Int64("group_id").
			Optional().
			Nillable(),
		// status: key status; defaults to active.
		field.String("status").
			MaxLen(20).
			Default(service.StatusActive),
		// ip_whitelist / ip_blacklist: optional per-key IP access control.
		field.JSON("ip_whitelist", []string{}).
			Optional().
			Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"),
		field.JSON("ip_blacklist", []string{}).
			Optional().
			Comment("Blocked IPs/CIDRs"),
	}
}

// Edges declares the key's relationships: its owning user (required),
// an optional group, and the usage logs it produces.
func (APIKey) Edges() []ent.Edge {
	return []ent.Edge{
		edge.From("user", User.Type).
			Ref("api_keys").
			Field("user_id").
			Unique().
			Required(),
		edge.From("group", Group.Type).
			Ref("api_keys").
			Field("group_id").
			Unique(),
		edge.To("usage_logs", UsageLog.Type),
	}
}

// Indexes declares indexes for common lookups.
func (APIKey) Indexes() []ent.Index {
	return []ent.Index{
		// key is already declared Unique() in Fields(); no extra index needed.
		index.Fields("user_id"),
		index.Fields("group_id"),
		index.Fields("status"),
		index.Fields("deleted_at"),
	}
}
|
||||||
127
backend/ent/schema/group.go
Normal file
127
backend/ent/schema/group.go
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
package schema
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/schema/mixins"
|
||||||
|
"github.com/Wei-Shaw/sub2api/internal/service"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/entsql"
|
||||||
|
"entgo.io/ent/schema"
|
||||||
|
"entgo.io/ent/schema/edge"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/index"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Group holds the schema definition for the Group entity.
type Group struct {
	ent.Schema
}

// Annotations maps the entity to the "groups" table.
func (Group) Annotations() []schema.Annotation {
	return []schema.Annotation{
		entsql.Annotation{Table: "groups"},
	}
}

// Mixin adds created_at/updated_at timestamps and soft deletion.
func (Group) Mixin() []ent.Mixin {
	return []ent.Mixin{
		mixins.TimeMixin{},
		mixins.SoftDeleteMixin{},
	}
}

// Fields declares the group entity's fields.
func (Group) Fields() []ent.Field {
	return []ent.Field{
		// Uniqueness of name is enforced by a partial unique index
		// (WHERE deleted_at IS NULL) so names can be reused after a
		// soft delete. See migration 016_soft_delete_partial_unique_indexes.sql.
		field.String("name").
			MaxLen(100).
			NotEmpty(),
		field.String("description").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "text"}),
		// rate_multiplier: billing multiplier applied to this group.
		field.Float("rate_multiplier").
			SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}).
			Default(1.0),
		field.Bool("is_exclusive").
			Default(false),
		field.String("status").
			MaxLen(20).
			Default(service.StatusActive),

		// Subscription-related fields (added by migration 003).
		field.String("platform").
			MaxLen(50).
			Default(service.PlatformAnthropic),
		field.String("subscription_type").
			MaxLen(20).
			Default(service.SubscriptionTypeStandard),
		// Optional spend limits; NULL means "no limit".
		field.Float("daily_limit_usd").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
		field.Float("weekly_limit_usd").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
		field.Float("monthly_limit_usd").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
		field.Int("default_validity_days").
			Default(30),

		// Image-generation pricing (used by the antigravity and gemini platforms).
		field.Float("image_price_1k").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
		field.Float("image_price_2k").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),
		field.Float("image_price_4k").
			Optional().
			Nillable().
			SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}),

		// Claude Code client restrictions (added by migration 029).
		field.Bool("claude_code_only").
			Default(false).
			Comment("是否仅允许 Claude Code 客户端"),
		field.Int64("fallback_group_id").
			Optional().
			Nillable().
			Comment("非 Claude Code 请求降级使用的分组 ID"),
	}
}

// Edges declares the group's relationships.
func (Group) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("api_keys", APIKey.Type),
		edge.To("redeem_codes", RedeemCode.Type),
		edge.To("subscriptions", UserSubscription.Type),
		edge.To("usage_logs", UsageLog.Type),
		edge.From("accounts", Account.Type).
			Ref("groups").
			Through("account_groups", AccountGroup.Type),
		edge.From("allowed_users", User.Type).
			Ref("allowed_groups").
			Through("user_allowed_groups", UserAllowedGroup.Type),
		// Note: fallback_group_id is deliberately used as a plain field
		// with no edge, so multiple groups may point at the same
		// fallback group (an M2O relationship).
	}
}

// Indexes declares indexes for common lookups.
func (Group) Indexes() []ent.Index {
	return []ent.Index{
		// name uniqueness is handled by a partial unique index in
		// migration 016 (see Fields), so no name index is declared here.
		index.Fields("status"),
		index.Fields("platform"),
		index.Fields("subscription_type"),
		index.Fields("is_exclusive"),
		index.Fields("deleted_at"),
	}
}
|
||||||
139
backend/ent/schema/mixins/soft_delete.go
Normal file
139
backend/ent/schema/mixins/soft_delete.go
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
// Package mixins 提供 Ent schema 的可复用混入组件。
|
||||||
|
// 包括时间戳混入、软删除混入等通用功能。
|
||||||
|
package mixins
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/dialect/sql"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/mixin"
|
||||||
|
dbent "github.com/Wei-Shaw/sub2api/ent"
|
||||||
|
"github.com/Wei-Shaw/sub2api/ent/intercept"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SoftDeleteMixin implements soft deletion based on a deleted_at timestamp.
//
// Soft-delete semantics:
//   - Deletes do not remove rows; they set the deleted_at timestamp instead.
//   - Every query automatically filters on deleted_at IS NULL, so only
//     "live" rows are returned by default.
//   - SkipSoftDelete(ctx) bypasses the filter, allowing callers to query
//     soft-deleted rows or perform a real delete.
//
// Implementation:
//   - An ent Interceptor intercepts every query and adds the
//     deleted_at IS NULL predicate.
//   - An ent Hook intercepts delete mutations and rewrites
//     DELETE into UPDATE SET deleted_at = NOW().
//
// Usage:
//
//	func (User) Mixin() []ent.Mixin {
//		return []ent.Mixin{
//			mixins.SoftDeleteMixin{},
//		}
//	}
type SoftDeleteMixin struct {
	mixin.Schema
}

// Fields declares the column backing soft deletion.
// deleted_at:
//   - TIMESTAMPTZ, recording the exact deletion time
//   - Optional and Nillable so new rows start out NULL
//   - NULL means "not deleted"; non-NULL means soft-deleted
func (SoftDeleteMixin) Fields() []ent.Field {
	return []ent.Field{
		field.Time("deleted_at").
			Optional().
			Nillable().
			SchemaType(map[string]string{
				dialect.Postgres: "timestamptz",
			}),
	}
}

// softDeleteKey is the context key type used to mark "skip soft delete".
// An empty-struct key cannot collide with keys from other packages.
type softDeleteKey struct{}

// SkipSoftDelete returns a context that disables the soft-delete
// interceptor and mutation hook.
//
// Use cases:
//   - querying soft-deleted rows (e.g. an admin recycle-bin view)
//   - performing a real, physical delete (e.g. permanent cleanup)
//   - restoring soft-deleted rows
//
// Example:
//
//	// query all users, including soft-deleted ones
//	users, err := client.User.Query().All(mixins.SkipSoftDelete(ctx))
//
//	// physically delete a row
//	client.User.DeleteOneID(id).Exec(mixins.SkipSoftDelete(ctx))
func SkipSoftDelete(parent context.Context) context.Context {
	return context.WithValue(parent, softDeleteKey{}, true)
}

// Interceptors returns the query interceptors.
// The interceptor adds the deleted_at IS NULL predicate to every query,
// so soft-deleted rows never appear in normal query results.
func (d SoftDeleteMixin) Interceptors() []ent.Interceptor {
	return []ent.Interceptor{
		intercept.TraverseFunc(func(ctx context.Context, q intercept.Query) error {
			// Honor SkipSoftDelete: leave the query unfiltered.
			if skip, _ := ctx.Value(softDeleteKey{}).(bool); skip {
				return nil
			}
			// Restrict the query to non-deleted rows.
			d.applyPredicate(q)
			return nil
		}),
	}
}

// Hooks returns the mutation hooks.
// The hook intercepts DELETE operations and rewrites them into
// UPDATE SET deleted_at = NOW(), so "deleting" only marks the row
// as deleted instead of removing it.
func (d SoftDeleteMixin) Hooks() []ent.Hook {
	return []ent.Hook{
		func(next ent.Mutator) ent.Mutator {
			return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
				// Only delete operations are rewritten.
				if m.Op() != ent.OpDelete && m.Op() != ent.OpDeleteOne {
					return next.Mutate(ctx, m)
				}
				// Honor SkipSoftDelete: perform a real delete.
				if skip, _ := ctx.Value(softDeleteKey{}).(bool); skip {
					return next.Mutate(ctx, m)
				}
				// Assert the extended mutation interface needed to
				// rewrite the operation.
				mx, ok := m.(interface {
					SetOp(ent.Op)
					SetDeletedAt(time.Time)
					WhereP(...func(*sql.Selector))
					Client() *dbent.Client
				})
				if !ok {
					return nil, fmt.Errorf("unexpected mutation type %T", m)
				}
				// Add the soft-delete predicate so already-deleted rows
				// are not touched again.
				d.applyPredicate(mx)
				// Rewrite the DELETE into an UPDATE ...
				mx.SetOp(ent.OpUpdate)
				// ... that stamps the deletion time with "now".
				mx.SetDeletedAt(time.Now())
				return mx.Client().Mutate(ctx, m)
			})
		},
	}
}

// applyPredicate appends the deleted_at IS NULL predicate to a query
// or mutation. This is the core of the soft-delete filter.
func (d SoftDeleteMixin) applyPredicate(w interface{ WhereP(...func(*sql.Selector)) }) {
	w.WhereP(
		sql.FieldIsNull(d.Fields()[0].Descriptor().Name),
	)
}
|
||||||
32
backend/ent/schema/mixins/time.go
Normal file
32
backend/ent/schema/mixins/time.go
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
package mixins
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"entgo.io/ent"
|
||||||
|
"entgo.io/ent/dialect"
|
||||||
|
"entgo.io/ent/schema/field"
|
||||||
|
"entgo.io/ent/schema/mixin"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TimeMixin provides created_at and updated_at fields compatible with the existing schema.
type TimeMixin struct {
	mixin.Schema
}

// Fields declares the timestamp columns:
//   - created_at: set once on insert, then immutable
//   - updated_at: set on insert and refreshed on every update
func (TimeMixin) Fields() []ent.Field {
	return []ent.Field{
		field.Time("created_at").
			Immutable().
			Default(time.Now).
			SchemaType(map[string]string{
				dialect.Postgres: "timestamptz",
			}),
		field.Time("updated_at").
			Default(time.Now).
			UpdateDefault(time.Now).
			SchemaType(map[string]string{
				dialect.Postgres: "timestamptz",
			}),
	}
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user