mirror of https://github.com/sysadminsmedia/homebox.git (synced 2025-12-25 23:03:41 +01:00)

Compare commits: fix/variou... to v0.10.2 (126 commits)
126 commits (SHA1 only):

8cc0f30291, afbc6a49ac, 2594d4cdb4, 2eafa8e72f, e65d44fa9e, eeae790fe4, a70ee33759, 80e2071300,
db27d34b4f, da22074ed3, 51ba15f84c, b408318acb, c0e8e34065, 4738a9b131, b08e52104c, 3e2ab29054,
e5f66d99bc, 0995478cc0, ae4b95301f, d8482f3a13, 1365bdfd46, 2cd3c15215, 0dc4fa5d98, 318b8be192,
63a966c526, db16d3fb23, 1952b9f1cb, 2b31d46ab3, f3f96723b2, b28bb2c4a8, a33cf54a33, f13bf2958d,
a9712c48af, e68b7cf500, 744a5bbb47, fc5698410b, 455163d637, fbc7e6e33a, 5739b2005a, 5f41960c0a,
c89aa738cf, 94fd9c314d, 0876deb1e9, 5438898b49, 9fa17bec90, b5987f2e8d, 2cbcc8bb1d, cceec06148,
2e2eed143d, 272cc5a370, 275e106d72, 3f0e65a2ad, 22bbaae08f, 8c7d91ea52, 5a219f6a9c, 895017b28e,
02ce52dbe3, c5ae6b17f9, 371fc0a6af, 016780920d, 06eb6c1f91, 27dad0e118, dc9446516a, a042496c71,
feab9f4c46, fe5622d62a, e759f2817e, 60cc5c2710, 25ccd678c9, a77b4cbe71, aae32b0d74, a94b43a19e,
9a4c2df552, d5b89a755e, a9acf62d93, c896e198dd, c538518b4b, f66d14eeea, bc8feac83c, 40a98bcf30,
045e91d9ac, a80ab0f3e9, e5d209d407, ef1531e561, 4dd036abb2, 81e909ccfb, 0cb9d2a8e4, cb16c0e829,
9e067ee230, 8b53d40a2a, 4c0ad7a5d8, 66e25ba068, 56c98e6e3a, 01f305a98e, e14cdaccdd, 4ece25b58d,
c1957bb927, 636ca155e5, 17a5b43609, b2b3ccf923, 2272c7eb6b, 181c324dd4, 89912b18d7, 85f2af4bc3,
21ad5a32c1, 4b51a4ad9a, dd7e634b69, 351ec64bbc, 3a758e012f, c16084d99f, 5591267124, d46c16f01f,
18c22e8a68, 64b3ac3e94, c36b9dcf5d, d3b6c93b63, 3b862e36c8, f3bb86d905, 4dd925caf0, ced5aef6d1,
6a853c07a0, f0b9a0fce4, 00f09fec2f, 6e1863b515, dfe2084c74, 0825f055a7
@@ -35,6 +35,6 @@
    // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
    "remoteUser": "node",
    "features": {
        "golang": "1.19"
        "golang": "1.20"
    }
}
@@ -22,3 +22,4 @@
**/secrets.dev.yaml
**/values.dev.yaml
README.md
!Dockerfile.rootless
2  .github/workflows/partial-backend.yaml  vendored
@@ -7,7 +7,7 @@ jobs:
  Go:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v4
10  .github/workflows/partial-frontend.yaml  vendored
@@ -9,11 +9,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - uses: pnpm/action-setup@v2.2.4
      - uses: pnpm/action-setup@v2.4.0
        with:
          version: 6.0.2

@@ -34,7 +34,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

@@ -48,11 +48,11 @@ jobs:
        with:
          go-version: "1.20"

      - uses: actions/setup-node@v3
      - uses: actions/setup-node@v4
        with:
          node-version: 18

      - uses: pnpm/action-setup@v2.2.4
      - uses: pnpm/action-setup@v2.4.0
        with:
          version: 6.0.2
29  .github/workflows/partial-publish.yaml  vendored
@@ -20,7 +20,7 @@ jobs:
    name: "Publish Homebox"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
@@ -35,7 +35,7 @@ jobs:

      - name: install buildx
        id: buildx
        uses: docker/setup-buildx-action@v2
        uses: docker/setup-buildx-action@v3
        with:
          install: true

@@ -44,7 +44,7 @@ jobs:
        env:
          CR_PAT: ${{ secrets.GH_TOKEN }}

      - name: build nightly the image
      - name: build nightly image
        if: ${{ inputs.release == false }}
        run: |
          docker build --push --no-cache \
@@ -53,6 +53,16 @@ jobs:
            --build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
            --platform=linux/amd64,linux/arm64,linux/arm/v7 .

      - name: build nightly-rootless image
        if: ${{ inputs.release == false }}
        run: |
          docker build --push --no-cache \
            --tag=ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \
            --build-arg=COMMIT=$(git rev-parse HEAD) \
            --build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
            --file Dockerfile.rootless \
            --platform=linux/amd64,linux/arm64,linux/arm/v7 .

      - name: build release tagged the image
        if: ${{ inputs.release == true }}
        run: |
@@ -64,3 +74,16 @@
            --build-arg COMMIT=$(git rev-parse HEAD) \
            --build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
            --platform linux/amd64,linux/arm64,linux/arm/v7 .

      - name: build release tagged the rootless image
        if: ${{ inputs.release == true }}
        run: |
          docker build --push --no-cache \
            --tag ghcr.io/hay-kot/homebox:nightly-rootless \
            --tag ghcr.io/hay-kot/homebox:latest-rootless \
            --tag ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \
            --build-arg VERSION=${{ inputs.tag }} \
            --build-arg COMMIT=$(git rev-parse HEAD) \
            --build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
            --platform linux/amd64,linux/arm64,linux/arm/v7 \
            --file Dockerfile.rootless .
29  .github/workflows/publish.yaml  vendored
@@ -4,9 +4,6 @@ on:
  push:
    branches:
      - main
  release:
    types:
      - published

env:
  FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
@@ -16,7 +13,7 @@ jobs:
    name: "Deploy Nightly to Fly.io"
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: actions/checkout@v4
      - uses: superfly/flyctl-actions/setup-flyctl@master
      - run: flyctl deploy --remote-only

@@ -29,28 +26,4 @@ jobs:
    secrets:
      GH_TOKEN: ${{ secrets.CR_PAT }}

  publish-tag:
    name: "Publish Tag"
    if: github.event_name == 'release'
    uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
    with:
      release: true
      tag: ${{ github.event.release.tag_name }}
    secrets:
      GH_TOKEN: ${{ secrets.CR_PAT }}

  deploy-docs:
    name: Deploy docs
    needs:
      - publish-tag
    runs-on: ubuntu-latest
    steps:
      - name: Checkout main
        uses: actions/checkout@v3

      - name: Deploy docs
        uses: mhausenblas/mkdocs-deploy-gh-pages@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CONFIG_FILE: docs/mkdocs.yml
          EXTRA_PACKAGES: build-base
32  .github/workflows/tag.yaml  vendored
@@ -17,17 +17,17 @@ jobs:
    name: "Frontend and End-to-End Tests"
    uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main

  goreleaser:
    name: goreleaser
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v4

      - uses: pnpm/action-setup@v2
        with:
@@ -49,3 +49,29 @@ jobs:
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  publish-tag:
    name: "Publish Tag"
    uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
    with:
      release: true
      tag: ${{ github.ref_name }}
    secrets:
      GH_TOKEN: ${{ secrets.CR_PAT }}

  deploy-docs:
    name: Deploy docs
    needs:
      - publish-tag
      - goreleaser
    runs-on: ubuntu-latest
    steps:
      - name: Checkout main
        uses: actions/checkout@v4

      - name: Deploy docs
        uses: mhausenblas/mkdocs-deploy-gh-pages@master
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          CONFIG_FILE: docs/mkdocs.yml
          EXTRA_PACKAGES: build-base
@@ -1,6 +1,6 @@

# Build Nuxt
FROM node:17-alpine as frontend-builder
FROM node:18-alpine as frontend-builder
WORKDIR /app
RUN npm install -g pnpm
COPY frontend/package.json frontend/pnpm-lock.yaml ./
53  Dockerfile.rootless  Normal file
@@ -0,0 +1,53 @@

# Build Nuxt
FROM node:17-alpine as frontend-builder
WORKDIR /app
RUN npm install -g pnpm
COPY frontend/package.json frontend/pnpm-lock.yaml ./
RUN pnpm install --frozen-lockfile --shamefully-hoist
COPY frontend .
RUN pnpm build

# Build API
FROM golang:alpine AS builder
ARG BUILD_TIME
ARG COMMIT
ARG VERSION
RUN apk update && \
    apk upgrade && \
    apk add --update git build-base gcc g++

WORKDIR /go/src/app
COPY ./backend .
RUN go get -d -v ./...
RUN rm -rf ./app/api/public
COPY --from=frontend-builder /app/.output/public ./app/api/static/public
RUN CGO_ENABLED=0 GOOS=linux go build \
    -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
    -o /go/bin/api \
    -v ./app/api/*.go && \
    chmod +x /go/bin/api && \
    # create a directory so that we can copy it in the next stage
    mkdir /data

# Production Stage
FROM gcr.io/distroless/static

ENV HBOX_MODE=production
ENV HBOX_STORAGE_DATA=/data/
ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_fk=1

# Copy the binary and the (empty) /data dir and
# change the ownership to the low-privileged user
COPY --from=builder --chown=nonroot /go/bin/api /app
COPY --from=builder --chown=nonroot /data /data

LABEL Name=homebox Version=0.0.1
LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox"
EXPOSE 7745
VOLUME [ "/data" ]

# Drop root and run as low-privileged user
USER nonroot
ENTRYPOINT [ "/app" ]
CMD [ "/data/config.yml" ]
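The rootless image bakes build metadata in through `-ldflags "-X main.commit=... -X main.buildTime=... -X main.version=..."`. The receiving side of those flags is not part of this diff, so the following is only a minimal sketch of the conventional pattern such flags target; the `build()` helper and default values are hypothetical, not homebox's actual code.

```go
package main

import "fmt"

// These defaults are overridden at build time by
// -ldflags "-X main.commit=... -X main.buildTime=... -X main.version=...".
var (
	version   = "nightly"
	commit    = "HEAD"
	buildTime = "now"
)

// build is a hypothetical helper that formats the injected values for logs.
func build() string {
	return fmt.Sprintf("%s, commit %s, built at %s", version, commit, buildTime)
}

func main() {
	fmt.Println("homebox " + build())
}
```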
@@ -16,6 +16,10 @@
[Configuration & Docker Compose](https://hay-kot.github.io/homebox/quick-start)

```bash
# If using the rootless image, ensure data
# folder has correct permissions
mkdir -p /path/to/data/folder
chown 65532:65532 -R /path/to/data/folder
docker run -d \
  --name homebox \
  --restart unless-stopped \
@@ -23,6 +27,7 @@ docker run -d \
  --env TZ=Europe/Bucharest \
  --volume /path/to/data/folder/:/data \
  ghcr.io/hay-kot/homebox:latest
# ghcr.io/hay-kot/homebox:latest-rootless
```

## Credits
43  Taskfile.yml
@@ -12,15 +12,25 @@ tasks:
      - cd backend && go mod tidy
      - cd frontend && pnpm install --shamefully-hoist

  generate:
    desc: |
      Generates collateral files from the backend project
      including swagger docs and typescripts type for the frontend
    deps:
      - db:generate
  swag:
    desc: Generate swagger docs
    dir: backend/app/api/static/
    vars:
      API: "../"
      INTERNAL: "../../../internal"
      PKGS: "../../../pkgs"
    cmds:
      - swag fmt --dir={{ .API }}
      - swag init --dir={{ .API }},{{ .INTERNAL }}/core/services,{{ .INTERNAL }}/data/repo --parseDependency
    sources:
      - "./backend/app/api/**/*"
      - "./backend/internal/data/**"
      - "./backend/internal/core/services/**/*"
      - "./backend/app/tools/typegen/main.go"

  typescript-types:
    desc: Generates typescript types from swagger definition
    cmds:
      - cd backend/app/api/static && swag fmt --dir=../
      - cd backend/app/api/static && swag init --dir=../,../../../internal,../../../pkgs
      - |
        npx swagger-typescript-api \
          --no-client \
@@ -28,12 +38,17 @@ tasks:
          --path ./backend/app/api/static/docs/swagger.json \
          --output ./frontend/lib/api/types
      - go run ./backend/app/tools/typegen/main.go ./frontend/lib/api/types/data-contracts.ts
      - cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json
    sources:
      - "./backend/app/api/**/*"
      - "./backend/internal/data/**"
      - "./backend/internal/core/services/**/*"
      - "./backend/app/tools/typegen/main.go"
      - ./backend/app/tools/typegen/main.go
      - ./backend/app/api/static/docs/swagger.json

  generate:
    deps:
      - db:generate
    cmds:
      - task: swag
      - task: typescript-types
      - cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json

  go:run:
    desc: Starts the backend api server (depends on generate task)
@@ -139,4 +154,4 @@
      - task: go:all
      - task: ui:check
      - task: ui:fix
      - task: test:ci
      - task: test:ci
BIN  backend/api  Executable file
Binary file not shown.
@@ -4,11 +4,12 @@ import (
    "time"

    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
    "github.com/hay-kot/homebox/backend/internal/data/ent"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/config"
    "github.com/hay-kot/homebox/backend/pkgs/mailer"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/server"
)

type app struct {
@@ -18,6 +19,7 @@ type app struct {
    server   *server.Server
    repos    *repo.AllRepos
    services *services.AllServices
    bus      *eventbus.EventBus
}

func new(conf *config.Config) *app {
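The new `bus *eventbus.EventBus` field is what the WebSocket handler further down subscribes to. The internal `eventbus` package itself is not in this diff, so the sketch below is purely illustrative of a group-scoped mutation bus; the names match the identifiers used by the handlers, but the signatures and internals are assumptions, not homebox's actual implementation.

```go
package eventbus

import "github.com/google/uuid"

// GroupMutationEvent mirrors the payload type the WebSocket handler asserts on.
type GroupMutationEvent struct {
	GID uuid.UUID
}

type Event string

const (
	EventLabelMutation    Event = "label.mutation"
	EventLocationMutation Event = "location.mutation"
	EventItemMutation     Event = "item.mutation"
)

type message struct {
	event Event
	data  any
}

// EventBus fans published events out to subscribers on a single goroutine.
// In this sketch all Subscribe calls are expected before Run starts.
type EventBus struct {
	ch   chan message
	subs map[Event][]func(any)
}

func New() *EventBus {
	return &EventBus{ch: make(chan message, 16), subs: map[Event][]func(any){}}
}

func (b *EventBus) Subscribe(e Event, fn func(any)) { b.subs[e] = append(b.subs[e], fn) }

func (b *EventBus) Publish(e Event, data any) { b.ch <- message{event: e, data: data} }

// Run blocks and dispatches events; the app starts it with `go app.bus.Run()`.
func (b *EventBus) Run() {
	for m := range b.ch {
		for _, fn := range b.subs[m.event] {
			fn(m.data)
		}
	}
}
```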
@@ -1,12 +1,18 @@
package v1

import (
    "fmt"
    "net/http"

    "github.com/google/uuid"
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog/log"

    "github.com/olahol/melody"
)

type Results[T any] struct {
@@ -43,12 +49,20 @@ func WithRegistration(allowRegistration bool) func(*V1Controller) {
    }
}

func WithSecureCookies(secure bool) func(*V1Controller) {
    return func(ctrl *V1Controller) {
        ctrl.cookieSecure = secure
    }
}

type V1Controller struct {
    cookieSecure      bool
    repo              *repo.AllRepos
    svc               *services.AllServices
    maxUploadSize     int64
    isDemo            bool
    allowRegistration bool
    bus               *eventbus.EventBus
}

type (
@@ -77,11 +91,12 @@ func BaseUrlFunc(prefix string) func(s string) string {
    }
}

func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ...func(*V1Controller)) *V1Controller {
func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, bus *eventbus.EventBus, options ...func(*V1Controller)) *V1Controller {
    ctrl := &V1Controller{
        repo:              repos,
        svc:               svc,
        allowRegistration: true,
        bus:               bus,
    }

    for _, opt := range options {
@@ -110,3 +125,42 @@ func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) errchain.Hand
    })
}

func (ctrl *V1Controller) HandleCacheWS() errchain.HandlerFunc {
    m := melody.New()

    m.HandleConnect(func(s *melody.Session) {
        auth := services.NewContext(s.Request.Context())
        s.Set("gid", auth.GID)
    })

    factory := func(e string) func(data any) {
        return func(data any) {
            eventData, ok := data.(eventbus.GroupMutationEvent)
            if !ok {
                log.Log().Msgf("invalid event data: %v", data)
                return
            }

            jsonStr := fmt.Sprintf(`{"event": "%s"}`, e)

            _ = m.BroadcastFilter([]byte(jsonStr), func(s *melody.Session) bool {
                groupIDStr, ok := s.Get("gid")
                if !ok {
                    return false
                }

                GID := groupIDStr.(uuid.UUID)
                return GID == eventData.GID
            })
        }
    }

    ctrl.bus.Subscribe(eventbus.EventLabelMutation, factory("label.mutation"))
    ctrl.bus.Subscribe(eventbus.EventLocationMutation, factory("location.mutation"))
    ctrl.bus.Subscribe(eventbus.EventItemMutation, factory("item.mutation"))

    return func(w http.ResponseWriter, r *http.Request) error {
        return m.HandleRequest(w, r)
    }
}

@@ -7,8 +7,8 @@ import (
    "github.com/google/uuid"
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog/log"
)

@@ -68,3 +68,16 @@ func (ctrl *V1Controller) HandleEnsureImportRefs() errchain.HandlerFunc {
func (ctrl *V1Controller) HandleItemDateZeroOut() errchain.HandlerFunc {
    return actionHandlerFactory("zero out date time", ctrl.repo.Items.ZeroOutTimeFields)
}

// HandleSetPrimaryPhotos godoc
//
// @Summary Set Primary Photos
// @Description Sets the first photo of each item as the primary photo
// @Tags Actions
// @Produce json
// @Success 200 {object} ActionAmountResult
// @Router /v1/actions/set-primary-photos [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleSetPrimaryPhotos() errchain.HandlerFunc {
    return actionHandlerFactory("ensure asset IDs", ctrl.repo.Items.SetPrimaryPhotos)
}

@@ -9,8 +9,8 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"

    "github.com/rs/zerolog/log"
)
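`HandleCacheWS` is mounted later in this diff at `v1Base("/ws/events")` and pushes small `{"event": "<name>"}` frames whenever a label, location, or item in the caller's group mutates. A rough client-side sketch follows; the gorilla/websocket dependency, the full `/api/v1/ws/events` path, and the `access_token` query parameter are assumptions on my part (the query-parameter strategy is consistent with the middleware change below, but the homebox frontend may authenticate differently).

```go
package main

import (
	"fmt"
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Assumed host, path, and token; port 7745 is the one exposed by the Dockerfiles.
	url := "ws://localhost:7745/api/v1/ws/events?access_token=YOUR_TOKEN"

	conn, _, err := websocket.DefaultDialer.Dial(url, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for {
		// Each frame is a small JSON document such as {"event": "item.mutation"}.
		_, msg, err := conn.ReadMessage()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(string(msg))
	}
}
```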
@@ -3,16 +3,23 @@ package v1
import (
    "errors"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog/log"
)

const (
    cookieNameToken    = "hb.auth.token"
    cookieNameRemember = "hb.auth.remember"
    cookieNameSession  = "hb.auth.session"
)

type (
    TokenResponse struct {
        Token string `json:"token"`
@@ -27,6 +34,30 @@ type (
    }
)

type CookieContents struct {
    Token     string
    ExpiresAt time.Time
    Remember  bool
}

func GetCookies(r *http.Request) (*CookieContents, error) {
    cookie, err := r.Cookie(cookieNameToken)
    if err != nil {
        return nil, errors.New("authorization cookie is required")
    }

    rememberCookie, err := r.Cookie(cookieNameRemember)
    if err != nil {
        return nil, errors.New("remember cookie is required")
    }

    return &CookieContents{
        Token:     cookie.Value,
        ExpiresAt: cookie.Expires,
        Remember:  rememberCookie.Value == "true",
    }, nil
}

// HandleAuthLogin godoc
//
// @Summary User Login
@@ -81,6 +112,7 @@ func (ctrl *V1Controller) HandleAuthLogin() errchain.HandlerFunc {
            return validate.NewRequestError(errors.New("authentication failed"), http.StatusInternalServerError)
        }

        ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, loginForm.StayLoggedIn)
        return server.JSON(w, http.StatusOK, TokenResponse{
            Token:     "Bearer " + newToken.Raw,
            ExpiresAt: newToken.ExpiresAt,
@@ -108,6 +140,7 @@ func (ctrl *V1Controller) HandleAuthLogout() errchain.HandlerFunc {
            return validate.NewRequestError(err, http.StatusInternalServerError)
        }

        ctrl.unsetCookies(w, noPort(r.Host))
        return server.JSON(w, http.StatusNoContent, nil)
    }
}
@@ -133,6 +166,78 @@ func (ctrl *V1Controller) HandleAuthRefresh() errchain.HandlerFunc {
            return validate.NewUnauthorizedError()
        }

        ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, false)
        return server.JSON(w, http.StatusOK, newToken)
    }
}

func noPort(host string) string {
    return strings.Split(host, ":")[0]
}

func (ctrl *V1Controller) setCookies(w http.ResponseWriter, domain, token string, expires time.Time, remember bool) {
    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameRemember,
        Value:    strconv.FormatBool(remember),
        Expires:  expires,
        Domain:   domain,
        Secure:   ctrl.cookieSecure,
        HttpOnly: true,
        Path:     "/",
    })

    // Set HTTP only cookie
    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameToken,
        Value:    token,
        Expires:  expires,
        Domain:   domain,
        Secure:   ctrl.cookieSecure,
        HttpOnly: true,
        Path:     "/",
    })

    // Set Fake Session cookie
    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameSession,
        Value:    "true",
        Expires:  expires,
        Domain:   domain,
        Secure:   ctrl.cookieSecure,
        HttpOnly: false,
        Path:     "/",
    })
}

func (ctrl *V1Controller) unsetCookies(w http.ResponseWriter, domain string) {
    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameToken,
        Value:    "",
        Expires:  time.Unix(0, 0),
        Domain:   domain,
        Secure:   ctrl.cookieSecure,
        HttpOnly: true,
        Path:     "/",
    })

    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameRemember,
        Value:    "false",
        Expires:  time.Unix(0, 0),
        Domain:   domain,
        Secure:   ctrl.cookieSecure,
        HttpOnly: true,
        Path:     "/",
    })

    // Set Fake Session cookie
    http.SetCookie(w, &http.Cookie{
        Name:     cookieNameSession,
        Value:    "false",
        Expires:  time.Unix(0, 0),
        Domain:   domain,
        Secure:   ctrl.cookieSecure,
        HttpOnly: false,
        Path:     "/",
    })
}
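With these changes a successful login sets three cookies alongside the JSON token response: the HttpOnly `hb.auth.token` and `hb.auth.remember`, plus the non-HttpOnly `hb.auth.session` marker. A rough sketch of exercising that from a Go client; the `/api/v1/users/login` path and the form field names are my assumptions, since the login route and request body are not part of this diff.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/http/cookiejar"
	"net/url"
)

func main() {
	jar, _ := cookiejar.New(nil) // keeps the hb.auth.* cookies between requests
	client := &http.Client{Jar: jar}

	// Assumed endpoint and field names; adjust to the real login contract.
	resp, err := client.PostForm("http://localhost:7745/api/v1/users/login", url.Values{
		"username":     {"user@example.com"},
		"password":     {"secret"},
		"stayLoggedIn": {"true"},
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The handler also returns {"token": "Bearer ...", "expiresAt": ...} in the body;
	// here we only print which cookies were set on the response.
	for _, c := range resp.Cookies() {
		fmt.Printf("%s (HttpOnly=%v)\n", c.Name, c.HttpOnly)
	}
}
```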
@@ -7,7 +7,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

type (

@@ -12,8 +12,8 @@ import (
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog/log"
)

@@ -27,6 +27,7 @@ import (
// @Param pageSize query int false "items per page"
// @Param labels query []string false "label Ids" collectionFormat(multi)
// @Param locations query []string false "location Ids" collectionFormat(multi)
// @Param parentIds query []string false "parent Ids" collectionFormat(multi)
// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{}
// @Router /v1/items [GET]
// @Security Bearer
@@ -56,8 +57,10 @@ func (ctrl *V1Controller) HandleItemsGetAll() errchain.HandlerFunc {
            Search:          params.Get("q"),
            LocationIDs:     queryUUIDList(params, "locations"),
            LabelIDs:        queryUUIDList(params, "labels"),
            ParentItemIDs:   queryUUIDList(params, "parentIds"),
            IncludeArchived: queryBool(params.Get("includeArchived")),
            Fields:          filterFieldItems(params["fields"]),
            OrderBy:         params.Get("orderBy"),
        }

        if strings.HasPrefix(v.Search, "#") {
@@ -166,6 +169,32 @@ func (ctrl *V1Controller) HandleItemUpdate() errchain.HandlerFunc {
    return adapters.ActionID("id", fn, http.StatusOK)
}

// HandleItemPatch godocs
//
// @Summary Update Item
// @Tags Items
// @Produce json
// @Param id path string true "Item ID"
// @Param payload body repo.ItemPatch true "Item Data"
// @Success 200 {object} repo.ItemOut
// @Router /v1/items/{id} [Patch]
// @Security Bearer
func (ctrl *V1Controller) HandleItemPatch() errchain.HandlerFunc {
    fn := func(r *http.Request, ID uuid.UUID, body repo.ItemPatch) (repo.ItemOut, error) {
        auth := services.NewContext(r.Context())

        body.ID = ID
        err := ctrl.repo.Items.Patch(auth, auth.GID, ID, body)
        if err != nil {
            return repo.ItemOut{}, err
        }

        return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
    }

    return adapters.ActionID("id", fn, http.StatusOK)
}

// HandleGetAllCustomFieldNames godocs
//
// @Summary Get All Custom Field Names
@@ -203,7 +232,7 @@ func (ctrl *V1Controller) HandleGetAllCustomFieldValues() errchain.HandlerFunc {
        return ctrl.repo.Items.GetAllCustomFieldValues(auth, auth.GID, q.Field)
    }

    return adapters.Action(fn, http.StatusOK)
    return adapters.Query(fn, http.StatusOK)

}

@@ -261,7 +290,9 @@ func (ctrl *V1Controller) HandleItemsExport() errchain.HandlerFunc {

        w.Header().Set("Content-Type", "text/tsv")
        w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv")

        writer := csv.NewWriter(w)
        writer.Comma = '\t'
        return writer.WriteAll(csvData)
    }
}
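The new `PATCH /v1/items/{id}` route accepts a `repo.ItemPatch` body; per the regenerated Swagger definitions later in this compare, that payload currently carries `id` and a nullable `quantity`. A rough client sketch, where the host, port, and token value are placeholders:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

func main() {
	itemID := "00000000-0000-0000-0000-000000000000" // placeholder item UUID
	payload, _ := json.Marshal(map[string]any{
		"id":       itemID,
		"quantity": 3, // the only other field in repo.ItemPatch at this point
	})

	req, err := http.NewRequest(http.MethodPatch,
		"http://localhost:7745/api/v1/items/"+itemID, bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer YOUR_TOKEN") // matches the TokenResponse format

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 with the updated repo.ItemOut on success
}
```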
@@ -8,8 +8,8 @@ import (
    "github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog/log"
)

@@ -29,7 +29,7 @@ type (
// @Param type formData string true "Type of file"
// @Param name formData string true "name of the file including extension"
// @Success 200 {object} repo.ItemOut
// @Failure 422 {object} mid.ErrorResponse
// @Failure 422 {object} validate.ErrorResponse
// @Router /v1/items/{id}/attachments [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleItemAttachmentCreate() errchain.HandlerFunc {

@@ -7,7 +7,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

// HandleLabelsGetAll godoc

@@ -7,7 +7,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

// HandleLocationTreeQuery

@@ -7,7 +7,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

// HandleMaintenanceGetLog godoc

@@ -8,7 +8,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

// HandleGetUserNotifiers godoc

@@ -5,9 +5,10 @@ import (
    "image/png"
    "io"
    "net/http"
    "net/url"

    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/yeqown/go-qrcode/v2"
    "github.com/yeqown/go-qrcode/writer/standard"

@@ -43,7 +44,12 @@ func (ctrl *V1Controller) HandleGenerateQRCode() errchain.HandlerFunc {
            panic(err)
        }

        qrc, err := qrcode.New(q.Data)
        decodedStr, err := url.QueryUnescape(q.Data)
        if err != nil {
            return err
        }

        qrc, err := qrcode.New(decodedStr)
        if err != nil {
            return err
        }
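The QR handler now runs the incoming data string through `url.QueryUnescape` before encoding it, so percent-encoded URLs are rendered in their decoded form. A small stdlib-only illustration of what that decoding step does:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// A query value as it arrives percent-encoded on the wire.
	raw := "https%3A%2F%2Fhomebox.example.com%2Fitem%2F42"

	decoded, err := url.QueryUnescape(raw)
	if err != nil {
		fmt.Println("not valid percent-encoding:", err)
		return
	}
	// The decoded string is what now gets passed to qrcode.New.
	fmt.Println(decoded) // https://homebox.example.com/item/42
}
```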
@@ -4,7 +4,7 @@ import (
    "net/http"

    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

// HandleBillOfMaterialsExport godoc

@@ -8,8 +8,8 @@ import (
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/homebox/backend/internal/web/adapters"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
)

// HandleGroupGet godoc

@@ -8,8 +8,8 @@ import (
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog/log"
)

@@ -13,15 +13,15 @@ import (
    "github.com/go-chi/chi/v5"
    "github.com/go-chi/chi/v5/middleware"

    "github.com/hay-kot/homebox/backend/app/api/static/docs"
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
    "github.com/hay-kot/homebox/backend/internal/data/ent"
    "github.com/hay-kot/homebox/backend/internal/data/migrations"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/homebox/backend/internal/sys/config"
    "github.com/hay-kot/homebox/backend/internal/web/mid"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/safeserve/server"
    "github.com/hay-kot/httpkit/errchain"
    "github.com/hay-kot/httpkit/server"
    "github.com/rs/zerolog"
    "github.com/rs/zerolog/log"
    "github.com/rs/zerolog/pkgerrors"
@@ -39,7 +39,6 @@ var (
// @version 1.0
// @description Track, Manage, and Organize your Things.
// @contact.name Don't
// @license.name MIT
// @BasePath /api
// @securityDefinitions.apikey Bearer
// @in header
@@ -53,8 +52,6 @@ func main() {
        panic(err)
    }

    docs.SwaggerInfo.Host = cfg.Swagger.Host

    if err := run(cfg); err != nil {
        panic(err)
    }
@@ -120,8 +117,9 @@ func run(cfg *config.Config) error {
        return err
    }

    app.bus = eventbus.New()
    app.db = c
    app.repos = repo.New(c, cfg.Storage.Data)
    app.repos = repo.New(c, app.bus, cfg.Storage.Data)
    app.services = services.New(
        app.repos,
        services.WithAutoIncrementAssetID(cfg.Options.AutoIncrementAssetID),
@@ -148,12 +146,17 @@ func run(cfg *config.Config) error {
    app.server = server.NewServer(
        server.WithHost(app.conf.Web.Host),
        server.WithPort(app.conf.Web.Port),
        server.WithReadTimeout(app.conf.Web.ReadTimeout),
        server.WithWriteTimeout(app.conf.Web.WriteTimeout),
        server.WithIdleTimeout(app.conf.Web.IdleTimeout),
    )
    log.Info().Msgf("Starting HTTP Server on %s:%s", app.server.Host, app.server.Port)

    // =========================================================================
    // Start Reoccurring Tasks

    go app.bus.Run()

    go app.startBgTask(time.Duration(24)*time.Hour, func() {
        _, err := app.repos.AuthTokens.PurgeExpiredTokens(context.Background())
        if err != nil {
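`run` now starts the event bus with `go app.bus.Run()` and keeps the existing 24-hour background job that purges expired auth tokens. `startBgTask` itself is outside this diff, so the following is only a sketch of the ticker pattern such a helper typically wraps; the names and the lack of shutdown handling are simplifications, not homebox's actual helper.

```go
package main

import (
	"fmt"
	"time"
)

// startBgTask runs fn immediately and then again on every tick of the interval.
// A production version would also accept a context or stop channel for shutdown.
func startBgTask(interval time.Duration, fn func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		fn()
		<-ticker.C
	}
}

func main() {
	go startBgTask(24*time.Hour, func() {
		// e.g. app.repos.AuthTokens.PurgeExpiredTokens(context.Background())
		fmt.Println("purging expired auth tokens")
	})

	select {} // block forever, as the real server loop would
}
```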
@@ -7,9 +7,10 @@ import (
    "net/url"
    "strings"

    v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1"
    "github.com/hay-kot/homebox/backend/internal/core/services"
    "github.com/hay-kot/homebox/backend/internal/sys/validate"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
)

type tokenHasKey struct {
@@ -94,20 +95,6 @@ func getQuery(r *http.Request) (string, error) {
    return token, nil
}

func getCookie(r *http.Request) (string, error) {
    cookie, err := r.Cookie("hb.auth.token")
    if err != nil {
        return "", errors.New("access_token cookie is required")
    }

    token, err := url.QueryUnescape(cookie.Value)
    if err != nil {
        return "", errors.New("access_token cookie is required")
    }

    return token, nil
}

// mwAuthToken is a middleware that will check the database for a stateful token
// and attach it's user to the request context, or return an appropriate error.
// Authorization support is by token via Headers or Query Parameter
@@ -115,21 +102,30 @@ func getCookie(r *http.Request) (string, error) {
// Example:
// - header = "Bearer 1234567890"
// - query = "?access_token=1234567890"
// - cookie = hb.auth.token = 1234567890
func (a *app) mwAuthToken(next errchain.Handler) errchain.Handler {
    return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
        keyFuncs := [...]KeyFunc{
            getBearer,
            getCookie,
            getQuery,
        var requestToken string

        // We ignore the error to allow the next strategy to be attempted
        {
            cookies, _ := v1.GetCookies(r)
            if cookies != nil {
                requestToken = cookies.Token
            }
        }

        var requestToken string
        for _, keyFunc := range keyFuncs {
            token, err := keyFunc(r)
            if err == nil {
                requestToken = token
                break
        if requestToken == "" {
            keyFuncs := [...]KeyFunc{
                getBearer,
                getQuery,
            }

            for _, keyFunc := range keyFuncs {
                token, err := keyFunc(r)
                if err == nil {
                    requestToken = token
                    break
                }
            }
        }
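After this change the middleware resolves the request token in a fixed order: the `hb.auth.token` cookie first (via `v1.GetCookies`), then the `Authorization: Bearer` header, then the `access_token` query parameter. A condensed, stdlib-only sketch of that resolution order follows; it is an illustration, not the project's actual `getBearer`/`getQuery` helpers.

```go
package main

import (
	"net/http"
	"strings"
)

// resolveToken mirrors the cookie -> header -> query fallback used by mwAuthToken.
func resolveToken(r *http.Request) string {
	// 1. HttpOnly auth cookie set by the login handler.
	if c, err := r.Cookie("hb.auth.token"); err == nil && c.Value != "" {
		return c.Value
	}

	// 2. Authorization: Bearer <token> header.
	if h := r.Header.Get("Authorization"); strings.HasPrefix(h, "Bearer ") {
		return strings.TrimPrefix(h, "Bearer ")
	}

	// 3. ?access_token=<token> query parameter (used by the WebSocket endpoint).
	return r.URL.Query().Get("access_token")
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		if resolveToken(r) == "" {
			http.Error(w, "unauthorized", http.StatusUnauthorized)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	})
	_ = http.ListenAndServe(":8080", nil)
}
```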
@@ -3,7 +3,6 @@ package main
import (
    "embed"
    "errors"
    "fmt"
    "io"
    "mime"
    "net/http"
@@ -16,7 +15,7 @@ import (
    _ "github.com/hay-kot/homebox/backend/app/api/static/docs"
    "github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
    "github.com/hay-kot/homebox/backend/internal/data/repo"
    "github.com/hay-kot/safeserve/errchain"
    "github.com/hay-kot/httpkit/errchain"
    httpSwagger "github.com/swaggo/http-swagger" // http-swagger middleware
)

@@ -41,7 +40,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
    registerMimes()

    r.Get("/swagger/*", httpSwagger.Handler(
        httpSwagger.URL(fmt.Sprintf("%s://%s/swagger/doc.json", a.conf.Swagger.Scheme, a.conf.Swagger.Host)),
        httpSwagger.URL("/swagger/doc.json"),
    ))

    // =========================================================================
@@ -52,6 +51,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
    v1Ctrl := v1.NewControllerV1(
        a.services,
        a.repos,
        a.bus,
        v1.WithMaxUploadSize(a.conf.Web.MaxUploadSize),
        v1.WithRegistration(a.conf.Options.AllowRegistration),
        v1.WithDemoStatus(a.conf.Demo), // Disable Password Change in Demo Mode
@@ -71,6 +71,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
        a.mwRoles(RoleModeOr, authroles.RoleUser.String()),
    }

    r.Get(v1Base("/ws/events"), chain.ToHandlerFunc(v1Ctrl.HandleCacheWS(), userMW...))
    r.Get(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelf(), userMW...))
    r.Put(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfUpdate(), userMW...))
    r.Delete(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfDelete(), userMW...))
@@ -91,6 +92,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
    r.Post(v1Base("/actions/ensure-asset-ids"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureAssetID(), userMW...))
    r.Post(v1Base("/actions/zero-item-time-fields"), chain.ToHandlerFunc(v1Ctrl.HandleItemDateZeroOut(), userMW...))
    r.Post(v1Base("/actions/ensure-import-refs"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureImportRefs(), userMW...))
    r.Post(v1Base("/actions/set-primary-photos"), chain.ToHandlerFunc(v1Ctrl.HandleSetPrimaryPhotos(), userMW...))

    r.Get(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGetAll(), userMW...))
    r.Post(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationCreate(), userMW...))
@@ -114,6 +116,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR

    r.Get(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemGet(), userMW...))
    r.Put(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemUpdate(), userMW...))
    r.Patch(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemPatch(), userMW...))
    r.Delete(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemDelete(), userMW...))

    r.Post(v1Base("/items/{id}/attachments"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentCreate(), userMW...))
@@ -125,7 +128,7 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
    r.Put(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...))
    r.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryDelete(), userMW...))

    r.Get(v1Base("/asset/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleAssetGet(), userMW...))
    r.Get(v1Base("/assets/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleAssetGet(), userMW...))

    // Notifiers
    r.Get(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleGetUserNotifiers(), userMW...))
@@ -153,7 +156,6 @@ func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllR
    r.Get(v1Base("/reporting/bill-of-materials"), chain.ToHandlerFunc(v1Ctrl.HandleBillOfMaterialsExport(), userMW...))

    r.NotFound(chain.ToHandlerFunc(notFoundHandler()))

}

func registerMimes() {
The generated Swagger package (package docs, under backend/app/api/static/docs) is regenerated by swaggo/swag. The regeneration covers:

@@ -1,5 +1,4 @@ : the header comments "// Package docs GENERATED BY SWAG; DO NOT EDIT" / "// This file was generated by swaggo/swag" become "// Package docs Code generated by swaggo/swag. DO NOT EDIT"
@@ -13,9 +12,6 @@ : the "license": {"name": "MIT"} block is dropped from the template info ("contact": {"name": "Don't"}, "version": "{{.Version}}", and "host": "{{.Host}}" remain)
@@ -71,6 +67,31 @@ : new path "/v1/actions/set-primary-photos": POST, Bearer security, tag "Actions", summary "Set Primary Photos", description "Sets the first photo of each item as the primary photo", 200 response "$ref": "#/definitions/v1.ActionAmountResult"
@@ -638,6 +659,46 @@ : new "patch" operation on "/v1/items/{id}": tag "Items", summary "Update Item", path param "id", body param "payload" ("$ref": "#/definitions/repo.ItemPatch"), 200 response "$ref": "#/definitions/repo.ItemOut"
@@ -694,7 +755,7 @@ : the 422 response for "/v1/items/{id}/attachments" now references "#/definitions/validate.ErrorResponse" instead of "#/definitions/mid.ErrorResponse"
@@ -1762,20 +1823,6 @@ : the "mid.ErrorResponse" definition (error string, fields map of string) is removed
@@ -1856,6 +1903,9 @@ and @@ -1867,6 +1917,9 @@ : "primary": {"type": "boolean"} is added to the attachment output and "repo.ItemAttachmentUpdate" definitions
@@ -1966,6 +2019,9 @@ and @@ -2060,6 +2137,9 @@ : "imageId": {"type": "string"} is added to "repo.ItemOut" and "repo.ItemSummary"
@@ -1981,9 +2037,13 @@, @@ -1999,9 +2059,13 @@, @@ -2071,9 +2151,13 @@ : the nullable "location" and "parent" edges change from a bare "$ref" to an "allOf" wrapper carrying "x-nullable": true and "x-omitempty": true
@@ -2045,6 +2109,19 @@ : new definition "repo.ItemPatch" with "id" (string) and "quantity" (integer, "x-nullable"/"x-omitempty")
@@ -2097,7 +2181,8 @@ : "assetId" gains "example": "0"
@@ -2182,7 +2267,6 @@, @@ -2368,7 +2440,6 @@, @@ -2385,7 +2456,6 @@, @@ -2397,7 +2467,6 @@, @@ -2411,7 +2480,6 @@, @@ -2420,7 +2488,6 @@, @@ -2434,7 +2501,6 @@ : the leftover "description": "Sold" annotations are removed from "warrantyExpires", "completedDate", and "scheduledDate"
@@ -2219,12 +2303,6 @@ and @@ -2286,12 +2364,6 @@ : the embedded "items" arrays of "repo.ItemSummary" are removed from the label and location output definitions
@@ -2803,6 +2869,17 @@ : new definition "validate.ErrorResponse" with "error" (string) and "fields" (string)
@@ -2825,6 +2902,8 @@ : SwaggerInfo gains LeftDelim: "{{" and RightDelim: "}}"
backend/app/api/static/docs/swagger.json (generated): the regenerated JSON spec carries the same changes as the template above, against the fixed "version": "1.0" and "basePath": "/api": the license block is dropped; the "/v1/actions/set-primary-photos" POST path and the "/v1/items/{id}" PATCH operation (repo.ItemPatch body, repo.ItemOut response) are added; the attachment 422 response points at "validate.ErrorResponse", whose definition replaces the removed "mid.ErrorResponse"; "primary" (attachments), "imageId" (items), and the "repo.ItemPatch" definition are added; the nullable "location"/"parent" edges move to "allOf" wrappers with "x-nullable"/"x-omitempty"; "assetId" gains "example": "0"; the stray "Sold" descriptions and the embedded label/location "items" arrays are removed.
@@ -1,14 +1,5 @@
|
||||
basePath: /api
|
||||
definitions:
|
||||
mid.ErrorResponse:
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
fields:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
repo.DocumentOut:
|
||||
properties:
|
||||
id:
|
||||
@@ -61,6 +52,8 @@ definitions:
|
||||
$ref: '#/definitions/repo.DocumentOut'
|
||||
id:
|
||||
type: string
|
||||
primary:
|
||||
type: boolean
|
||||
type:
|
||||
type: string
|
||||
updatedAt:
|
||||
@@ -68,6 +61,8 @@ definitions:
|
||||
type: object
|
||||
repo.ItemAttachmentUpdate:
|
||||
properties:
|
||||
primary:
|
||||
type: boolean
|
||||
title:
|
||||
type: string
|
||||
type:
|
||||
@@ -135,6 +130,8 @@ definitions:
|
||||
type: array
|
||||
id:
|
||||
type: string
|
||||
imageId:
|
||||
type: string
|
||||
insured:
|
||||
type: boolean
|
||||
labels:
|
||||
@@ -145,7 +142,8 @@ definitions:
|
||||
description: Warranty
|
||||
type: boolean
|
||||
location:
|
||||
$ref: '#/definitions/repo.LocationSummary'
|
||||
allOf:
|
||||
- $ref: '#/definitions/repo.LocationSummary'
|
||||
description: Edges
|
||||
x-nullable: true
|
||||
x-omitempty: true
|
||||
@@ -159,7 +157,8 @@ definitions:
|
||||
description: Extras
|
||||
type: string
|
||||
parent:
|
||||
$ref: '#/definitions/repo.ItemSummary'
|
||||
allOf:
|
||||
- $ref: '#/definitions/repo.ItemSummary'
|
||||
x-nullable: true
|
||||
x-omitempty: true
|
||||
purchaseFrom:
|
||||
@@ -191,6 +190,15 @@ definitions:
|
||||
warrantyExpires:
|
||||
type: string
|
||||
type: object
|
||||
repo.ItemPatch:
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
quantity:
|
||||
type: integer
|
||||
x-nullable: true
|
||||
x-omitempty: true
|
||||
type: object
|
||||
repo.ItemSummary:
|
||||
properties:
|
||||
archived:
|
||||
@@ -201,6 +209,8 @@ definitions:
|
||||
type: string
|
||||
id:
|
||||
type: string
|
||||
imageId:
|
||||
type: string
|
||||
insured:
|
||||
type: boolean
|
||||
labels:
|
||||
@@ -208,7 +218,8 @@ definitions:
|
||||
$ref: '#/definitions/repo.LabelSummary'
|
||||
type: array
|
||||
location:
|
||||
$ref: '#/definitions/repo.LocationSummary'
|
||||
allOf:
|
||||
- $ref: '#/definitions/repo.LocationSummary'
|
||||
description: Edges
|
||||
x-nullable: true
|
||||
x-omitempty: true
|
||||
@@ -227,6 +238,7 @@ definitions:
|
||||
archived:
|
||||
type: boolean
|
||||
assetId:
|
||||
example: "0"
|
||||
type: string
|
||||
description:
|
||||
type: string
|
||||
@@ -287,7 +299,6 @@ definitions:
|
||||
warrantyDetails:
|
||||
type: string
|
||||
warrantyExpires:
|
||||
description: Sold
|
||||
type: string
|
||||
type: object
|
||||
repo.LabelCreate:
|
||||
@@ -312,10 +323,6 @@ definitions:
|
||||
type: string
|
||||
id:
|
||||
type: string
|
||||
items:
|
||||
items:
|
||||
$ref: '#/definitions/repo.ItemSummary'
|
||||
type: array
|
||||
name:
|
||||
type: string
|
||||
updatedAt:
|
||||
@@ -356,10 +363,6 @@ definitions:
|
||||
type: string
|
||||
id:
|
||||
type: string
|
||||
items:
|
||||
items:
|
||||
$ref: '#/definitions/repo.ItemSummary'
|
||||
type: array
|
||||
name:
|
||||
type: string
|
||||
parent:
|
||||
@@ -410,7 +413,6 @@ definitions:
|
||||
repo.MaintenanceEntry:
|
||||
properties:
|
||||
completedDate:
|
||||
description: Sold
|
||||
type: string
|
||||
cost:
|
||||
example: "0"
|
||||
@@ -422,13 +424,11 @@ definitions:
|
||||
name:
|
||||
type: string
|
||||
scheduledDate:
|
||||
description: Sold
|
||||
type: string
|
||||
type: object
|
||||
repo.MaintenanceEntryCreate:
|
||||
properties:
|
||||
completedDate:
|
||||
description: Sold
|
||||
type: string
|
||||
cost:
|
||||
example: "0"
|
||||
@@ -438,7 +438,6 @@ definitions:
|
||||
name:
|
||||
type: string
|
||||
scheduledDate:
|
||||
description: Sold
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
@@ -446,7 +445,6 @@ definitions:
|
||||
repo.MaintenanceEntryUpdate:
|
||||
properties:
|
||||
completedDate:
|
||||
description: Sold
|
||||
type: string
|
||||
cost:
|
||||
example: "0"
|
||||
@@ -456,7 +454,6 @@ definitions:
|
||||
name:
|
||||
type: string
|
||||
scheduledDate:
|
||||
description: Sold
|
||||
type: string
|
||||
type: object
|
||||
repo.MaintenanceLog:
|
||||
@@ -698,12 +695,17 @@ definitions:
|
||||
properties:
|
||||
item: {}
|
||||
type: object
|
||||
validate.ErrorResponse:
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
fields:
|
||||
type: string
|
||||
type: object
|
||||
info:
|
||||
contact:
|
||||
name: Don't
|
||||
description: Track, Manage, and Organize your Things.
|
||||
license:
|
||||
name: MIT
|
||||
title: Homebox API
|
||||
version: "1.0"
|
||||
paths:
|
||||
@@ -737,6 +739,21 @@ paths:
|
||||
summary: Ensures Import Refs
|
||||
tags:
|
||||
- Actions
|
||||
  /v1/actions/set-primary-photos:
    post:
      description: Sets the first photo of each item as the primary photo
      produces:
      - application/json
      responses:
        "200":
          description: OK
          schema:
            $ref: '#/definitions/v1.ActionAmountResult'
      security:
      - Bearer: []
      summary: Set Primary Photos
      tags:
      - Actions
|
||||
/v1/actions/zero-item-time-fields:
|
||||
post:
|
||||
description: Resets all item date fields to the beginning of the day
|
||||
@@ -994,6 +1011,31 @@ paths:
|
||||
summary: Get Item
|
||||
tags:
|
||||
- Items
|
||||
    patch:
      parameters:
      - description: Item ID
        in: path
        name: id
        required: true
        type: string
      - description: Item Data
        in: body
        name: payload
        required: true
        schema:
          $ref: '#/definitions/repo.ItemPatch'
      produces:
      - application/json
      responses:
        "200":
          description: OK
          schema:
            $ref: '#/definitions/repo.ItemOut'
      security:
      - Bearer: []
      summary: Update Item
      tags:
      - Items
|
||||
put:
|
||||
parameters:
|
||||
- description: Item ID
|
||||
@@ -1052,7 +1094,7 @@ paths:
|
||||
"422":
|
||||
description: Unprocessable Entity
|
||||
schema:
|
||||
$ref: '#/definitions/mid.ErrorResponse'
|
||||
$ref: '#/definitions/validate.ErrorResponse'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Create Item Attachment
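
Among the swagger.yaml changes above, a PATCH operation for /v1/items/{id} is added, backed by the new repo.ItemPatch schema (an id plus a nullable quantity). A minimal client-side sketch in Go; the host, port, item ID, and token are illustrative placeholders, not values taken from this compare:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Placeholders only: substitute a real host, item ID, and API token.
	const host = "http://localhost:7745"
	itemID := "00000000-0000-0000-0000-000000000000"

	body, _ := json.Marshal(map[string]any{
		"id":       itemID,
		"quantity": 2, // nullable in repo.ItemPatch; omit to leave the quantity untouched
	})

	req, _ := http.NewRequest(http.MethodPatch, host+"/api/v1/items/"+itemID, bytes.NewReader(body))
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println(resp.Status) // a 200 response carries the updated repo.ItemOut
}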
|
||||
|
||||
@@ -54,6 +54,7 @@ func main() {
|
||||
NewReReplace(` Services`, " "),
|
||||
NewReReplace(` V1`, " "),
|
||||
NewReReplace(`\?:`, ":"),
|
||||
NewReReplace(`(\w+):\s(.*null.*)`, "$1?: $2"), // make null union types optional
|
||||
NewReDate("createdAt"),
|
||||
NewReDate("updatedAt"),
|
||||
NewReDate("soldTime"),
|
||||
|
||||
@@ -3,72 +3,77 @@ module github.com/hay-kot/homebox/backend
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
ariga.io/atlas v0.10.0
|
||||
entgo.io/ent v0.11.10
|
||||
github.com/ardanlabs/conf/v3 v3.1.5
|
||||
github.com/containrrr/shoutrrr v0.7.1
|
||||
github.com/go-chi/chi/v5 v5.0.8
|
||||
github.com/go-playground/validator/v10 v10.12.0
|
||||
github.com/gocarina/gocsv v0.0.0-20230226133904-70c27cb2918a
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gorilla/schema v1.2.0
|
||||
github.com/hay-kot/safeserve v0.0.1
|
||||
github.com/mattn/go-sqlite3 v1.14.16
|
||||
ariga.io/atlas v0.15.0
|
||||
entgo.io/ent v0.12.5
|
||||
github.com/ardanlabs/conf/v3 v3.1.7
|
||||
github.com/containrrr/shoutrrr v0.8.0
|
||||
github.com/go-chi/chi/v5 v5.0.10
|
||||
github.com/go-playground/validator/v10 v10.16.0
|
||||
github.com/gocarina/gocsv v0.0.0-20230616125104-99d496ca653d
|
||||
github.com/google/uuid v1.4.0
|
||||
github.com/gorilla/schema v1.2.1
|
||||
github.com/hay-kot/httpkit v0.0.3
|
||||
github.com/mattn/go-sqlite3 v1.14.18
|
||||
github.com/olahol/melody v1.1.4
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rs/zerolog v1.29.0
|
||||
github.com/stretchr/testify v1.8.2
|
||||
github.com/rs/zerolog v1.31.0
|
||||
github.com/stretchr/testify v1.8.4
|
||||
github.com/swaggo/http-swagger v1.3.4
|
||||
github.com/swaggo/swag v1.8.11
|
||||
github.com/yeqown/go-qrcode/v2 v2.2.1
|
||||
github.com/yeqown/go-qrcode/writer/standard v1.2.1
|
||||
golang.org/x/crypto v0.7.0
|
||||
modernc.org/sqlite v1.21.0
|
||||
github.com/swaggo/swag v1.16.2
|
||||
github.com/yeqown/go-qrcode/v2 v2.2.2
|
||||
github.com/yeqown/go-qrcode/writer/standard v1.2.2
|
||||
golang.org/x/crypto v0.15.0
|
||||
modernc.org/sqlite v1.27.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/KyleBanks/depth v1.2.1 // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
||||
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/fatih/color v1.16.0 // indirect
|
||||
github.com/fogleman/gg v1.3.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
|
||||
github.com/go-openapi/inflect v0.19.0 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/spec v0.20.7 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.20.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/spec v0.20.9 // indirect
|
||||
github.com/go-openapi/swag v0.22.4 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.15.0 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.19.1 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
|
||||
github.com/leodido/go-urn v1.2.2 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/swaggo/files v1.0.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.11.0 // indirect
|
||||
github.com/swaggo/files v1.0.1 // indirect
|
||||
github.com/yeqown/reedsolomon v1.0.0 // indirect
|
||||
github.com/zclconf/go-cty v1.12.1 // indirect
|
||||
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
golang.org/x/net v0.8.0 // indirect
|
||||
golang.org/x/sys v0.6.0 // indirect
|
||||
golang.org/x/text v0.8.0 // indirect
|
||||
golang.org/x/tools v0.6.1-0.20230222164832-25d2519c8696 // indirect
|
||||
github.com/zclconf/go-cty v1.14.1 // indirect
|
||||
golang.org/x/image v0.14.0 // indirect
|
||||
golang.org/x/mod v0.14.0 // indirect
|
||||
golang.org/x/net v0.18.0 // indirect
|
||||
golang.org/x/sys v0.14.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/tools v0.15.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
lukechampine.com/uint128 v1.2.0 // indirect
|
||||
modernc.org/cc/v3 v3.40.0 // indirect
|
||||
modernc.org/ccgo/v3 v3.16.13 // indirect
|
||||
modernc.org/libc v1.22.3 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/memory v1.5.0 // indirect
|
||||
lukechampine.com/uint128 v1.3.0 // indirect
|
||||
modernc.org/cc/v3 v3.41.0 // indirect
|
||||
modernc.org/ccgo/v3 v3.16.15 // indirect
|
||||
modernc.org/libc v1.34.4 // indirect
|
||||
modernc.org/mathutil v1.6.0 // indirect
|
||||
modernc.org/memory v1.7.2 // indirect
|
||||
modernc.org/opt v0.1.3 // indirect
|
||||
modernc.org/strutil v1.1.3 // indirect
|
||||
modernc.org/token v1.0.1 // indirect
|
||||
modernc.org/strutil v1.2.0 // indirect
|
||||
modernc.org/token v1.1.0 // indirect
|
||||
)
|
||||
|
||||
backend/go.sum: 1322 changed lines (file diff suppressed because it is too large)
@@ -6,6 +6,7 @@ import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/faker"
|
||||
@@ -13,7 +14,8 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
fk = faker.NewFaker()
|
||||
fk = faker.NewFaker()
|
||||
tbus = eventbus.New()
|
||||
|
||||
tCtx = Context{}
|
||||
tClient *ent.Client
|
||||
@@ -58,7 +60,7 @@ func TestMain(m *testing.M) {
|
||||
}
|
||||
|
||||
tClient = client
|
||||
tRepos = repo.New(tClient, os.TempDir()+"/homebox")
|
||||
tRepos = repo.New(tClient, tbus, os.TempDir()+"/homebox")
|
||||
tSvc = New(tRepos)
|
||||
defer client.Close()
|
||||
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
Import Ref,Location,Labels,Quantity,Name,Description,Insured,Serial Number,Mode Number,Manufacturer,Notes,Purchase From,Purchased Price,Purchased Time,Lifetime Warranty,Warranty Expires,Warranty Details,Sold To,Sold Price,Sold Time,Sold Notes
|
||||
A,Garage,IOT;Home Assistant; Z-Wave,1,Zooz Universal Relay ZEN17,Description 1,TRUE,,ZEN17,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021,
|
||||
B,Living Room,IOT;Home Assistant; Z-Wave,1,Zooz Motion Sensor,Description 2,FALSE,,ZSE18,Zooz,,Amazon,29.95,10/15/2021,,10/15/2021,,,,10/15/2021,
|
||||
C,Office,IOT;Home Assistant; Z-Wave,1,Zooz 110v Power Switch,Description 3,TRUE,,ZEN15,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021,
|
||||
D,Downstairs,IOT;Home Assistant; Z-Wave,1,Ecolink Z-Wave PIR Motion Sensor,Description 4,FALSE,,PIRZWAVE2.5-ECO,Ecolink,,Amazon,35.58,10/21/2020,,10/21/2020,,,,10/21/2020,
|
||||
E,Entry,IOT;Home Assistant; Z-Wave,1,Yale Security Touchscreen Deadbolt,Description 5,TRUE,,YRD226ZW2619,Yale,,Amazon,120.39,10/14/2020,,10/14/2020,,,,10/14/2020,
|
||||
F,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,Description 6,FALSE,,39351,Honeywell,,Amazon,65.98,09/30/2020,,09/30/2020,,,,09/30/2020,
|
||||
|
@@ -1,7 +0,0 @@
|
||||
Import Ref Location Labels Quantity Name Description Insured Serial Number Mode Number Manufacturer Notes Purchase From Purchased Price Purchased Time Lifetime Warranty Warranty Expires Warranty Details Sold To Sold Price Sold Time Sold Notes
|
||||
A Garage IOT;Home Assistant; Z-Wave 1 Zooz Universal Relay ZEN17 Description 1 TRUE ZEN17 Zooz Amazon 39.95 10/13/2021 10/13/2021 10/13/2021
|
||||
B Living Room IOT;Home Assistant; Z-Wave 1 Zooz Motion Sensor Description 2 FALSE ZSE18 Zooz Amazon 29.95 10/15/2021 10/15/2021 10/15/2021
|
||||
C Office IOT;Home Assistant; Z-Wave 1 Zooz 110v Power Switch Description 3 TRUE ZEN15 Zooz Amazon 39.95 10/13/2021 10/13/2021 10/13/2021
|
||||
D Downstairs IOT;Home Assistant; Z-Wave 1 Ecolink Z-Wave PIR Motion Sensor Description 4 FALSE PIRZWAVE2.5-ECO Ecolink Amazon 35.58 10/21/2020 10/21/2020 10/21/2020
|
||||
E Entry IOT;Home Assistant; Z-Wave 1 Yale Security Touchscreen Deadbolt Description 5 TRUE YRD226ZW2619 Yale Amazon 120.39 10/14/2020 10/14/2020 10/14/2020
|
||||
F Kitchen IOT;Home Assistant; Z-Wave 1 Smart Rocker Light Dimmer Description 6 FALSE 39351 Honeywell Amazon 65.98 09/30/2020 09/30/2020 09/30/2020
|
||||
|
@@ -0,0 +1,85 @@
// Package eventbus provides an interface for event bus.
package eventbus

import (
	"sync"

	"github.com/google/uuid"
)

type Event string

const (
	EventLabelMutation    Event = "label.mutation"
	EventLocationMutation Event = "location.mutation"
	EventItemMutation     Event = "item.mutation"
)

type GroupMutationEvent struct {
	GID uuid.UUID
}

type eventData struct {
	event Event
	data  any
}

type EventBus struct {
	started bool
	ch      chan eventData

	mu          sync.RWMutex
	subscribers map[Event][]func(any)
}

func New() *EventBus {
	return &EventBus{
		ch: make(chan eventData, 10),
		subscribers: map[Event][]func(any){
			EventLabelMutation:    {},
			EventLocationMutation: {},
			EventItemMutation:     {},
		},
	}
}

func (e *EventBus) Run() {
	if e.started {
		panic("event bus already started")
	}

	e.started = true

	for event := range e.ch {
		e.mu.RLock()
		arr, ok := e.subscribers[event.event]
		e.mu.RUnlock()

		if !ok {
			continue
		}

		for _, fn := range arr {
			fn(event.data)
		}
	}
}

func (e *EventBus) Publish(event Event, data any) {
	e.ch <- eventData{
		event: event,
		data:  data,
	}
}

func (e *EventBus) Subscribe(event Event, fn func(any)) {
	e.mu.Lock()
	defer e.mu.Unlock()

	arr, ok := e.subscribers[event]
	if !ok {
		panic("event not found")
	}

	e.subscribers[event] = append(arr, fn)
}
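
The new eventbus package above exposes New, Subscribe, Publish, and a blocking Run loop; elsewhere in this compare it is passed into repo.New. A minimal standalone sketch of that API (the handler body and the closing sleep are illustrative only):

package main

import (
	"fmt"
	"time"

	"github.com/google/uuid"
	"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
)

func main() {
	bus := eventbus.New()

	// Handlers are registered per event; Subscribe panics for events the bus does not know.
	bus.Subscribe(eventbus.EventItemMutation, func(data any) {
		if evt, ok := data.(eventbus.GroupMutationEvent); ok {
			fmt.Println("item mutated for group", evt.GID)
		}
	})

	// Run drains the channel and dispatches to subscribers; it blocks, so run it in a goroutine.
	go bus.Run()

	bus.Publish(eventbus.EventItemMutation, eventbus.GroupMutationEvent{GID: uuid.New()})

	// Give the asynchronous dispatch a moment before exiting (demo only).
	time.Sleep(100 * time.Millisecond)
}

The bus uses a buffered channel of ten events and panics on subscriptions to unknown events, so handlers should be registered before Run and only for the predeclared Event constants.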
|
||||
@@ -83,3 +83,13 @@ func parseLocationString(s string) LocationString {
func (csf LocationString) String() string {
	return strings.Join(csf, " / ")
}

func fromPathSlice(s []repo.LocationPath) LocationString {
	v := make(LocationString, len(s))

	for i := range s {
		v[i] = s[i].Name
	}

	return v
}
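
fromPathSlice converts the repo.LocationPath chain returned by PathForLoc into the exporter's existing "Parent / Child" string form. A documentation-style sketch with made-up location names; it would live inside the reporting package because fromPathSlice is unexported:

package reporting

import (
	"fmt"

	"github.com/hay-kot/homebox/backend/internal/data/repo"
)

// Example_fromPathSlice is an illustrative sketch; the location names are hypothetical.
func Example_fromPathSlice() {
	paths := []repo.LocationPath{
		{Name: "Garage"},
		{Name: "Shelf A"},
	}

	fmt.Println(fromPathSlice(paths).String())
	// Output: Garage / Shelf A
}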
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package reporting
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
@@ -8,6 +9,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/types"
|
||||
"github.com/rs/zerolog/log"
|
||||
@@ -151,7 +153,7 @@ func (s *IOSheet) Read(data io.Reader) error {
|
||||
}
|
||||
|
||||
// Write writes the sheet to a writer.
|
||||
func (s *IOSheet) ReadItems(items []repo.ItemOut) {
|
||||
func (s *IOSheet) ReadItems(ctx context.Context, items []repo.ItemOut, GID uuid.UUID, repos *repo.AllRepos) error {
|
||||
s.Rows = make([]ExportTSVRow, len(items))
|
||||
|
||||
extraHeaders := map[string]struct{}{}
|
||||
@@ -160,7 +162,15 @@ func (s *IOSheet) ReadItems(items []repo.ItemOut) {
|
||||
item := items[i]
|
||||
|
||||
// TODO: Support fetching nested locations
|
||||
locString := LocationString{item.Location.Name}
|
||||
locId := item.Location.ID
|
||||
|
||||
locPaths, err := repos.Locations.PathForLoc(context.Background(), GID, locId)
|
||||
if err != nil {
|
||||
log.Error().Err(err).Msg("could not get location path")
|
||||
return err
|
||||
}
|
||||
|
||||
locString := fromPathSlice(locPaths)
|
||||
|
||||
labelString := make([]string, len(item.Labels))
|
||||
|
||||
@@ -238,6 +248,8 @@ func (s *IOSheet) ReadItems(items []repo.ItemOut) {
|
||||
for _, h := range customHeaders {
|
||||
s.headers = append(s.headers, "HB.field."+h)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Writes the current sheet to a writer in TSV format.
|
||||
|
||||
@@ -20,12 +20,6 @@ var (
|
||||
|
||||
//go:embed .testdata/import/types.csv
|
||||
customTypesImportCSV []byte
|
||||
|
||||
//go:embed .testdata/import.csv
|
||||
CSVData_Comma []byte
|
||||
|
||||
//go:embed .testdata/import.tsv
|
||||
CSVData_Tab []byte
|
||||
)
|
||||
|
||||
func TestSheet_Read(t *testing.T) {
|
||||
@@ -189,7 +183,7 @@ func Test_determineSeparator(t *testing.T) {
|
||||
{
|
||||
name: "comma",
|
||||
args: args{
|
||||
data: CSVData_Comma,
|
||||
data: []byte("a,b,c"),
|
||||
},
|
||||
want: ',',
|
||||
wantErr: false,
|
||||
@@ -197,7 +191,7 @@ func Test_determineSeparator(t *testing.T) {
|
||||
{
|
||||
name: "tab",
|
||||
args: args{
|
||||
data: CSVData_Tab,
|
||||
data: []byte("a\tb\tc"),
|
||||
},
|
||||
want: '\t',
|
||||
wantErr: false,
|
||||
|
||||
@@ -337,7 +337,10 @@ func (svc *ItemService) ExportTSV(ctx context.Context, GID uuid.UUID) ([][]strin
|
||||
|
||||
sheet := reporting.IOSheet{}
|
||||
|
||||
sheet.ReadItems(items)
|
||||
err = sheet.ReadItems(ctx, items, GID, svc.repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return sheet.TSV()
|
||||
}
|
||||
|
||||
@@ -23,7 +23,7 @@ func (svc *ItemService) AttachmentPath(ctx context.Context, attachmentId uuid.UU
|
||||
|
||||
func (svc *ItemService) AttachmentUpdate(ctx Context, itemId uuid.UUID, data *repo.ItemAttachmentUpdate) (repo.ItemOut, error) {
|
||||
// Update Attachment
|
||||
attachment, err := svc.repo.Attachments.Update(ctx, data.ID, attachment.Type(data.Type))
|
||||
attachment, err := svc.repo.Attachments.Update(ctx, data.ID, data)
|
||||
if err != nil {
|
||||
return repo.ItemOut{}, err
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
|
||||
@@ -25,11 +26,14 @@ type Attachment struct {
|
||||
UpdatedAt time.Time `json:"updated_at,omitempty"`
|
||||
// Type holds the value of the "type" field.
|
||||
Type attachment.Type `json:"type,omitempty"`
|
||||
// Primary holds the value of the "primary" field.
|
||||
Primary bool `json:"primary,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the AttachmentQuery when eager-loading is set.
|
||||
Edges AttachmentEdges `json:"edges"`
|
||||
document_attachments *uuid.UUID
|
||||
item_attachments *uuid.UUID
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// AttachmentEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -74,6 +78,8 @@ func (*Attachment) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case attachment.FieldPrimary:
|
||||
values[i] = new(sql.NullBool)
|
||||
case attachment.FieldType:
|
||||
values[i] = new(sql.NullString)
|
||||
case attachment.FieldCreatedAt, attachment.FieldUpdatedAt:
|
||||
@@ -85,7 +91,7 @@ func (*Attachment) scanValues(columns []string) ([]any, error) {
|
||||
case attachment.ForeignKeys[1]: // item_attachments
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type Attachment", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -123,6 +129,12 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
|
||||
} else if value.Valid {
|
||||
a.Type = attachment.Type(value.String)
|
||||
}
|
||||
case attachment.FieldPrimary:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field primary", values[i])
|
||||
} else if value.Valid {
|
||||
a.Primary = value.Bool
|
||||
}
|
||||
case attachment.ForeignKeys[0]:
|
||||
if value, ok := values[i].(*sql.NullScanner); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field document_attachments", values[i])
|
||||
@@ -137,11 +149,19 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
|
||||
a.item_attachments = new(uuid.UUID)
|
||||
*a.item_attachments = *value.S.(*uuid.UUID)
|
||||
}
|
||||
default:
|
||||
a.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Attachment.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (a *Attachment) Value(name string) (ent.Value, error) {
|
||||
return a.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryItem queries the "item" edge of the Attachment entity.
|
||||
func (a *Attachment) QueryItem() *ItemQuery {
|
||||
return NewAttachmentClient(a.config).QueryItem(a)
|
||||
@@ -183,6 +203,9 @@ func (a *Attachment) String() string {
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("type=")
|
||||
builder.WriteString(fmt.Sprintf("%v", a.Type))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("primary=")
|
||||
builder.WriteString(fmt.Sprintf("%v", a.Primary))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
@@ -20,6 +22,8 @@ const (
|
||||
FieldUpdatedAt = "updated_at"
|
||||
// FieldType holds the string denoting the type field in the database.
|
||||
FieldType = "type"
|
||||
// FieldPrimary holds the string denoting the primary field in the database.
|
||||
FieldPrimary = "primary"
|
||||
// EdgeItem holds the string denoting the item edge name in mutations.
|
||||
EdgeItem = "item"
|
||||
// EdgeDocument holds the string denoting the document edge name in mutations.
|
||||
@@ -48,6 +52,7 @@ var Columns = []string{
|
||||
FieldCreatedAt,
|
||||
FieldUpdatedAt,
|
||||
FieldType,
|
||||
FieldPrimary,
|
||||
}
|
||||
|
||||
// ForeignKeys holds the SQL foreign-keys that are owned by the "attachments"
|
||||
@@ -79,6 +84,8 @@ var (
|
||||
DefaultUpdatedAt func() time.Time
|
||||
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||
UpdateDefaultUpdatedAt func() time.Time
|
||||
// DefaultPrimary holds the default value on creation for the "primary" field.
|
||||
DefaultPrimary bool
|
||||
// DefaultID holds the default value on creation for the "id" field.
|
||||
DefaultID func() uuid.UUID
|
||||
)
|
||||
@@ -111,3 +118,59 @@ func TypeValidator(_type Type) error {
|
||||
return fmt.Errorf("attachment: invalid enum value for type field: %q", _type)
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the Attachment queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByType orders the results by the type field.
|
||||
func ByType(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldType, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPrimary orders the results by the primary field.
|
||||
func ByPrimary(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPrimary, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByItemField orders the results by item field.
|
||||
func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByDocumentField orders the results by document field.
|
||||
func ByDocumentField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newDocumentStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newItemStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(ItemInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
|
||||
)
|
||||
}
|
||||
func newDocumentStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(DocumentInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
|
||||
)
|
||||
}
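
The generated order options and predicates in this compare expose the new primary column to attachment queries. A hedged sketch of how application code could use them with the generated ent client; the client construction, context, and the sql.OrderDesc option are assumptions based on ent v0.12 conventions rather than lines from this diff:

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"

	"github.com/hay-kot/homebox/backend/internal/data/ent"
	"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
)

// primaryFirst lists attachments with primary ones first, then by creation time.
// Sketch only: it assumes an already-opened *ent.Client and a valid context.
func primaryFirst(ctx context.Context, client *ent.Client) ([]*ent.Attachment, error) {
	return client.Attachment.Query().
		Order(attachment.ByPrimary(sql.OrderDesc()), attachment.ByCreatedAt()).
		All(ctx)
}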
|
||||
|
||||
@@ -66,6 +66,11 @@ func UpdatedAt(v time.Time) predicate.Attachment {
|
||||
return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// Primary applies equality check predicate on the "primary" field. It's identical to PrimaryEQ.
|
||||
func Primary(v bool) predicate.Attachment {
|
||||
return predicate.Attachment(sql.FieldEQ(FieldPrimary, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.Attachment {
|
||||
return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v))
|
||||
@@ -166,6 +171,16 @@ func TypeNotIn(vs ...Type) predicate.Attachment {
|
||||
return predicate.Attachment(sql.FieldNotIn(FieldType, vs...))
|
||||
}
|
||||
|
||||
// PrimaryEQ applies the EQ predicate on the "primary" field.
|
||||
func PrimaryEQ(v bool) predicate.Attachment {
|
||||
return predicate.Attachment(sql.FieldEQ(FieldPrimary, v))
|
||||
}
|
||||
|
||||
// PrimaryNEQ applies the NEQ predicate on the "primary" field.
|
||||
func PrimaryNEQ(v bool) predicate.Attachment {
|
||||
return predicate.Attachment(sql.FieldNEQ(FieldPrimary, v))
|
||||
}
|
||||
|
||||
// HasItem applies the HasEdge predicate on the "item" edge.
|
||||
func HasItem() predicate.Attachment {
|
||||
return predicate.Attachment(func(s *sql.Selector) {
|
||||
@@ -180,11 +195,7 @@ func HasItem() predicate.Attachment {
|
||||
// HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates).
|
||||
func HasItemWith(preds ...predicate.Item) predicate.Attachment {
|
||||
return predicate.Attachment(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(ItemInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
|
||||
)
|
||||
step := newItemStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -207,11 +218,7 @@ func HasDocument() predicate.Attachment {
|
||||
// HasDocumentWith applies the HasEdge predicate on the "document" edge with a given conditions (other predicates).
|
||||
func HasDocumentWith(preds ...predicate.Document) predicate.Attachment {
|
||||
return predicate.Attachment(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(DocumentInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
|
||||
)
|
||||
step := newDocumentStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -222,32 +229,15 @@ func HasDocumentWith(preds ...predicate.Document) predicate.Attachment {
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.Attachment) predicate.Attachment {
|
||||
return predicate.Attachment(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.Attachment(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.Attachment) predicate.Attachment {
|
||||
return predicate.Attachment(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.Attachment(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.Attachment) predicate.Attachment {
|
||||
return predicate.Attachment(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
return predicate.Attachment(sql.NotPredicates(p))
|
||||
}
|
||||
|
||||
@@ -65,6 +65,20 @@ func (ac *AttachmentCreate) SetNillableType(a *attachment.Type) *AttachmentCreat
|
||||
return ac
|
||||
}
|
||||
|
||||
// SetPrimary sets the "primary" field.
|
||||
func (ac *AttachmentCreate) SetPrimary(b bool) *AttachmentCreate {
|
||||
ac.mutation.SetPrimary(b)
|
||||
return ac
|
||||
}
|
||||
|
||||
// SetNillablePrimary sets the "primary" field if the given value is not nil.
|
||||
func (ac *AttachmentCreate) SetNillablePrimary(b *bool) *AttachmentCreate {
|
||||
if b != nil {
|
||||
ac.SetPrimary(*b)
|
||||
}
|
||||
return ac
|
||||
}
|
||||
|
||||
// SetID sets the "id" field.
|
||||
func (ac *AttachmentCreate) SetID(u uuid.UUID) *AttachmentCreate {
|
||||
ac.mutation.SetID(u)
|
||||
@@ -109,7 +123,7 @@ func (ac *AttachmentCreate) Mutation() *AttachmentMutation {
|
||||
// Save creates the Attachment in the database.
|
||||
func (ac *AttachmentCreate) Save(ctx context.Context) (*Attachment, error) {
|
||||
ac.defaults()
|
||||
return withHooks[*Attachment, AttachmentMutation](ctx, ac.sqlSave, ac.mutation, ac.hooks)
|
||||
return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
@@ -148,6 +162,10 @@ func (ac *AttachmentCreate) defaults() {
|
||||
v := attachment.DefaultType
|
||||
ac.mutation.SetType(v)
|
||||
}
|
||||
if _, ok := ac.mutation.Primary(); !ok {
|
||||
v := attachment.DefaultPrimary
|
||||
ac.mutation.SetPrimary(v)
|
||||
}
|
||||
if _, ok := ac.mutation.ID(); !ok {
|
||||
v := attachment.DefaultID()
|
||||
ac.mutation.SetID(v)
|
||||
@@ -170,6 +188,9 @@ func (ac *AttachmentCreate) check() error {
|
||||
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := ac.mutation.Primary(); !ok {
|
||||
return &ValidationError{Name: "primary", err: errors.New(`ent: missing required field "Attachment.primary"`)}
|
||||
}
|
||||
if _, ok := ac.mutation.ItemID(); !ok {
|
||||
return &ValidationError{Name: "item", err: errors.New(`ent: missing required edge "Attachment.item"`)}
|
||||
}
|
||||
@@ -223,6 +244,10 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(attachment.FieldType, field.TypeEnum, value)
|
||||
_node.Type = value
|
||||
}
|
||||
if value, ok := ac.mutation.Primary(); ok {
|
||||
_spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
|
||||
_node.Primary = value
|
||||
}
|
||||
if nodes := ac.mutation.ItemIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -263,11 +288,15 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
|
||||
// AttachmentCreateBulk is the builder for creating many Attachment entities in bulk.
|
||||
type AttachmentCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*AttachmentCreate
|
||||
}
|
||||
|
||||
// Save creates the Attachment entities in the database.
|
||||
func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error) {
|
||||
if acb.err != nil {
|
||||
return nil, acb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(acb.builders))
|
||||
nodes := make([]*Attachment, len(acb.builders))
|
||||
mutators := make([]Mutator, len(acb.builders))
|
||||
@@ -284,8 +313,8 @@ func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,7 @@ func (ad *AttachmentDelete) Where(ps ...predicate.Attachment) *AttachmentDelete
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (ad *AttachmentDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks[int, AttachmentMutation](ctx, ad.sqlExec, ad.mutation, ad.hooks)
|
||||
return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
type AttachmentQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
order []attachment.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.Attachment
|
||||
withItem *ItemQuery
|
||||
@@ -58,7 +58,7 @@ func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery {
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (aq *AttachmentQuery) Order(o ...OrderFunc) *AttachmentQuery {
|
||||
func (aq *AttachmentQuery) Order(o ...attachment.OrderOption) *AttachmentQuery {
|
||||
aq.order = append(aq.order, o...)
|
||||
return aq
|
||||
}
|
||||
@@ -296,7 +296,7 @@ func (aq *AttachmentQuery) Clone() *AttachmentQuery {
|
||||
return &AttachmentQuery{
|
||||
config: aq.config,
|
||||
ctx: aq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, aq.order...),
|
||||
order: append([]attachment.OrderOption{}, aq.order...),
|
||||
inters: append([]Interceptor{}, aq.inters...),
|
||||
predicates: append([]predicate.Attachment{}, aq.predicates...),
|
||||
withItem: aq.withItem.Clone(),
|
||||
|
||||
@@ -51,6 +51,20 @@ func (au *AttachmentUpdate) SetNillableType(a *attachment.Type) *AttachmentUpdat
|
||||
return au
|
||||
}
|
||||
|
||||
// SetPrimary sets the "primary" field.
|
||||
func (au *AttachmentUpdate) SetPrimary(b bool) *AttachmentUpdate {
|
||||
au.mutation.SetPrimary(b)
|
||||
return au
|
||||
}
|
||||
|
||||
// SetNillablePrimary sets the "primary" field if the given value is not nil.
|
||||
func (au *AttachmentUpdate) SetNillablePrimary(b *bool) *AttachmentUpdate {
|
||||
if b != nil {
|
||||
au.SetPrimary(*b)
|
||||
}
|
||||
return au
|
||||
}
|
||||
|
||||
// SetItemID sets the "item" edge to the Item entity by ID.
|
||||
func (au *AttachmentUpdate) SetItemID(id uuid.UUID) *AttachmentUpdate {
|
||||
au.mutation.SetItemID(id)
|
||||
@@ -93,7 +107,7 @@ func (au *AttachmentUpdate) ClearDocument() *AttachmentUpdate {
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (au *AttachmentUpdate) Save(ctx context.Context) (int, error) {
|
||||
au.defaults()
|
||||
return withHooks[int, AttachmentMutation](ctx, au.sqlSave, au.mutation, au.hooks)
|
||||
return withHooks(ctx, au.sqlSave, au.mutation, au.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -160,6 +174,9 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
if value, ok := au.mutation.GetType(); ok {
|
||||
_spec.SetField(attachment.FieldType, field.TypeEnum, value)
|
||||
}
|
||||
if value, ok := au.mutation.Primary(); ok {
|
||||
_spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
|
||||
}
|
||||
if au.mutation.ItemCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -258,6 +275,20 @@ func (auo *AttachmentUpdateOne) SetNillableType(a *attachment.Type) *AttachmentU
|
||||
return auo
|
||||
}
|
||||
|
||||
// SetPrimary sets the "primary" field.
|
||||
func (auo *AttachmentUpdateOne) SetPrimary(b bool) *AttachmentUpdateOne {
|
||||
auo.mutation.SetPrimary(b)
|
||||
return auo
|
||||
}
|
||||
|
||||
// SetNillablePrimary sets the "primary" field if the given value is not nil.
|
||||
func (auo *AttachmentUpdateOne) SetNillablePrimary(b *bool) *AttachmentUpdateOne {
|
||||
if b != nil {
|
||||
auo.SetPrimary(*b)
|
||||
}
|
||||
return auo
|
||||
}
|
||||
|
||||
// SetItemID sets the "item" edge to the Item entity by ID.
|
||||
func (auo *AttachmentUpdateOne) SetItemID(id uuid.UUID) *AttachmentUpdateOne {
|
||||
auo.mutation.SetItemID(id)
|
||||
@@ -313,7 +344,7 @@ func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *Attachme
|
||||
// Save executes the query and returns the updated Attachment entity.
|
||||
func (auo *AttachmentUpdateOne) Save(ctx context.Context) (*Attachment, error) {
|
||||
auo.defaults()
|
||||
return withHooks[*Attachment, AttachmentMutation](ctx, auo.sqlSave, auo.mutation, auo.hooks)
|
||||
return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -397,6 +428,9 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
|
||||
if value, ok := auo.mutation.GetType(); ok {
|
||||
_spec.SetField(attachment.FieldType, field.TypeEnum, value)
|
||||
}
|
||||
if value, ok := auo.mutation.Primary(); ok {
|
||||
_spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
|
||||
}
|
||||
if auo.mutation.ItemCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
|
||||
@@ -23,6 +24,7 @@ type AuthRoles struct {
|
||||
// The values are being populated by the AuthRolesQuery when eager-loading is set.
|
||||
Edges AuthRolesEdges `json:"edges"`
|
||||
auth_tokens_roles *uuid.UUID
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// AuthRolesEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -59,7 +61,7 @@ func (*AuthRoles) scanValues(columns []string) ([]any, error) {
|
||||
case authroles.ForeignKeys[0]: // auth_tokens_roles
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type AuthRoles", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -92,11 +94,19 @@ func (ar *AuthRoles) assignValues(columns []string, values []any) error {
|
||||
ar.auth_tokens_roles = new(uuid.UUID)
|
||||
*ar.auth_tokens_roles = *value.S.(*uuid.UUID)
|
||||
}
|
||||
default:
|
||||
ar.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the AuthRoles.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (ar *AuthRoles) Value(name string) (ent.Value, error) {
|
||||
return ar.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryToken queries the "token" edge of the AuthRoles entity.
|
||||
func (ar *AuthRoles) QueryToken() *AuthTokensQuery {
|
||||
return NewAuthRolesClient(ar.config).QueryToken(ar)
|
||||
|
||||
@@ -4,6 +4,9 @@ package authroles
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -79,3 +82,30 @@ func RoleValidator(r Role) error {
|
||||
return fmt.Errorf("authroles: invalid enum value for role field: %q", r)
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the AuthRoles queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByRole orders the results by the role field.
|
||||
func ByRole(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldRole, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByTokenField orders the results by token field.
|
||||
func ByTokenField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newTokenStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newTokenStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(TokenInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -87,11 +87,7 @@ func HasToken() predicate.AuthRoles {
|
||||
// HasTokenWith applies the HasEdge predicate on the "token" edge with a given conditions (other predicates).
|
||||
func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles {
|
||||
return predicate.AuthRoles(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(TokenInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn),
|
||||
)
|
||||
step := newTokenStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -102,32 +98,15 @@ func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles {
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.AuthRoles) predicate.AuthRoles {
|
||||
return predicate.AuthRoles(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.AuthRoles(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.AuthRoles) predicate.AuthRoles {
|
||||
return predicate.AuthRoles(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.AuthRoles(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.AuthRoles) predicate.AuthRoles {
|
||||
return predicate.AuthRoles(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
return predicate.AuthRoles(sql.NotPredicates(p))
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ func (arc *AuthRolesCreate) Mutation() *AuthRolesMutation {
|
||||
// Save creates the AuthRoles in the database.
|
||||
func (arc *AuthRolesCreate) Save(ctx context.Context) (*AuthRoles, error) {
|
||||
arc.defaults()
|
||||
return withHooks[*AuthRoles, AuthRolesMutation](ctx, arc.sqlSave, arc.mutation, arc.hooks)
|
||||
return withHooks(ctx, arc.sqlSave, arc.mutation, arc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
@@ -158,11 +158,15 @@ func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) {
|
||||
// AuthRolesCreateBulk is the builder for creating many AuthRoles entities in bulk.
|
||||
type AuthRolesCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*AuthRolesCreate
|
||||
}
|
||||
|
||||
// Save creates the AuthRoles entities in the database.
|
||||
func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error) {
|
||||
if arcb.err != nil {
|
||||
return nil, arcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(arcb.builders))
|
||||
nodes := make([]*AuthRoles, len(arcb.builders))
|
||||
mutators := make([]Mutator, len(arcb.builders))
|
||||
@@ -179,8 +183,8 @@ func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error)
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, arcb.builders[i+1].mutation)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,7 @@ func (ard *AuthRolesDelete) Where(ps ...predicate.AuthRoles) *AuthRolesDelete {
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (ard *AuthRolesDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks[int, AuthRolesMutation](ctx, ard.sqlExec, ard.mutation, ard.hooks)
|
||||
return withHooks(ctx, ard.sqlExec, ard.mutation, ard.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
type AuthRolesQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
order []authroles.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.AuthRoles
|
||||
withToken *AuthTokensQuery
|
||||
@@ -56,7 +56,7 @@ func (arq *AuthRolesQuery) Unique(unique bool) *AuthRolesQuery {
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (arq *AuthRolesQuery) Order(o ...OrderFunc) *AuthRolesQuery {
|
||||
func (arq *AuthRolesQuery) Order(o ...authroles.OrderOption) *AuthRolesQuery {
|
||||
arq.order = append(arq.order, o...)
|
||||
return arq
|
||||
}
|
||||
@@ -272,7 +272,7 @@ func (arq *AuthRolesQuery) Clone() *AuthRolesQuery {
|
||||
return &AuthRolesQuery{
|
||||
config: arq.config,
|
||||
ctx: arq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, arq.order...),
|
||||
order: append([]authroles.OrderOption{}, arq.order...),
|
||||
inters: append([]Interceptor{}, arq.inters...),
|
||||
predicates: append([]predicate.AuthRoles{}, arq.predicates...),
|
||||
withToken: arq.withToken.Clone(),
|
||||
|
||||
@@ -75,7 +75,7 @@ func (aru *AuthRolesUpdate) ClearToken() *AuthRolesUpdate {
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (aru *AuthRolesUpdate) Save(ctx context.Context) (int, error) {
|
||||
return withHooks[int, AuthRolesMutation](ctx, aru.sqlSave, aru.mutation, aru.hooks)
|
||||
return withHooks(ctx, aru.sqlSave, aru.mutation, aru.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -233,7 +233,7 @@ func (aruo *AuthRolesUpdateOne) Select(field string, fields ...string) *AuthRole
|
||||
|
||||
// Save executes the query and returns the updated AuthRoles entity.
|
||||
func (aruo *AuthRolesUpdateOne) Save(ctx context.Context) (*AuthRoles, error) {
|
||||
return withHooks[*AuthRoles, AuthRolesMutation](ctx, aruo.sqlSave, aruo.mutation, aruo.hooks)
|
||||
return withHooks(ctx, aruo.sqlSave, aruo.mutation, aruo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
|
||||
@@ -31,6 +32,7 @@ type AuthTokens struct {
|
||||
// The values are being populated by the AuthTokensQuery when eager-loading is set.
|
||||
Edges AuthTokensEdges `json:"edges"`
|
||||
user_auth_tokens *uuid.UUID
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// AuthTokensEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -84,7 +86,7 @@ func (*AuthTokens) scanValues(columns []string) ([]any, error) {
|
||||
case authtokens.ForeignKeys[0]: // user_auth_tokens
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type AuthTokens", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -135,11 +137,19 @@ func (at *AuthTokens) assignValues(columns []string, values []any) error {
|
||||
at.user_auth_tokens = new(uuid.UUID)
|
||||
*at.user_auth_tokens = *value.S.(*uuid.UUID)
|
||||
}
|
||||
default:
|
||||
at.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the AuthTokens.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (at *AuthTokens) Value(name string) (ent.Value, error) {
|
||||
return at.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryUser queries the "user" edge of the AuthTokens entity.
|
||||
func (at *AuthTokens) QueryUser() *UserQuery {
|
||||
return NewAuthTokensClient(at.config).QueryUser(at)
|
||||
|
||||
@@ -5,6 +5,8 @@ package authtokens
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
@@ -85,3 +87,54 @@ var (
|
||||
// DefaultID holds the default value on creation for the "id" field.
|
||||
DefaultID func() uuid.UUID
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the AuthTokens queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByExpiresAt orders the results by the expires_at field.
|
||||
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUserField orders the results by user field.
|
||||
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByRolesField orders the results by roles field.
|
||||
func ByRolesField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newRolesStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
func newUserStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(UserInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||
)
|
||||
}
|
||||
func newRolesStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(RolesInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -250,11 +250,7 @@ func HasUser() predicate.AuthTokens {
|
||||
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
|
||||
func HasUserWith(preds ...predicate.User) predicate.AuthTokens {
|
||||
return predicate.AuthTokens(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(UserInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||
)
|
||||
step := newUserStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -277,11 +273,7 @@ func HasRoles() predicate.AuthTokens {
|
||||
// HasRolesWith applies the HasEdge predicate on the "roles" edge with a given conditions (other predicates).
|
||||
func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens {
|
||||
return predicate.AuthTokens(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(RolesInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn),
|
||||
)
|
||||
step := newRolesStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -292,32 +284,15 @@ func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens {
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.AuthTokens) predicate.AuthTokens {
|
||||
return predicate.AuthTokens(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.AuthTokens(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.AuthTokens) predicate.AuthTokens {
|
||||
return predicate.AuthTokens(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.AuthTokens(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.AuthTokens) predicate.AuthTokens {
|
||||
return predicate.AuthTokens(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
return predicate.AuthTokens(sql.NotPredicates(p))
|
||||
}
|
||||
|
||||
@@ -131,7 +131,7 @@ func (atc *AuthTokensCreate) Mutation() *AuthTokensMutation {
|
||||
// Save creates the AuthTokens in the database.
|
||||
func (atc *AuthTokensCreate) Save(ctx context.Context) (*AuthTokens, error) {
|
||||
atc.defaults()
|
||||
return withHooks[*AuthTokens, AuthTokensMutation](ctx, atc.sqlSave, atc.mutation, atc.hooks)
|
||||
return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
@@ -280,11 +280,15 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
|
||||
// AuthTokensCreateBulk is the builder for creating many AuthTokens entities in bulk.
|
||||
type AuthTokensCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*AuthTokensCreate
|
||||
}
|
||||
|
||||
// Save creates the AuthTokens entities in the database.
|
||||
func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, error) {
|
||||
if atcb.err != nil {
|
||||
return nil, atcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(atcb.builders))
|
||||
nodes := make([]*AuthTokens, len(atcb.builders))
|
||||
mutators := make([]Mutator, len(atcb.builders))
|
||||
@@ -301,8 +305,8 @@ func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, erro
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, atcb.builders[i+1].mutation)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,7 @@ func (atd *AuthTokensDelete) Where(ps ...predicate.AuthTokens) *AuthTokensDelete
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (atd *AuthTokensDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks[int, AuthTokensMutation](ctx, atd.sqlExec, atd.mutation, atd.hooks)
|
||||
return withHooks(ctx, atd.sqlExec, atd.mutation, atd.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
type AuthTokensQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
order []authtokens.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.AuthTokens
|
||||
withUser *UserQuery
|
||||
@@ -59,7 +59,7 @@ func (atq *AuthTokensQuery) Unique(unique bool) *AuthTokensQuery {
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (atq *AuthTokensQuery) Order(o ...OrderFunc) *AuthTokensQuery {
|
||||
func (atq *AuthTokensQuery) Order(o ...authtokens.OrderOption) *AuthTokensQuery {
|
||||
atq.order = append(atq.order, o...)
|
||||
return atq
|
||||
}
|
||||
@@ -297,7 +297,7 @@ func (atq *AuthTokensQuery) Clone() *AuthTokensQuery {
|
||||
return &AuthTokensQuery{
|
||||
config: atq.config,
|
||||
ctx: atq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, atq.order...),
|
||||
order: append([]authtokens.OrderOption{}, atq.order...),
|
||||
inters: append([]Interceptor{}, atq.inters...),
|
||||
predicates: append([]predicate.AuthTokens{}, atq.predicates...),
|
||||
withUser: atq.withUser.Clone(),
|
||||
@@ -494,7 +494,7 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery
|
||||
}
|
||||
query.withFKs = true
|
||||
query.Where(predicate.AuthRoles(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(authtokens.RolesColumn, fks...))
|
||||
s.Where(sql.InValues(s.C(authtokens.RolesColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
@@ -507,7 +507,7 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery
|
||||
}
|
||||
node, ok := nodeids[*fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID)
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
|
||||
@@ -115,7 +115,7 @@ func (atu *AuthTokensUpdate) ClearRoles() *AuthTokensUpdate {
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (atu *AuthTokensUpdate) Save(ctx context.Context) (int, error) {
|
||||
atu.defaults()
|
||||
return withHooks[int, AuthTokensMutation](ctx, atu.sqlSave, atu.mutation, atu.hooks)
|
||||
return withHooks(ctx, atu.sqlSave, atu.mutation, atu.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -341,7 +341,7 @@ func (atuo *AuthTokensUpdateOne) Select(field string, fields ...string) *AuthTok
|
||||
// Save executes the query and returns the updated AuthTokens entity.
|
||||
func (atuo *AuthTokensUpdateOne) Save(ctx context.Context) (*AuthTokens, error) {
|
||||
atuo.defaults()
|
||||
return withHooks[*AuthTokens, AuthTokensMutation](ctx, atuo.sqlSave, atuo.mutation, atuo.hooks)
|
||||
return withHooks(ctx, atuo.sqlSave, atuo.mutation, atuo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/migrate"
|
||||
@@ -65,9 +66,7 @@ type Client struct {
|
||||
|
||||
// NewClient creates a new client configured with the given options.
|
||||
func NewClient(opts ...Option) *Client {
|
||||
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
|
||||
cfg.options(opts...)
|
||||
client := &Client{config: cfg}
|
||||
client := &Client{config: newConfig(opts...)}
|
||||
client.init()
|
||||
return client
|
||||
}
|
||||
@@ -107,6 +106,13 @@ type (
|
||||
Option func(*config)
|
||||
)
|
||||
|
||||
// newConfig creates a new config for the client.
|
||||
func newConfig(opts ...Option) config {
|
||||
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
|
||||
cfg.options(opts...)
|
||||
return cfg
|
||||
}
|
||||
|
||||
// options applies the options on the config object.
|
||||
func (c *config) options(opts ...Option) {
|
||||
for _, opt := range opts {
|
||||
@@ -154,11 +160,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error)
|
||||
}
|
||||
}
|
||||
|
||||
// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
|
||||
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
|
||||
|
||||
// Tx returns a new transactional client. The provided context
|
||||
// is used until the transaction is committed or rolled back.
|
||||
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
||||
if _, ok := c.driver.(*txDriver); ok {
|
||||
return nil, errors.New("ent: cannot start a transaction within a transaction")
|
||||
return nil, ErrTxStarted
|
||||
}
|
||||
tx, err := newTx(ctx, c.driver)
|
||||
if err != nil {
|
||||
@@ -330,6 +339,21 @@ func (c *AttachmentClient) CreateBulk(builders ...*AttachmentCreate) *Attachment
|
||||
return &AttachmentCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *AttachmentClient) MapCreateBulk(slice any, setFunc func(*AttachmentCreate, int)) *AttachmentCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &AttachmentCreateBulk{err: fmt.Errorf("calling to AttachmentClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*AttachmentCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &AttachmentCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Attachment.
|
||||
func (c *AttachmentClient) Update() *AttachmentUpdate {
|
||||
mutation := newAttachmentMutation(c.config, OpUpdate)
|
||||
@@ -480,6 +504,21 @@ func (c *AuthRolesClient) CreateBulk(builders ...*AuthRolesCreate) *AuthRolesCre
|
||||
return &AuthRolesCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *AuthRolesClient) MapCreateBulk(slice any, setFunc func(*AuthRolesCreate, int)) *AuthRolesCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &AuthRolesCreateBulk{err: fmt.Errorf("calling to AuthRolesClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*AuthRolesCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &AuthRolesCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for AuthRoles.
|
||||
func (c *AuthRolesClient) Update() *AuthRolesUpdate {
|
||||
mutation := newAuthRolesMutation(c.config, OpUpdate)
|
||||
@@ -614,6 +653,21 @@ func (c *AuthTokensClient) CreateBulk(builders ...*AuthTokensCreate) *AuthTokens
|
||||
return &AuthTokensCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *AuthTokensClient) MapCreateBulk(slice any, setFunc func(*AuthTokensCreate, int)) *AuthTokensCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &AuthTokensCreateBulk{err: fmt.Errorf("calling to AuthTokensClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*AuthTokensCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &AuthTokensCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for AuthTokens.
|
||||
func (c *AuthTokensClient) Update() *AuthTokensUpdate {
|
||||
mutation := newAuthTokensMutation(c.config, OpUpdate)
|
||||
@@ -764,6 +818,21 @@ func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreate
|
||||
return &DocumentCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *DocumentClient) MapCreateBulk(slice any, setFunc func(*DocumentCreate, int)) *DocumentCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &DocumentCreateBulk{err: fmt.Errorf("calling to DocumentClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*DocumentCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &DocumentCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Document.
|
||||
func (c *DocumentClient) Update() *DocumentUpdate {
|
||||
mutation := newDocumentMutation(c.config, OpUpdate)
|
||||
@@ -914,6 +983,21 @@ func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk {
|
||||
return &GroupCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*GroupCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &GroupCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Group.
|
||||
func (c *GroupClient) Update() *GroupUpdate {
|
||||
mutation := newGroupMutation(c.config, OpUpdate)
|
||||
@@ -1144,6 +1228,21 @@ func (c *GroupInvitationTokenClient) CreateBulk(builders ...*GroupInvitationToke
|
||||
return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *GroupInvitationTokenClient) MapCreateBulk(slice any, setFunc func(*GroupInvitationTokenCreate, int)) *GroupInvitationTokenCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &GroupInvitationTokenCreateBulk{err: fmt.Errorf("calling to GroupInvitationTokenClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*GroupInvitationTokenCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for GroupInvitationToken.
|
||||
func (c *GroupInvitationTokenClient) Update() *GroupInvitationTokenUpdate {
|
||||
mutation := newGroupInvitationTokenMutation(c.config, OpUpdate)
|
||||
@@ -1278,6 +1377,21 @@ func (c *ItemClient) CreateBulk(builders ...*ItemCreate) *ItemCreateBulk {
|
||||
return &ItemCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ItemClient) MapCreateBulk(slice any, setFunc func(*ItemCreate, int)) *ItemCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ItemCreateBulk{err: fmt.Errorf("calling to ItemClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ItemCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ItemCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Item.
|
||||
func (c *ItemClient) Update() *ItemUpdate {
|
||||
mutation := newItemMutation(c.config, OpUpdate)
|
||||
@@ -1524,6 +1638,21 @@ func (c *ItemFieldClient) CreateBulk(builders ...*ItemFieldCreate) *ItemFieldCre
|
||||
return &ItemFieldCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *ItemFieldClient) MapCreateBulk(slice any, setFunc func(*ItemFieldCreate, int)) *ItemFieldCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &ItemFieldCreateBulk{err: fmt.Errorf("calling to ItemFieldClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*ItemFieldCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &ItemFieldCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for ItemField.
|
||||
func (c *ItemFieldClient) Update() *ItemFieldUpdate {
|
||||
mutation := newItemFieldMutation(c.config, OpUpdate)
|
||||
@@ -1658,6 +1787,21 @@ func (c *LabelClient) CreateBulk(builders ...*LabelCreate) *LabelCreateBulk {
|
||||
return &LabelCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *LabelClient) MapCreateBulk(slice any, setFunc func(*LabelCreate, int)) *LabelCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &LabelCreateBulk{err: fmt.Errorf("calling to LabelClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*LabelCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &LabelCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Label.
|
||||
func (c *LabelClient) Update() *LabelUpdate {
|
||||
mutation := newLabelMutation(c.config, OpUpdate)
|
||||
@@ -1808,6 +1952,21 @@ func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreate
|
||||
return &LocationCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *LocationClient) MapCreateBulk(slice any, setFunc func(*LocationCreate, int)) *LocationCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &LocationCreateBulk{err: fmt.Errorf("calling to LocationClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*LocationCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &LocationCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Location.
|
||||
func (c *LocationClient) Update() *LocationUpdate {
|
||||
mutation := newLocationMutation(c.config, OpUpdate)
|
||||
@@ -1990,6 +2149,21 @@ func (c *MaintenanceEntryClient) CreateBulk(builders ...*MaintenanceEntryCreate)
|
||||
return &MaintenanceEntryCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *MaintenanceEntryClient) MapCreateBulk(slice any, setFunc func(*MaintenanceEntryCreate, int)) *MaintenanceEntryCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &MaintenanceEntryCreateBulk{err: fmt.Errorf("calling to MaintenanceEntryClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*MaintenanceEntryCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &MaintenanceEntryCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for MaintenanceEntry.
|
||||
func (c *MaintenanceEntryClient) Update() *MaintenanceEntryUpdate {
|
||||
mutation := newMaintenanceEntryMutation(c.config, OpUpdate)
|
||||
@@ -2124,6 +2298,21 @@ func (c *NotifierClient) CreateBulk(builders ...*NotifierCreate) *NotifierCreate
|
||||
return &NotifierCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *NotifierClient) MapCreateBulk(slice any, setFunc func(*NotifierCreate, int)) *NotifierCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &NotifierCreateBulk{err: fmt.Errorf("calling to NotifierClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*NotifierCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &NotifierCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Notifier.
|
||||
func (c *NotifierClient) Update() *NotifierUpdate {
|
||||
mutation := newNotifierMutation(c.config, OpUpdate)
|
||||
@@ -2274,6 +2463,21 @@ func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
|
||||
return &UserCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
|
||||
// a builder and applies setFunc on it.
|
||||
func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk {
|
||||
rv := reflect.ValueOf(slice)
|
||||
if rv.Kind() != reflect.Slice {
|
||||
return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)}
|
||||
}
|
||||
builders := make([]*UserCreate, rv.Len())
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
builders[i] = c.Create()
|
||||
setFunc(builders[i], i)
|
||||
}
|
||||
return &UserCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for User.
|
||||
func (c *UserClient) Update() *UserUpdate {
|
||||
mutation := newUserMutation(c.config, OpUpdate)
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
|
||||
@@ -30,6 +31,7 @@ type Document struct {
|
||||
// The values are being populated by the DocumentQuery when eager-loading is set.
|
||||
Edges DocumentEdges `json:"edges"`
|
||||
group_documents *uuid.UUID
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// DocumentEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -79,7 +81,7 @@ func (*Document) scanValues(columns []string) ([]any, error) {
|
||||
case document.ForeignKeys[0]: // group_documents
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type Document", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -130,11 +132,19 @@ func (d *Document) assignValues(columns []string, values []any) error {
|
||||
d.group_documents = new(uuid.UUID)
|
||||
*d.group_documents = *value.S.(*uuid.UUID)
|
||||
}
|
||||
default:
|
||||
d.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Document.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (d *Document) Value(name string) (ent.Value, error) {
|
||||
return d.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryGroup queries the "group" edge of the Document entity.
|
||||
func (d *Document) QueryGroup() *GroupQuery {
|
||||
return NewDocumentClient(d.config).QueryGroup(d)
|
||||
|
||||
@@ -5,6 +5,8 @@ package document
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
@@ -87,3 +89,66 @@ var (
|
||||
// DefaultID holds the default value on creation for the "id" field.
|
||||
DefaultID func() uuid.UUID
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the Document queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByTitle orders the results by the title field.
|
||||
func ByTitle(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldTitle, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPath orders the results by the path field.
|
||||
func ByPath(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPath, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByGroupField orders the results by group field.
|
||||
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByAttachmentsCount orders the results by attachments count.
|
||||
func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByAttachments orders the results by attachments terms.
|
||||
func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
func newGroupStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
}
|
||||
func newAttachmentsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(AttachmentsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -300,11 +300,7 @@ func HasGroup() predicate.Document {
|
||||
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||
func HasGroupWith(preds ...predicate.Group) predicate.Document {
|
||||
return predicate.Document(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
step := newGroupStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -327,11 +323,7 @@ func HasAttachments() predicate.Document {
|
||||
// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates).
|
||||
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document {
|
||||
return predicate.Document(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(AttachmentsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
|
||||
)
|
||||
step := newAttachmentsStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -342,32 +334,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document {
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.Document) predicate.Document {
|
||||
return predicate.Document(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.Document(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.Document) predicate.Document {
|
||||
return predicate.Document(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.Document(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.Document) predicate.Document {
|
||||
return predicate.Document(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
return predicate.Document(sql.NotPredicates(p))
|
||||
}
|
||||
|
||||
@@ -111,7 +111,7 @@ func (dc *DocumentCreate) Mutation() *DocumentMutation {
|
||||
// Save creates the Document in the database.
|
||||
func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) {
|
||||
dc.defaults()
|
||||
return withHooks[*Document, DocumentMutation](ctx, dc.sqlSave, dc.mutation, dc.hooks)
|
||||
return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
@@ -269,11 +269,15 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
|
||||
// DocumentCreateBulk is the builder for creating many Document entities in bulk.
|
||||
type DocumentCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*DocumentCreate
|
||||
}
|
||||
|
||||
// Save creates the Document entities in the database.
|
||||
func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
|
||||
if dcb.err != nil {
|
||||
return nil, dcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(dcb.builders))
|
||||
nodes := make([]*Document, len(dcb.builders))
|
||||
mutators := make([]Mutator, len(dcb.builders))
|
||||
@@ -290,8 +294,8 @@ func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,7 @@ func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete {
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks[int, DocumentMutation](ctx, dd.sqlExec, dd.mutation, dd.hooks)
|
||||
return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
type DocumentQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
order []document.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.Document
|
||||
withGroup *GroupQuery
|
||||
@@ -59,7 +59,7 @@ func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery {
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery {
|
||||
func (dq *DocumentQuery) Order(o ...document.OrderOption) *DocumentQuery {
|
||||
dq.order = append(dq.order, o...)
|
||||
return dq
|
||||
}
|
||||
@@ -297,7 +297,7 @@ func (dq *DocumentQuery) Clone() *DocumentQuery {
|
||||
return &DocumentQuery{
|
||||
config: dq.config,
|
||||
ctx: dq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, dq.order...),
|
||||
order: append([]document.OrderOption{}, dq.order...),
|
||||
inters: append([]Interceptor{}, dq.inters...),
|
||||
predicates: append([]predicate.Document{}, dq.predicates...),
|
||||
withGroup: dq.withGroup.Clone(),
|
||||
@@ -498,7 +498,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
|
||||
}
|
||||
query.withFKs = true
|
||||
query.Where(predicate.Attachment(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(document.AttachmentsColumn, fks...))
|
||||
s.Where(sql.InValues(s.C(document.AttachmentsColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
@@ -511,7 +511,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
|
||||
}
|
||||
node, ok := nodeids[*fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID)
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
|
||||
@@ -43,12 +43,28 @@ func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate {
|
||||
return du
|
||||
}
|
||||
|
||||
// SetNillableTitle sets the "title" field if the given value is not nil.
|
||||
func (du *DocumentUpdate) SetNillableTitle(s *string) *DocumentUpdate {
|
||||
if s != nil {
|
||||
du.SetTitle(*s)
|
||||
}
|
||||
return du
|
||||
}
|
||||
|
||||
// SetPath sets the "path" field.
|
||||
func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate {
|
||||
du.mutation.SetPath(s)
|
||||
return du
|
||||
}
|
||||
|
||||
// SetNillablePath sets the "path" field if the given value is not nil.
|
||||
func (du *DocumentUpdate) SetNillablePath(s *string) *DocumentUpdate {
|
||||
if s != nil {
|
||||
du.SetPath(*s)
|
||||
}
|
||||
return du
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate {
|
||||
du.mutation.SetGroupID(id)
|
||||
@@ -110,7 +126,7 @@ func (du *DocumentUpdate) RemoveAttachments(a ...*Attachment) *DocumentUpdate {
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (du *DocumentUpdate) Save(ctx context.Context) (int, error) {
|
||||
du.defaults()
|
||||
return withHooks[int, DocumentMutation](ctx, du.sqlSave, du.mutation, du.hooks)
|
||||
return withHooks(ctx, du.sqlSave, du.mutation, du.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -288,12 +304,28 @@ func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne {
|
||||
return duo
|
||||
}
|
||||
|
||||
// SetNillableTitle sets the "title" field if the given value is not nil.
|
||||
func (duo *DocumentUpdateOne) SetNillableTitle(s *string) *DocumentUpdateOne {
|
||||
if s != nil {
|
||||
duo.SetTitle(*s)
|
||||
}
|
||||
return duo
|
||||
}
|
||||
|
||||
// SetPath sets the "path" field.
|
||||
func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne {
|
||||
duo.mutation.SetPath(s)
|
||||
return duo
|
||||
}
|
||||
|
||||
// SetNillablePath sets the "path" field if the given value is not nil.
|
||||
func (duo *DocumentUpdateOne) SetNillablePath(s *string) *DocumentUpdateOne {
|
||||
if s != nil {
|
||||
duo.SetPath(*s)
|
||||
}
|
||||
return duo
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne {
|
||||
duo.mutation.SetGroupID(id)
|
||||
@@ -368,7 +400,7 @@ func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUp
|
||||
// Save executes the query and returns the updated Document entity.
|
||||
func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) {
|
||||
duo.defaults()
|
||||
return withHooks[*Document, DocumentMutation](ctx, duo.sqlSave, duo.mutation, duo.hooks)
|
||||
return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
@@ -72,45 +73,41 @@ func NewTxContext(parent context.Context, tx *Tx) context.Context {
|
||||
}
|
||||
|
||||
// OrderFunc applies an ordering on the sql selector.
|
||||
// Deprecated: Use Asc/Desc functions or the package builders instead.
|
||||
type OrderFunc func(*sql.Selector)
|
||||
|
||||
// columnChecker returns a function indicates if the column exists in the given column.
|
||||
func columnChecker(table string) func(string) error {
|
||||
checks := map[string]func(string) bool{
|
||||
attachment.Table: attachment.ValidColumn,
|
||||
authroles.Table: authroles.ValidColumn,
|
||||
authtokens.Table: authtokens.ValidColumn,
|
||||
document.Table: document.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
|
||||
item.Table: item.ValidColumn,
|
||||
itemfield.Table: itemfield.ValidColumn,
|
||||
label.Table: label.ValidColumn,
|
||||
location.Table: location.ValidColumn,
|
||||
maintenanceentry.Table: maintenanceentry.ValidColumn,
|
||||
notifier.Table: notifier.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
}
|
||||
check, ok := checks[table]
|
||||
if !ok {
|
||||
return func(string) error {
|
||||
return fmt.Errorf("unknown table %q", table)
|
||||
}
|
||||
}
|
||||
return func(column string) error {
|
||||
if !check(column) {
|
||||
return fmt.Errorf("unknown column %q for table %q", column, table)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
var (
|
||||
initCheck sync.Once
|
||||
columnCheck sql.ColumnCheck
|
||||
)
|
||||
|
||||
// columnChecker checks if the column exists in the given table.
|
||||
func checkColumn(table, column string) error {
|
||||
initCheck.Do(func() {
|
||||
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
|
||||
attachment.Table: attachment.ValidColumn,
|
||||
authroles.Table: authroles.ValidColumn,
|
||||
authtokens.Table: authtokens.ValidColumn,
|
||||
document.Table: document.ValidColumn,
|
||||
group.Table: group.ValidColumn,
|
||||
groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
|
||||
item.Table: item.ValidColumn,
|
||||
itemfield.Table: itemfield.ValidColumn,
|
||||
label.Table: label.ValidColumn,
|
||||
location.Table: location.ValidColumn,
|
||||
maintenanceentry.Table: maintenanceentry.ValidColumn,
|
||||
notifier.Table: notifier.ValidColumn,
|
||||
user.Table: user.ValidColumn,
|
||||
})
|
||||
})
|
||||
return columnCheck(table, column)
|
||||
}
|
||||
|
||||
// Asc applies the given fields in ASC order.
|
||||
func Asc(fields ...string) OrderFunc {
|
||||
func Asc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
check := columnChecker(s.TableName())
|
||||
for _, f := range fields {
|
||||
if err := check(f); err != nil {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Asc(s.C(f)))
|
||||
@@ -119,11 +116,10 @@ func Asc(fields ...string) OrderFunc {
|
||||
}
|
||||
|
||||
// Desc applies the given fields in DESC order.
|
||||
func Desc(fields ...string) OrderFunc {
|
||||
func Desc(fields ...string) func(*sql.Selector) {
|
||||
return func(s *sql.Selector) {
|
||||
check := columnChecker(s.TableName())
|
||||
for _, f := range fields {
|
||||
if err := check(f); err != nil {
|
||||
if err := checkColumn(s.TableName(), f); err != nil {
|
||||
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
|
||||
}
|
||||
s.OrderBy(sql.Desc(s.C(f)))
|
||||
@@ -155,8 +151,7 @@ func Count() AggregateFunc {
|
||||
// Max applies the "max" aggregation function on the given field of each group.
|
||||
func Max(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -167,8 +162,7 @@ func Max(field string) AggregateFunc {
|
||||
// Mean applies the "mean" aggregation function on the given field of each group.
|
||||
func Mean(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -179,8 +173,7 @@ func Mean(field string) AggregateFunc {
|
||||
// Min applies the "min" aggregation function on the given field of each group.
|
||||
func Min(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
@@ -191,8 +184,7 @@ func Min(field string) AggregateFunc {
|
||||
// Sum applies the "sum" aggregation function on the given field of each group.
|
||||
func Sum(field string) AggregateFunc {
|
||||
return func(s *sql.Selector) string {
|
||||
check := columnChecker(s.TableName())
|
||||
if err := check(field); err != nil {
|
||||
if err := checkColumn(s.TableName(), field); err != nil {
|
||||
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
|
||||
@@ -27,7 +28,8 @@ type Group struct {
|
||||
Currency group.Currency `json:"currency,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the GroupQuery when eager-loading is set.
|
||||
Edges GroupEdges `json:"edges"`
|
||||
Edges GroupEdges `json:"edges"`
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// GroupEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -126,7 +128,7 @@ func (*Group) scanValues(columns []string) ([]any, error) {
|
||||
case group.FieldID:
|
||||
values[i] = new(uuid.UUID)
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type Group", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -170,11 +172,19 @@ func (gr *Group) assignValues(columns []string, values []any) error {
|
||||
} else if value.Valid {
|
||||
gr.Currency = group.Currency(value.String)
|
||||
}
|
||||
default:
|
||||
gr.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Group.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (gr *Group) Value(name string) (ent.Value, error) {
|
||||
return gr.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryUsers queries the "users" edge of the Group entity.
|
||||
func (gr *Group) QueryUsers() *UserQuery {
|
||||
return NewGroupClient(gr.config).QueryUsers(gr)
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
@@ -129,24 +131,37 @@ const DefaultCurrency = CurrencyUsd
|
||||
|
||||
// Currency values.
|
||||
const (
|
||||
CurrencyUsd Currency = "usd"
|
||||
CurrencyAed Currency = "aed"
|
||||
CurrencyAud Currency = "aud"
|
||||
CurrencyBgn Currency = "bgn"
|
||||
CurrencyBrl Currency = "brl"
|
||||
CurrencyCad Currency = "cad"
|
||||
CurrencyChf Currency = "chf"
|
||||
CurrencyCzk Currency = "czk"
|
||||
CurrencyDkk Currency = "dkk"
|
||||
CurrencyEur Currency = "eur"
|
||||
CurrencyGbp Currency = "gbp"
|
||||
CurrencyHkd Currency = "hkd"
|
||||
CurrencyIdr Currency = "idr"
|
||||
CurrencyInr Currency = "inr"
|
||||
CurrencyJpy Currency = "jpy"
|
||||
CurrencyZar Currency = "zar"
|
||||
CurrencyAud Currency = "aud"
|
||||
CurrencyKrw Currency = "krw"
|
||||
CurrencyMxn Currency = "mxn"
|
||||
CurrencyNok Currency = "nok"
|
||||
CurrencyNzd Currency = "nzd"
|
||||
CurrencySek Currency = "sek"
|
||||
CurrencyDkk Currency = "dkk"
|
||||
CurrencyInr Currency = "inr"
|
||||
CurrencyRmb Currency = "rmb"
|
||||
CurrencyBgn Currency = "bgn"
|
||||
CurrencyChf Currency = "chf"
|
||||
CurrencyPln Currency = "pln"
|
||||
CurrencyTry Currency = "try"
|
||||
CurrencyRmb Currency = "rmb"
|
||||
CurrencyRon Currency = "ron"
|
||||
CurrencyCzk Currency = "czk"
|
||||
CurrencyRub Currency = "rub"
|
||||
CurrencySar Currency = "sar"
|
||||
CurrencySek Currency = "sek"
|
||||
CurrencySgd Currency = "sgd"
|
||||
CurrencyThb Currency = "thb"
|
||||
CurrencyTry Currency = "try"
|
||||
CurrencyUsd Currency = "usd"
|
||||
CurrencyXag Currency = "xag"
|
||||
CurrencyXau Currency = "xau"
|
||||
CurrencyZar Currency = "zar"
|
||||
)
|
||||
|
||||
func (c Currency) String() string {
|
||||
@@ -156,9 +171,184 @@ func (c Currency) String() string {
|
||||
// CurrencyValidator is a validator for the "currency" field enum values. It is called by the builders before save.
|
||||
func CurrencyValidator(c Currency) error {
|
||||
switch c {
|
||||
case CurrencyUsd, CurrencyEur, CurrencyGbp, CurrencyJpy, CurrencyZar, CurrencyAud, CurrencyNok, CurrencyNzd, CurrencySek, CurrencyDkk, CurrencyInr, CurrencyRmb, CurrencyBgn, CurrencyChf, CurrencyPln, CurrencyTry, CurrencyRon, CurrencyCzk:
|
||||
case CurrencyAed, CurrencyAud, CurrencyBgn, CurrencyBrl, CurrencyCad, CurrencyChf, CurrencyCzk, CurrencyDkk, CurrencyEur, CurrencyGbp, CurrencyHkd, CurrencyIdr, CurrencyInr, CurrencyJpy, CurrencyKrw, CurrencyMxn, CurrencyNok, CurrencyNzd, CurrencyPln, CurrencyRmb, CurrencyRon, CurrencyRub, CurrencySar, CurrencySek, CurrencySgd, CurrencyThb, CurrencyTry, CurrencyUsd, CurrencyXag, CurrencyXau, CurrencyZar:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("group: invalid enum value for currency field: %q", c)
|
||||
}
|
||||
}
|
||||
|
||||
// OrderOption defines the ordering options for the Group queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByName orders the results by the name field.
|
||||
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCurrency orders the results by the currency field.
|
||||
func ByCurrency(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCurrency, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUsersCount orders the results by users count.
|
||||
func ByUsersCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newUsersStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByUsers orders the results by users terms.
|
||||
func ByUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newUsersStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLocationsCount orders the results by locations count.
|
||||
func ByLocationsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newLocationsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLocations orders the results by locations terms.
|
||||
func ByLocations(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newLocationsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByItemsCount orders the results by items count.
|
||||
func ByItemsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByItems orders the results by items terms.
|
||||
func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLabelsCount orders the results by labels count.
|
||||
func ByLabelsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newLabelsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLabels orders the results by labels terms.
|
||||
func ByLabels(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newLabelsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByDocumentsCount orders the results by documents count.
|
||||
func ByDocumentsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newDocumentsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByDocuments orders the results by documents terms.
|
||||
func ByDocuments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newDocumentsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByInvitationTokensCount orders the results by invitation_tokens count.
|
||||
func ByInvitationTokensCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newInvitationTokensStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByInvitationTokens orders the results by invitation_tokens terms.
|
||||
func ByInvitationTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newInvitationTokensStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByNotifiersCount orders the results by notifiers count.
|
||||
func ByNotifiersCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newNotifiersStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByNotifiers orders the results by notifiers terms.
|
||||
func ByNotifiers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newNotifiersStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
func newUsersStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(UsersInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
|
||||
)
|
||||
}
|
||||
func newLocationsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LocationsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
|
||||
)
|
||||
}
|
||||
func newItemsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(ItemsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
|
||||
)
|
||||
}
|
||||
func newLabelsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LabelsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
|
||||
)
|
||||
}
|
||||
func newDocumentsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(DocumentsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
|
||||
)
|
||||
}
|
||||
func newInvitationTokensStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(InvitationTokensInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
|
||||
)
|
||||
}
|
||||
func newNotifiersStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(NotifiersInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -250,11 +250,7 @@ func HasUsers() predicate.Group {
// HasUsersWith applies the HasEdge predicate on the "users" edge with a given conditions (other predicates).
func HasUsersWith(preds ...predicate.User) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(UsersInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
)
step := newUsersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -277,11 +273,7 @@ func HasLocations() predicate.Group {
// HasLocationsWith applies the HasEdge predicate on the "locations" edge with a given conditions (other predicates).
func HasLocationsWith(preds ...predicate.Location) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(LocationsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
)
step := newLocationsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -304,11 +296,7 @@ func HasItems() predicate.Group {
// HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates).
func HasItemsWith(preds ...predicate.Item) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(ItemsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
)
step := newItemsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -331,11 +319,7 @@ func HasLabels() predicate.Group {
// HasLabelsWith applies the HasEdge predicate on the "labels" edge with a given conditions (other predicates).
func HasLabelsWith(preds ...predicate.Label) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(LabelsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
)
step := newLabelsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -358,11 +342,7 @@ func HasDocuments() predicate.Group {
// HasDocumentsWith applies the HasEdge predicate on the "documents" edge with a given conditions (other predicates).
func HasDocumentsWith(preds ...predicate.Document) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
)
step := newDocumentsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -385,11 +365,7 @@ func HasInvitationTokens() predicate.Group {
// HasInvitationTokensWith applies the HasEdge predicate on the "invitation_tokens" edge with a given conditions (other predicates).
func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(InvitationTokensInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
)
step := newInvitationTokensStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -412,11 +388,7 @@ func HasNotifiers() predicate.Group {
// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates).
func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(NotifiersInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
)
step := newNotifiersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
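Each HasXWith predicate now builds its graph step through the shared constructor instead of repeating the sqlgraph.NewStep call; callers are unaffected. A usage sketch, assuming the generated ent client is available as client and the generated string-field helper NameContains exists for items:

groups, err := client.Group.Query().
	Where(group.HasItemsWith(item.NameContains("drill"))).
	All(ctx)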
@@ -427,32 +399,15 @@ func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group {

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.Group(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.Group(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
p(s.Not())
})
return predicate.Group(sql.NotPredicates(p))
}

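And, Or and Not now delegate to the sql.AndPredicates, sql.OrPredicates and sql.NotPredicates helpers from the newer ent runtime instead of cloning the selector by hand; the behavior is intended to be equivalent. A small combination sketch, assuming the generated string-field helpers for the group name:

p := group.And(
	group.NameContains("home"),
	group.Not(group.NameHasPrefix("tmp")),
)
// p is a predicate.Group and can be passed to Where() exactly as before.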
@@ -203,7 +203,7 @@ func (gc *GroupCreate) Mutation() *GroupMutation {
// Save creates the Group in the database.
func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) {
gc.defaults()
return withHooks[*Group, GroupMutation](ctx, gc.sqlSave, gc.mutation, gc.hooks)
return withHooks(ctx, gc.sqlSave, gc.mutation, gc.hooks)
}

// SaveX calls Save and panics if Save returns an error.
@@ -441,11 +441,15 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
// GroupCreateBulk is the builder for creating many Group entities in bulk.
type GroupCreateBulk struct {
config
err error
builders []*GroupCreate
}

// Save creates the Group entities in the database.
func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
if gcb.err != nil {
return nil, gcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(gcb.builders))
nodes := make([]*Group, len(gcb.builders))
mutators := make([]Mutator, len(gcb.builders))
@@ -462,8 +466,8 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, gcb.builders[i+1].mutation)
} else {

@@ -27,7 +27,7 @@ func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {

// Exec executes the deletion query and returns how many vertices were deleted.
func (gd *GroupDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, GroupMutation](ctx, gd.sqlExec, gd.mutation, gd.hooks)
return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.

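The bulk builders gain an err field, so an error recorded while the bulk request is assembled is surfaced from Save instead of being dropped. A sketch, assuming the generated client and a names slice:

builders := make([]*ent.GroupCreate, 0, len(names))
for _, name := range names {
	builders = append(builders, client.Group.Create().SetName(name))
}
groups, err := client.Group.CreateBulk(builders...).Save(ctx)
if err != nil {
	// err now also covers failures captured while the bulk builder was constructed.
	return err
}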
@@ -27,7 +27,7 @@ import (
type GroupQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []group.OrderOption
inters []Interceptor
predicates []predicate.Group
withUsers *UserQuery
@@ -68,7 +68,7 @@ func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
}

// Order specifies how the records should be ordered.
func (gq *GroupQuery) Order(o ...OrderFunc) *GroupQuery {
func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery {
gq.order = append(gq.order, o...)
return gq
}
@@ -416,7 +416,7 @@ func (gq *GroupQuery) Clone() *GroupQuery {
return &GroupQuery{
config: gq.config,
ctx: gq.ctx.Clone(),
order: append([]OrderFunc{}, gq.order...),
order: append([]group.OrderOption{}, gq.order...),
inters: append([]Interceptor{}, gq.inters...),
predicates: append([]predicate.Group{}, gq.predicates...),
withUsers: gq.withUsers.Clone(),
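Order is now strongly typed: it takes group.OrderOption values produced by the generated By* helpers rather than untyped OrderFunc values. A sketch, assuming a ByCreatedAt helper exists in the group package (mirroring the ones shown below for the other entities) and that sql.OrderDesc is available in this ent version:

groups, err := client.Group.Query().
	Order(group.ByCreatedAt(sql.OrderDesc())).
	Limit(10).
	All(ctx)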
@@ -681,7 +681,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*
}
query.withFKs = true
query.Where(predicate.User(func(s *sql.Selector) {
s.Where(sql.InValues(group.UsersColumn, fks...))
s.Where(sql.InValues(s.C(group.UsersColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -694,7 +694,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_users" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_users" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -712,7 +712,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n
}
query.withFKs = true
query.Where(predicate.Location(func(s *sql.Selector) {
s.Where(sql.InValues(group.LocationsColumn, fks...))
s.Where(sql.InValues(s.C(group.LocationsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -725,7 +725,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_locations" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -743,7 +743,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
}
query.withFKs = true
query.Where(predicate.Item(func(s *sql.Selector) {
s.Where(sql.InValues(group.ItemsColumn, fks...))
s.Where(sql.InValues(s.C(group.ItemsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -756,7 +756,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_items" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_items" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -774,7 +774,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
query.withFKs = true
query.Where(predicate.Label(func(s *sql.Selector) {
s.Where(sql.InValues(group.LabelsColumn, fks...))
s.Where(sql.InValues(s.C(group.LabelsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -787,7 +787,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_labels" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_labels" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -805,7 +805,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n
}
query.withFKs = true
query.Where(predicate.Document(func(s *sql.Selector) {
s.Where(sql.InValues(group.DocumentsColumn, fks...))
s.Where(sql.InValues(s.C(group.DocumentsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -818,7 +818,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -836,7 +836,7 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
}
query.withFKs = true
query.Where(predicate.GroupInvitationToken(func(s *sql.Selector) {
s.Where(sql.InValues(group.InvitationTokensColumn, fks...))
s.Where(sql.InValues(s.C(group.InvitationTokensColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -849,7 +849,7 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -865,8 +865,11 @@ func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, n
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(notifier.FieldGroupID)
}
query.Where(predicate.Notifier(func(s *sql.Selector) {
s.Where(sql.InValues(group.NotifiersColumn, fks...))
s.Where(sql.InValues(s.C(group.NotifiersColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -876,7 +879,7 @@ func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, n
fk := n.GroupID
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}

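In the eager-loading code the foreign-key filter now wraps the column in s.C(...), which prefixes it with the selector's table alias and avoids ambiguous-column errors once the query joins other tables. The same qualification can be used in hand-written predicates; a sketch with an assumed fks slice:

pred := predicate.User(func(s *sql.Selector) {
	// s.C resolves to something like `users`.`group_users` rather than the bare column name.
	s.Where(sql.InValues(s.C(group.UsersColumn), fks...))
})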
@@ -48,6 +48,14 @@ func (gu *GroupUpdate) SetName(s string) *GroupUpdate {
return gu
}

// SetNillableName sets the "name" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate {
if s != nil {
gu.SetName(*s)
}
return gu
}

// SetCurrency sets the "currency" field.
func (gu *GroupUpdate) SetCurrency(gr group.Currency) *GroupUpdate {
gu.mutation.SetCurrency(gr)
@@ -322,7 +330,7 @@ func (gu *GroupUpdate) RemoveNotifiers(n ...*Notifier) *GroupUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
gu.defaults()
return withHooks[int, GroupMutation](ctx, gu.sqlSave, gu.mutation, gu.hooks)
return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
@@ -738,6 +746,14 @@ func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne {
return guo
}

// SetNillableName sets the "name" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne {
if s != nil {
guo.SetName(*s)
}
return guo
}

// SetCurrency sets the "currency" field.
func (guo *GroupUpdateOne) SetCurrency(gr group.Currency) *GroupUpdateOne {
guo.mutation.SetCurrency(gr)
@@ -1025,7 +1041,7 @@ func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOn
// Save executes the query and returns the updated Group entity.
func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) {
guo.defaults()
return withHooks[*Group, GroupMutation](ctx, guo.sqlSave, guo.mutation, guo.hooks)
return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks)
}

// SaveX is like Save, but panics if an error occurs.

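The update builders gain SetNillable* setters that only apply a value when the pointer is non-nil, which is convenient when optional fields arrive as pointers; the withHooks calls also drop their explicit type arguments and rely on type inference. A sketch, assuming the generated client:

// name is only written when the caller actually supplied it.
func renameGroup(ctx context.Context, client *ent.Client, id uuid.UUID, name *string) error {
	return client.Group.UpdateOneID(id).
		SetNillableName(name).
		Exec(ctx)
}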
@@ -7,6 +7,7 @@ import (
"strings"
"time"

"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -32,6 +33,7 @@ type GroupInvitationToken struct {
// The values are being populated by the GroupInvitationTokenQuery when eager-loading is set.
Edges GroupInvitationTokenEdges `json:"edges"`
group_invitation_tokens *uuid.UUID
selectValues sql.SelectValues
}

// GroupInvitationTokenEdges holds the relations/edges for other nodes in the graph.
@@ -72,7 +74,7 @@ func (*GroupInvitationToken) scanValues(columns []string) ([]any, error) {
case groupinvitationtoken.ForeignKeys[0]: // group_invitation_tokens
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type GroupInvitationToken", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -129,11 +131,19 @@ func (git *GroupInvitationToken) assignValues(columns []string, values []any) er
git.group_invitation_tokens = new(uuid.UUID)
*git.group_invitation_tokens = *value.S.(*uuid.UUID)
}
default:
git.selectValues.Set(columns[i], values[i])
}
}
return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the GroupInvitationToken.
// This includes values selected through modifiers, order, etc.
func (git *GroupInvitationToken) Value(name string) (ent.Value, error) {
return git.selectValues.Get(name)
}

// QueryGroup queries the "group" edge of the GroupInvitationToken entity.
func (git *GroupInvitationToken) QueryGroup() *GroupQuery {
return NewGroupInvitationTokenClient(git.config).QueryGroup(git)

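Unknown columns are no longer an error in scanValues; they are captured in the new selectValues set and exposed through the Value method, so values selected dynamically (for example through query modifiers or custom selects) can be read back off the entity. A sketch with a hypothetical column name:

// "expires_soon" is hypothetical; it must have been selected by the query that produced token.
raw, err := token.Value("expires_soon")
if err != nil {
	// the column was not part of this query's selection
}
_ = raw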
@@ -5,6 +5,8 @@ package groupinvitationtoken
import (
"time"

"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)

@@ -81,3 +83,45 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)

// OrderOption defines the ordering options for the GroupInvitationToken queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByExpiresAt orders the results by the expires_at field.
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
}

// ByUses orders the results by the uses field.
func ByUses(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUses, opts...).ToFunc()
}

// ByGroupField orders the results by group field.
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
}
}
func newGroupStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
}
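These OrderOption helpers replace the old untyped ordering functions; ByGroupField sorts tokens by a column of the joined group row through the shared newGroupStep. A usage sketch, assuming the generated client:

tokens, err := client.GroupInvitationToken.Query().
	Order(
		groupinvitationtoken.ByExpiresAt(),
		groupinvitationtoken.ByGroupField(group.FieldName),
	).
	All(ctx)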
|
||||
|
||||
@@ -295,11 +295,7 @@ func HasGroup() predicate.GroupInvitationToken {
|
||||
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||
func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken {
|
||||
return predicate.GroupInvitationToken(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
step := newGroupStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -310,32 +306,15 @@ func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken {
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken {
|
||||
return predicate.GroupInvitationToken(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.GroupInvitationToken(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken {
|
||||
return predicate.GroupInvitationToken(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.GroupInvitationToken(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.GroupInvitationToken) predicate.GroupInvitationToken {
|
||||
return predicate.GroupInvitationToken(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
return predicate.GroupInvitationToken(sql.NotPredicates(p))
|
||||
}
|
||||
|
||||
@@ -125,7 +125,7 @@ func (gitc *GroupInvitationTokenCreate) Mutation() *GroupInvitationTokenMutation
|
||||
// Save creates the GroupInvitationToken in the database.
|
||||
func (gitc *GroupInvitationTokenCreate) Save(ctx context.Context) (*GroupInvitationToken, error) {
|
||||
gitc.defaults()
|
||||
return withHooks[*GroupInvitationToken, GroupInvitationTokenMutation](ctx, gitc.sqlSave, gitc.mutation, gitc.hooks)
|
||||
return withHooks(ctx, gitc.sqlSave, gitc.mutation, gitc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
@@ -269,11 +269,15 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq
|
||||
// GroupInvitationTokenCreateBulk is the builder for creating many GroupInvitationToken entities in bulk.
|
||||
type GroupInvitationTokenCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*GroupInvitationTokenCreate
|
||||
}
|
||||
|
||||
// Save creates the GroupInvitationToken entities in the database.
|
||||
func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*GroupInvitationToken, error) {
|
||||
if gitcb.err != nil {
|
||||
return nil, gitcb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(gitcb.builders))
|
||||
nodes := make([]*GroupInvitationToken, len(gitcb.builders))
|
||||
mutators := make([]Mutator, len(gitcb.builders))
|
||||
@@ -290,8 +294,8 @@ func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*Group
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, gitcb.builders[i+1].mutation)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,7 @@ func (gitd *GroupInvitationTokenDelete) Where(ps ...predicate.GroupInvitationTok
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (gitd *GroupInvitationTokenDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks[int, GroupInvitationTokenMutation](ctx, gitd.sqlExec, gitd.mutation, gitd.hooks)
|
||||
return withHooks(ctx, gitd.sqlExec, gitd.mutation, gitd.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
type GroupInvitationTokenQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
order []groupinvitationtoken.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.GroupInvitationToken
|
||||
withGroup *GroupQuery
|
||||
@@ -56,7 +56,7 @@ func (gitq *GroupInvitationTokenQuery) Unique(unique bool) *GroupInvitationToken
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (gitq *GroupInvitationTokenQuery) Order(o ...OrderFunc) *GroupInvitationTokenQuery {
|
||||
func (gitq *GroupInvitationTokenQuery) Order(o ...groupinvitationtoken.OrderOption) *GroupInvitationTokenQuery {
|
||||
gitq.order = append(gitq.order, o...)
|
||||
return gitq
|
||||
}
|
||||
@@ -272,7 +272,7 @@ func (gitq *GroupInvitationTokenQuery) Clone() *GroupInvitationTokenQuery {
|
||||
return &GroupInvitationTokenQuery{
|
||||
config: gitq.config,
|
||||
ctx: gitq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, gitq.order...),
|
||||
order: append([]groupinvitationtoken.OrderOption{}, gitq.order...),
|
||||
inters: append([]Interceptor{}, gitq.inters...),
|
||||
predicates: append([]predicate.GroupInvitationToken{}, gitq.predicates...),
|
||||
withGroup: gitq.withGroup.Clone(),
|
||||
|
||||
@@ -110,7 +110,7 @@ func (gitu *GroupInvitationTokenUpdate) ClearGroup() *GroupInvitationTokenUpdate
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (gitu *GroupInvitationTokenUpdate) Save(ctx context.Context) (int, error) {
|
||||
gitu.defaults()
|
||||
return withHooks[int, GroupInvitationTokenMutation](ctx, gitu.sqlSave, gitu.mutation, gitu.hooks)
|
||||
return withHooks(ctx, gitu.sqlSave, gitu.mutation, gitu.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -309,7 +309,7 @@ func (gituo *GroupInvitationTokenUpdateOne) Select(field string, fields ...strin
|
||||
// Save executes the query and returns the updated GroupInvitationToken entity.
|
||||
func (gituo *GroupInvitationTokenUpdateOne) Save(ctx context.Context) (*GroupInvitationToken, error) {
|
||||
gituo.defaults()
|
||||
return withHooks[*GroupInvitationToken, GroupInvitationTokenMutation](ctx, gituo.sqlSave, gituo.mutation, gituo.hooks)
|
||||
return withHooks(ctx, gituo.sqlSave, gituo.mutation, gituo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
|
||||
@@ -71,6 +72,7 @@ type Item struct {
|
||||
group_items *uuid.UUID
|
||||
item_children *uuid.UUID
|
||||
location_items *uuid.UUID
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// ItemEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -204,7 +206,7 @@ func (*Item) scanValues(columns []string) ([]any, error) {
|
||||
case item.ForeignKeys[2]: // location_items
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type Item", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -383,11 +385,19 @@ func (i *Item) assignValues(columns []string, values []any) error {
|
||||
i.location_items = new(uuid.UUID)
|
||||
*i.location_items = *value.S.(*uuid.UUID)
|
||||
}
|
||||
default:
|
||||
i.selectValues.Set(columns[j], values[j])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the Item.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (i *Item) Value(name string) (ent.Value, error) {
|
||||
return i.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryGroup queries the "group" edge of the Item entity.
|
||||
func (i *Item) QueryGroup() *GroupQuery {
|
||||
return NewItemClient(i.config).QueryGroup(i)
|
||||
|
||||
@@ -5,6 +5,8 @@ package item
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
@@ -226,3 +228,273 @@ var (
|
||||
// DefaultID holds the default value on creation for the "id" field.
|
||||
DefaultID func() uuid.UUID
|
||||
)
|
||||
|
||||
// OrderOption defines the ordering options for the Item queries.
|
||||
type OrderOption func(*sql.Selector)
|
||||
|
||||
// ByID orders the results by the id field.
|
||||
func ByID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByCreatedAt orders the results by the created_at field.
|
||||
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByUpdatedAt orders the results by the updated_at field.
|
||||
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByName orders the results by the name field.
|
||||
func ByName(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldName, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByDescription orders the results by the description field.
|
||||
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldDescription, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByImportRef orders the results by the import_ref field.
|
||||
func ByImportRef(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldImportRef, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByNotes orders the results by the notes field.
|
||||
func ByNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldNotes, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByQuantity orders the results by the quantity field.
|
||||
func ByQuantity(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldQuantity, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByInsured orders the results by the insured field.
|
||||
func ByInsured(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldInsured, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByArchived orders the results by the archived field.
|
||||
func ByArchived(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldArchived, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByAssetID orders the results by the asset_id field.
|
||||
func ByAssetID(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldAssetID, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySerialNumber orders the results by the serial_number field.
|
||||
func BySerialNumber(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSerialNumber, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByModelNumber orders the results by the model_number field.
|
||||
func ByModelNumber(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldModelNumber, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByManufacturer orders the results by the manufacturer field.
|
||||
func ByManufacturer(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldManufacturer, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByLifetimeWarranty orders the results by the lifetime_warranty field.
|
||||
func ByLifetimeWarranty(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldLifetimeWarranty, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByWarrantyExpires orders the results by the warranty_expires field.
|
||||
func ByWarrantyExpires(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldWarrantyExpires, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByWarrantyDetails orders the results by the warranty_details field.
|
||||
func ByWarrantyDetails(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldWarrantyDetails, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPurchaseTime orders the results by the purchase_time field.
|
||||
func ByPurchaseTime(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPurchaseTime, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPurchaseFrom orders the results by the purchase_from field.
|
||||
func ByPurchaseFrom(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPurchaseFrom, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByPurchasePrice orders the results by the purchase_price field.
|
||||
func ByPurchasePrice(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldPurchasePrice, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoldTime orders the results by the sold_time field.
|
||||
func BySoldTime(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoldTime, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoldTo orders the results by the sold_to field.
|
||||
func BySoldTo(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoldTo, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoldPrice orders the results by the sold_price field.
|
||||
func BySoldPrice(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoldPrice, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// BySoldNotes orders the results by the sold_notes field.
|
||||
func BySoldNotes(opts ...sql.OrderTermOption) OrderOption {
|
||||
return sql.OrderByField(FieldSoldNotes, opts...).ToFunc()
|
||||
}
|
||||
|
||||
// ByGroupField orders the results by group field.
|
||||
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByParentField orders the results by parent field.
|
||||
func ByParentField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newParentStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByChildrenCount orders the results by children count.
|
||||
func ByChildrenCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newChildrenStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByChildren orders the results by children terms.
|
||||
func ByChildren(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newChildrenStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLabelCount orders the results by label count.
|
||||
func ByLabelCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newLabelStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLabel orders the results by label terms.
|
||||
func ByLabel(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newLabelStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByLocationField orders the results by location field.
|
||||
func ByLocationField(field string, opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newLocationStep(), sql.OrderByField(field, opts...))
|
||||
}
|
||||
}
|
||||
|
||||
// ByFieldsCount orders the results by fields count.
|
||||
func ByFieldsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newFieldsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByFields orders the results by fields terms.
|
||||
func ByFields(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newFieldsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByMaintenanceEntriesCount orders the results by maintenance_entries count.
|
||||
func ByMaintenanceEntriesCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newMaintenanceEntriesStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByMaintenanceEntries orders the results by maintenance_entries terms.
|
||||
func ByMaintenanceEntries(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newMaintenanceEntriesStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByAttachmentsCount orders the results by attachments count.
|
||||
func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...)
|
||||
}
|
||||
}
|
||||
|
||||
// ByAttachments orders the results by attachments terms.
|
||||
func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
|
||||
return func(s *sql.Selector) {
|
||||
sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...)
|
||||
}
|
||||
}
|
||||
func newGroupStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
}
|
||||
func newParentStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
|
||||
)
|
||||
}
|
||||
func newChildrenStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
|
||||
)
|
||||
}
|
||||
func newLabelStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LabelInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...),
|
||||
)
|
||||
}
|
||||
func newLocationStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LocationInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn),
|
||||
)
|
||||
}
|
||||
func newFieldsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(FieldsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn),
|
||||
)
|
||||
}
|
||||
func newMaintenanceEntriesStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(MaintenanceEntriesInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, MaintenanceEntriesTable, MaintenanceEntriesColumn),
|
||||
)
|
||||
}
|
||||
func newAttachmentsStep() *sqlgraph.Step {
|
||||
return sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(AttachmentsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
|
||||
)
|
||||
}
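The Item package follows the same pattern, including count- and term-based ordering across edges. A usage sketch, assuming the generated client and that sql.OrderDesc is available in this ent version:

items, err := client.Item.Query().
	Order(
		item.ByLocationField(location.FieldName),
		item.ByChildrenCount(sql.OrderDesc()),
	).
	All(ctx)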
|
||||
|
||||
@@ -1420,11 +1420,7 @@ func HasGroup() predicate.Item {
|
||||
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||
func HasGroupWith(preds ...predicate.Group) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
step := newGroupStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1447,11 +1443,7 @@ func HasParent() predicate.Item {
|
||||
// HasParentWith applies the HasEdge predicate on the "parent" edge with a given conditions (other predicates).
|
||||
func HasParentWith(preds ...predicate.Item) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, ParentTable, ParentColumn),
|
||||
)
|
||||
step := newParentStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1474,11 +1466,7 @@ func HasChildren() predicate.Item {
|
||||
// HasChildrenWith applies the HasEdge predicate on the "children" edge with a given conditions (other predicates).
|
||||
func HasChildrenWith(preds ...predicate.Item) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, ChildrenTable, ChildrenColumn),
|
||||
)
|
||||
step := newChildrenStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1501,11 +1489,7 @@ func HasLabel() predicate.Item {
|
||||
// HasLabelWith applies the HasEdge predicate on the "label" edge with a given conditions (other predicates).
|
||||
func HasLabelWith(preds ...predicate.Label) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LabelInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2M, true, LabelTable, LabelPrimaryKey...),
|
||||
)
|
||||
step := newLabelStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1528,11 +1512,7 @@ func HasLocation() predicate.Item {
|
||||
// HasLocationWith applies the HasEdge predicate on the "location" edge with a given conditions (other predicates).
|
||||
func HasLocationWith(preds ...predicate.Location) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(LocationInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, LocationTable, LocationColumn),
|
||||
)
|
||||
step := newLocationStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1555,11 +1535,7 @@ func HasFields() predicate.Item {
|
||||
// HasFieldsWith applies the HasEdge predicate on the "fields" edge with a given conditions (other predicates).
|
||||
func HasFieldsWith(preds ...predicate.ItemField) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(FieldsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, FieldsTable, FieldsColumn),
|
||||
)
|
||||
step := newFieldsStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1582,11 +1558,7 @@ func HasMaintenanceEntries() predicate.Item {
|
||||
// HasMaintenanceEntriesWith applies the HasEdge predicate on the "maintenance_entries" edge with a given conditions (other predicates).
|
||||
func HasMaintenanceEntriesWith(preds ...predicate.MaintenanceEntry) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(MaintenanceEntriesInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, MaintenanceEntriesTable, MaintenanceEntriesColumn),
|
||||
)
|
||||
step := newMaintenanceEntriesStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1609,11 +1581,7 @@ func HasAttachments() predicate.Item {
|
||||
// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates).
|
||||
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(AttachmentsInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
|
||||
)
|
||||
step := newAttachmentsStep()
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
@@ -1624,32 +1592,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Item {
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.Item) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.Item(sql.AndPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.Item) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
return predicate.Item(sql.OrPredicates(predicates...))
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.Item) predicate.Item {
|
||||
return predicate.Item(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
return predicate.Item(sql.NotPredicates(p))
|
||||
}
|
||||
|
||||
@@ -487,7 +487,7 @@ func (ic *ItemCreate) Mutation() *ItemMutation {
|
||||
// Save creates the Item in the database.
|
||||
func (ic *ItemCreate) Save(ctx context.Context) (*Item, error) {
|
||||
ic.defaults()
|
||||
return withHooks[*Item, ItemMutation](ctx, ic.sqlSave, ic.mutation, ic.hooks)
|
||||
return withHooks(ctx, ic.sqlSave, ic.mutation, ic.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
@@ -900,11 +900,15 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
|
||||
// ItemCreateBulk is the builder for creating many Item entities in bulk.
|
||||
type ItemCreateBulk struct {
|
||||
config
|
||||
err error
|
||||
builders []*ItemCreate
|
||||
}
|
||||
|
||||
// Save creates the Item entities in the database.
|
||||
func (icb *ItemCreateBulk) Save(ctx context.Context) ([]*Item, error) {
|
||||
if icb.err != nil {
|
||||
return nil, icb.err
|
||||
}
|
||||
specs := make([]*sqlgraph.CreateSpec, len(icb.builders))
|
||||
nodes := make([]*Item, len(icb.builders))
|
||||
mutators := make([]Mutator, len(icb.builders))
|
||||
@@ -921,8 +925,8 @@ func (icb *ItemCreateBulk) Save(ctx context.Context) ([]*Item, error) {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, icb.builders[i+1].mutation)
|
||||
} else {
|
||||
|
||||
@@ -27,7 +27,7 @@ func (id *ItemDelete) Where(ps ...predicate.Item) *ItemDelete {
|
||||
|
||||
// Exec executes the deletion query and returns how many vertices were deleted.
|
||||
func (id *ItemDelete) Exec(ctx context.Context) (int, error) {
|
||||
return withHooks[int, ItemMutation](ctx, id.sqlExec, id.mutation, id.hooks)
|
||||
return withHooks(ctx, id.sqlExec, id.mutation, id.hooks)
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
|
||||
@@ -26,7 +26,7 @@ import (
|
||||
type ItemQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
order []item.OrderOption
|
||||
inters []Interceptor
|
||||
predicates []predicate.Item
|
||||
withGroup *GroupQuery
|
||||
@@ -69,7 +69,7 @@ func (iq *ItemQuery) Unique(unique bool) *ItemQuery {
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (iq *ItemQuery) Order(o ...OrderFunc) *ItemQuery {
|
||||
func (iq *ItemQuery) Order(o ...item.OrderOption) *ItemQuery {
|
||||
iq.order = append(iq.order, o...)
|
||||
return iq
|
||||
}
|
||||
@@ -439,7 +439,7 @@ func (iq *ItemQuery) Clone() *ItemQuery {
|
||||
return &ItemQuery{
|
||||
config: iq.config,
|
||||
ctx: iq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, iq.order...),
|
||||
order: append([]item.OrderOption{}, iq.order...),
|
||||
inters: append([]Interceptor{}, iq.inters...),
|
||||
predicates: append([]predicate.Item{}, iq.predicates...),
|
||||
withGroup: iq.withGroup.Clone(),
|
||||
@@ -790,7 +790,7 @@ func (iq *ItemQuery) loadChildren(ctx context.Context, query *ItemQuery, nodes [
|
||||
}
|
||||
query.withFKs = true
|
||||
query.Where(predicate.Item(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(item.ChildrenColumn, fks...))
|
||||
s.Where(sql.InValues(s.C(item.ChildrenColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
@@ -803,7 +803,7 @@ func (iq *ItemQuery) loadChildren(ctx context.Context, query *ItemQuery, nodes [
|
||||
}
|
||||
node, ok := nodeids[*fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "item_children" returned %v for node %v`, *fk, n.ID)
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "item_children" returned %v for node %v`, *fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
@@ -914,7 +914,7 @@ func (iq *ItemQuery) loadFields(ctx context.Context, query *ItemFieldQuery, node
|
||||
}
|
||||
query.withFKs = true
|
||||
query.Where(predicate.ItemField(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(item.FieldsColumn, fks...))
|
||||
s.Where(sql.InValues(s.C(item.FieldsColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
@@ -927,7 +927,7 @@ func (iq *ItemQuery) loadFields(ctx context.Context, query *ItemFieldQuery, node
|
||||
}
|
||||
node, ok := nodeids[*fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "item_fields" returned %v for node %v`, *fk, n.ID)
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "item_fields" returned %v for node %v`, *fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
@@ -943,8 +943,11 @@ func (iq *ItemQuery) loadMaintenanceEntries(ctx context.Context, query *Maintena
|
||||
init(nodes[i])
|
||||
}
|
||||
}
|
||||
if len(query.ctx.Fields) > 0 {
|
||||
query.ctx.AppendFieldOnce(maintenanceentry.FieldItemID)
|
||||
}
|
||||
query.Where(predicate.MaintenanceEntry(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(item.MaintenanceEntriesColumn, fks...))
|
||||
s.Where(sql.InValues(s.C(item.MaintenanceEntriesColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
@@ -954,7 +957,7 @@ func (iq *ItemQuery) loadMaintenanceEntries(ctx context.Context, query *Maintena
|
||||
fk := n.ItemID
|
||||
node, ok := nodeids[fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "item_id" returned %v for node %v`, fk, n.ID)
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "item_id" returned %v for node %v`, fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
@@ -972,7 +975,7 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery
|
||||
}
|
||||
query.withFKs = true
|
||||
query.Where(predicate.Attachment(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(item.AttachmentsColumn, fks...))
|
||||
s.Where(sql.InValues(s.C(item.AttachmentsColumn), fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
@@ -985,7 +988,7 @@ func (iq *ItemQuery) loadAttachments(ctx context.Context, query *AttachmentQuery
|
||||
}
|
||||
node, ok := nodeids[*fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID)
|
||||
return fmt.Errorf(`unexpected referenced foreign-key "item_attachments" returned %v for node %v`, *fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
|
||||
@@ -47,6 +47,14 @@ func (iu *ItemUpdate) SetName(s string) *ItemUpdate {
|
||||
return iu
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (iu *ItemUpdate) SetNillableName(s *string) *ItemUpdate {
|
||||
if s != nil {
|
||||
iu.SetName(*s)
|
||||
}
|
||||
return iu
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (iu *ItemUpdate) SetDescription(s string) *ItemUpdate {
|
||||
iu.mutation.SetDescription(s)
|
||||
@@ -688,7 +696,7 @@ func (iu *ItemUpdate) RemoveAttachments(a ...*Attachment) *ItemUpdate {
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (iu *ItemUpdate) Save(ctx context.Context) (int, error) {
|
||||
iu.defaults()
|
||||
return withHooks[int, ItemMutation](ctx, iu.sqlSave, iu.mutation, iu.hooks)
|
||||
return withHooks(ctx, iu.sqlSave, iu.mutation, iu.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
@@ -1247,6 +1255,14 @@ func (iuo *ItemUpdateOne) SetName(s string) *ItemUpdateOne {
|
||||
return iuo
|
||||
}
|
||||
|
||||
// SetNillableName sets the "name" field if the given value is not nil.
|
||||
func (iuo *ItemUpdateOne) SetNillableName(s *string) *ItemUpdateOne {
|
||||
if s != nil {
|
||||
iuo.SetName(*s)
|
||||
}
|
||||
return iuo
|
||||
}
|
||||
|
||||
// SetDescription sets the "description" field.
|
||||
func (iuo *ItemUpdateOne) SetDescription(s string) *ItemUpdateOne {
|
||||
iuo.mutation.SetDescription(s)
|
||||
@@ -1901,7 +1917,7 @@ func (iuo *ItemUpdateOne) Select(field string, fields ...string) *ItemUpdateOne
|
||||
// Save executes the query and returns the updated Item entity.
|
||||
func (iuo *ItemUpdateOne) Save(ctx context.Context) (*Item, error) {
|
||||
iuo.defaults()
|
||||
return withHooks[*Item, ItemMutation](ctx, iuo.sqlSave, iuo.mutation, iuo.hooks)
|
||||
return withHooks(ctx, iuo.sqlSave, iuo.mutation, iuo.hooks)
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
|
||||
@@ -38,8 +39,9 @@ type ItemField struct {
|
||||
TimeValue time.Time `json:"time_value,omitempty"`
|
||||
// Edges holds the relations/edges for other nodes in the graph.
|
||||
// The values are being populated by the ItemFieldQuery when eager-loading is set.
|
||||
Edges ItemFieldEdges `json:"edges"`
|
||||
item_fields *uuid.UUID
|
||||
Edges ItemFieldEdges `json:"edges"`
|
||||
item_fields *uuid.UUID
|
||||
selectValues sql.SelectValues
|
||||
}
|
||||
|
||||
// ItemFieldEdges holds the relations/edges for other nodes in the graph.
|
||||
@@ -82,7 +84,7 @@ func (*ItemField) scanValues(columns []string) ([]any, error) {
|
||||
case itemfield.ForeignKeys[0]: // item_fields
|
||||
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type ItemField", columns[i])
|
||||
values[i] = new(sql.UnknownType)
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
@@ -163,11 +165,19 @@ func (_if *ItemField) assignValues(columns []string, values []any) error {
|
||||
_if.item_fields = new(uuid.UUID)
|
||||
*_if.item_fields = *value.S.(*uuid.UUID)
|
||||
}
|
||||
default:
|
||||
_if.selectValues.Set(columns[i], values[i])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Value returns the ent.Value that was dynamically selected and assigned to the ItemField.
|
||||
// This includes values selected through modifiers, order, etc.
|
||||
func (_if *ItemField) Value(name string) (ent.Value, error) {
|
||||
return _if.selectValues.Get(name)
|
||||
}
|
||||
|
||||
// QueryItem queries the "item" edge of the ItemField entity.
|
||||
func (_if *ItemField) QueryItem() *ItemQuery {
|
||||
return NewItemFieldClient(_if.config).QueryItem(_if)
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
    "fmt"
    "time"

    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
    "github.com/google/uuid"
)

@@ -125,3 +127,70 @@ func TypeValidator(_type Type) error {
        return fmt.Errorf("itemfield: invalid enum value for type field: %q", _type)
    }
}

// OrderOption defines the ordering options for the ItemField queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByDescription orders the results by the description field.
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldDescription, opts...).ToFunc()
}

// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldType, opts...).ToFunc()
}

// ByTextValue orders the results by the text_value field.
func ByTextValue(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldTextValue, opts...).ToFunc()
}

// ByNumberValue orders the results by the number_value field.
func ByNumberValue(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldNumberValue, opts...).ToFunc()
}

// ByBooleanValue orders the results by the boolean_value field.
func ByBooleanValue(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldBooleanValue, opts...).ToFunc()
}

// ByTimeValue orders the results by the time_value field.
func ByTimeValue(opts ...sql.OrderTermOption) OrderOption {
    return sql.OrderByField(FieldTimeValue, opts...).ToFunc()
}

// ByItemField orders the results by item field.
func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption {
    return func(s *sql.Selector) {
        sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...))
    }
}
func newItemStep() *sqlgraph.Step {
    return sqlgraph.NewStep(
        sqlgraph.From(Table, FieldID),
        sqlgraph.To(ItemInverseTable, FieldID),
        sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
    )
}
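All of the added code above is the new typed ordering API: each By* helper wraps sql.OrderByField into an OrderOption closure over the selector, and ByItemField orders by a column on the joined item edge through newItemStep. A self-contained sketch of the underlying functional-option pattern; the selector, byField, and helper names below are illustrative stand-ins, not ent types:

package main

import (
    "fmt"
    "strings"
)

// selector is a stand-in for sql.Selector: it only collects ORDER BY terms.
type selector struct{ orderBy []string }

// OrderOption mirrors the generated type: a closure that mutates the selector.
type OrderOption func(*selector)

func byField(field string, desc bool) OrderOption {
    return func(s *selector) {
        term := field
        if desc {
            term += " DESC"
        }
        s.orderBy = append(s.orderBy, term)
    }
}

// Field-specific helpers play the role of the generated ByName/ByCreatedAt.
func ByName(desc bool) OrderOption      { return byField("name", desc) }
func ByCreatedAt(desc bool) OrderOption { return byField("created_at", desc) }

func main() {
    s := &selector{}
    for _, opt := range []OrderOption{ByCreatedAt(true), ByName(false)} {
        opt(s)
    }
    fmt.Println("ORDER BY " + strings.Join(s.orderBy, ", "))
}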
@@ -525,11 +525,7 @@ func HasItem() predicate.ItemField {
// HasItemWith applies the HasEdge predicate on the "item" edge with a given conditions (other predicates).
func HasItemWith(preds ...predicate.Item) predicate.ItemField {
    return predicate.ItemField(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.To(ItemInverseTable, FieldID),
            sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
        )
        step := newItemStep()
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
@@ -540,32 +536,15 @@ func HasItemWith(preds ...predicate.Item) predicate.ItemField {

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.ItemField) predicate.ItemField {
    return predicate.ItemField(func(s *sql.Selector) {
        s1 := s.Clone().SetP(nil)
        for _, p := range predicates {
            p(s1)
        }
        s.Where(s1.P())
    })
    return predicate.ItemField(sql.AndPredicates(predicates...))
}

// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.ItemField) predicate.ItemField {
    return predicate.ItemField(func(s *sql.Selector) {
        s1 := s.Clone().SetP(nil)
        for i, p := range predicates {
            if i > 0 {
                s1.Or()
            }
            p(s1)
        }
        s.Where(s1.P())
    })
    return predicate.ItemField(sql.OrPredicates(predicates...))
}

// Not applies the not operator on the given predicate.
func Not(p predicate.ItemField) predicate.ItemField {
    return predicate.ItemField(func(s *sql.Selector) {
        p(s.Not())
    })
    return predicate.ItemField(sql.NotPredicates(p))
}
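The And, Or, and Not rewrites replace hand-rolled selector cloning with the sql.AndPredicates, sql.OrPredicates, and sql.NotPredicates helpers that the regenerated code relies on; the composed behavior should be unchanged. A self-contained sketch of how such predicate combinators work; selector, Predicate, and FieldEQ below are illustrative stand-ins, not ent's types:

package main

import (
    "fmt"
    "strings"
)

// selector is a stand-in for sql.Selector: it only collects WHERE fragments.
type selector struct{ conds []string }

// Predicate mirrors predicate.ItemField: a closure over the selector.
type Predicate func(*selector)

func FieldEQ(col, val string) Predicate {
    return func(s *selector) {
        s.conds = append(s.conds, fmt.Sprintf("%s = %q", col, val))
    }
}

// combine runs the child predicates against a fresh selector and joins the
// collected conditions with the given operator, much like And/Or above.
func combine(op string, preds ...Predicate) Predicate {
    return func(s *selector) {
        child := &selector{}
        for _, p := range preds {
            p(child)
        }
        s.conds = append(s.conds, "("+strings.Join(child.conds, " "+op+" ")+")")
    }
}

func And(preds ...Predicate) Predicate { return combine("AND", preds...) }
func Or(preds ...Predicate) Predicate  { return combine("OR", preds...) }

func main() {
    s := &selector{}
    Or(FieldEQ("type", "text"), And(FieldEQ("type", "number"), FieldEQ("name", "count")))(s)
    fmt.Println("WHERE " + strings.Join(s.conds, " AND "))
}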
Some files were not shown because too many files have changed in this diff.