Mirror of https://github.com/sysadminsmedia/homebox.git (synced 2026-01-01 02:27:32 +01:00)

Compare commits: feat/new-n...fix/variou — 36 commits
| Author | SHA1 | Date |
|---|---|---|
|  | 0e48256b77 |  |
|  | c41a43d804 |  |
|  | 3ed72daa64 |  |
|  | 564b8a2725 |  |
|  | 4a9bcde9ea |  |
|  | e1e04d49aa |  |
|  | 9020587c9e |  |
|  | bd0db1ea37 |  |
|  | d2985ff72c |  |
|  | 8c57ff841e |  |
|  | 0264bfb8c1 |  |
|  | 5dd6844536 |  |
|  | 0f8db862b4 |  |
|  | be6b5c9c56 |  |
|  | faed343eda |  |
|  | ed1230e17d |  |
|  | 2d768e2b9c |  |
|  | 40e76bac0c |  |
|  | 840d220d4f |  |
|  | 975e636fb6 |  |
|  | d1076baf84 |  |
|  | 40fcef4e9b |  |
|  | 97fb94d231 |  |
|  | 4a8ba6231d |  |
|  | db80f8a159 |  |
|  | 184b494fc3 |  |
|  | 5a3fa23332 |  |
|  | ef0690d511 |  |
|  | 9e55c880f6 |  |
|  | cb9b20e2d2 |  |
|  | a79e780b4e |  |
|  | 90cbb9bfd1 |  |
|  | dc08dbbd7a |  |
|  | 1f47d96e4c |  |
|  | 23b5892aef |  |
|  | 2665b666f1 |  |
.github/workflows/partial-backend.yaml (4 changes)

```diff
@@ -10,9 +10,9 @@ jobs:
       - uses: actions/checkout@v3

       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.19
+          go-version: "1.20"

       - name: Install Task
         uses: arduino/setup-task@v1
```
.github/workflows/partial-frontend.yaml (4 changes)

```diff
@@ -44,9 +44,9 @@ jobs:
           repo-token: ${{ secrets.GITHUB_TOKEN }}

       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.19
+          go-version: "1.20"

       - uses: actions/setup-node@v3
         with:
```
.github/workflows/partial-publish.yaml (4 changes)

```diff
@@ -22,9 +22,9 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v4
         with:
-          go-version: 1.19
+          go-version: "1.20"

       - name: Set up QEMU
         id: qemu
```
.github/workflows/publish.yaml (19 changes)

```diff
@@ -1,4 +1,4 @@
-name: Build Nightly
+name: Publish Dockers

 on:
   push:
@@ -12,20 +12,9 @@ env:
   FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}

 jobs:
-  backend-tests:
-    name: "Backend Server Tests"
-    uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main
-
-  frontend-tests:
-    name: "Frontend and End-to-End Tests"
-    uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main
-
   deploy:
     name: "Deploy Nightly to Fly.io"
     runs-on: ubuntu-latest
-    needs:
-      - backend-tests
-      - frontend-tests
     steps:
       - uses: actions/checkout@v3
       - uses: superfly/flyctl-actions/setup-flyctl@master
@@ -34,9 +23,6 @@
   publish-nightly:
     name: "Publish Nightly"
     if: github.event_name != 'release'
-    needs:
-      - backend-tests
-      - frontend-tests
     uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
     with:
       tag: nightly
@@ -46,9 +32,6 @@
   publish-tag:
     name: "Publish Tag"
     if: github.event_name == 'release'
-    needs:
-      - backend-tests
-      - frontend-tests
    uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
    with:
      release: true
```
.github/workflows/pull-requests.yaml (2 changes)

```diff
@@ -12,4 +12,4 @@ jobs:

   frontend-tests:
     name: "Frontend and End-to-End Tests"
-    uses: ./.github/workflows/partial-frontend.yaml
+    uses: ./.github/workflows/partial-frontend.yaml
```
.github/workflows/tag.yaml (new file, 51 lines)

```yaml
name: Publish Release

on:
  push:
    tags:
      - v*

env:
  FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}

jobs:
  backend-tests:
    name: "Backend Server Tests"
    uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main

  frontend-tests:
    name: "Frontend and End-to-End Tests"
    uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main


  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - name: Set up Go
        uses: actions/setup-go@v3

      - uses: pnpm/action-setup@v2
        with:
          version: 7.30.1

      - name: Build Frontend and Copy to Backend
        working-directory: frontend
        run: |
          pnpm install --shamefully-hoist
          pnpm run build
          cp -r ./.output/public ../backend/app/api/static/

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4
        with:
          workdir: "backend"
          distribution: goreleaser
          version: latest
          args: release --clean
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.gitignore (7 changes)

```diff
@@ -48,4 +48,9 @@ dist

 .pnpm-store
 backend/app/api/app
-backend/app/api/__debug_bin
+backend/app/api/__debug_bin
+dist/
+
+# Nuxt Publish Dir
+backend/app/api/static/public/*
+!backend/app/api/static/public/.gitkeep
```
.scaffold/model/scaffold.yaml (new file, 33 lines)

```yaml
---
# yaml-language-server: $schema=https://hay-kot.github.io/scaffold/schema.json
messages:
  pre: |
    # Ent Model Generation

    With Boilerplate!
  post: |
    Complete!

questions:
  - name: "model"
    prompt:
      message: "What is the name of the model? (PascalCase)"
      required: true

  - name: "by_group"
    prompt:
      confirm: "Include a Group Edge? (group_id -> id)"
      required: true

rewrites:
  - from: 'templates/model.go'
    to: 'backend/internal/data/ent/schema/{{ lower .Scaffold.model }}.go'

inject:
  - name: "Insert Groups Edge"
    path: 'backend/internal/data/ent/schema/group.go'
    at: // $scaffold_edge
    template: |
      {{- if .Scaffold.by_group -}}
      owned("{{ lower .Scaffold.model }}s", {{ .Scaffold.model }}.Type),
      {{- end -}}
```
.scaffold/model/templates/model.go (new file, 40 lines)

```go
package schema

import (
    "entgo.io/ent"

    "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
)

type {{ .Scaffold.model }} struct {
    ent.Schema
}

func ({{ .Scaffold.model }}) Mixin() []ent.Mixin {
    return []ent.Mixin{
        mixins.BaseMixin{},
        {{- if .Scaffold.by_group }}
        GroupMixin{ref: "{{ snakecase .Scaffold.model }}s"},
        {{- end }}
    }
}

// Fields of the {{ .Scaffold.model }}.
func ({{ .Scaffold.model }}) Fields() []ent.Field {
    return []ent.Field{
        // field.String("name").
    }
}

// Edges of the {{ .Scaffold.model }}.
func ({{ .Scaffold.model }}) Edges() []ent.Edge {
    return []ent.Edge{
        // edge.From("group", Group.Type).
    }
}

func ({{ .Scaffold.model }}) Indexes() []ent.Index {
    return []ent.Index{
        // index.Fields("token"),
    }
}
```
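To make the template concrete, here is roughly what it would render for a hypothetical model named `Notifier` with the `by_group` question answered yes. The model name is an assumption chosen only for illustration; per the rewrite rule above, the generated file would land at `backend/internal/data/ent/schema/notifier.go`.

```go
package schema

import (
    "entgo.io/ent"

    "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
)

// Notifier is the rendered form of the scaffold template for a
// hypothetical "Notifier" model with by_group = true.
type Notifier struct {
    ent.Schema
}

func (Notifier) Mixin() []ent.Mixin {
    return []ent.Mixin{
        mixins.BaseMixin{},
        GroupMixin{ref: "notifiers"},
    }
}

// Fields of the Notifier.
func (Notifier) Fields() []ent.Field {
    return []ent.Field{
        // field.String("name").
    }
}

// Edges of the Notifier.
func (Notifier) Edges() []ent.Edge {
    return []ent.Edge{
        // edge.From("group", Group.Type).
    }
}

func (Notifier) Indexes() []ent.Index {
    return []ent.Index{
        // index.Fields("token"),
    }
}
```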
```diff
@@ -48,4 +48,10 @@ start command `task: ui:dev`

 1. The frontend is a Vue 3 app with Nuxt.js that uses Tailwind and DaisyUI for styling.
 2. We're using Vitest for our automated testing. you can run these with `task ui:watch`.
-3. Tests require the API server to be running and in some cases the first run will fail due to a race condition. If this happens just run the tests again and they should pass.
+3. Tests require the API server to be running and in some cases the first run will fail due to a race condition. If this happens just run the tests again and they should pass.
+
+## Publishing Release
+
+Create a new tag in github with the version number vX.X.X. This will trigger a new release to be created.
+
+Test -> Goreleaser -> Publish Release -> Trigger Docker Builds -> Deploy Docs + Fly.io Demo
```
```diff
@@ -22,7 +22,7 @@ COPY ./backend .
 RUN go get -d -v ./...
 RUN rm -rf ./app/api/public
 COPY --from=frontend-builder /app/.output/public ./app/api/static/public
-RUN CGO_ENABLED=1 GOOS=linux go build \
+RUN CGO_ENABLED=0 GOOS=linux go build \
     -ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
     -o /go/bin/api \
     -v ./app/api/*.go
```
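The `-X main.commit=... -X main.buildTime=... -X main.version=...` flags in the container build hunk above only take effect if the main package declares matching package-level string variables. A minimal sketch of that convention follows; the default values are assumptions for illustration, not the exact defaults homebox uses.

```go
package main

import "fmt"

// Overridden at build time via:
//   go build -ldflags "-X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION"
// When built without the flags (e.g. plain `go run`), the defaults below apply.
var (
    version   = "nightly"
    commit    = "HEAD"
    buildTime = "now"
)

func main() {
    fmt.Printf("homebox %s (%s) built %s\n", version, commit, buildTime)
}
```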
README.md (11 changes)

````diff
@@ -16,10 +16,13 @@
 [Configuration & Docker Compose](https://hay-kot.github.io/homebox/quick-start)

 ```bash
-docker run --name=homebox \
-  --restart=always \
-  --publish=3100:7745 \
-  ghcr.io/hay-kot/homebox:latest
+docker run -d \
+  --name homebox \
+  --restart unless-stopped \
+  --publish 3100:7745 \
+  --env TZ=Europe/Bucharest \
+  --volume /path/to/data/folder/:/data \
+  ghcr.io/hay-kot/homebox:latest
 ```

 ## Credits
````
Taskfile.yml (49 changes)

```diff
@@ -1,7 +1,7 @@
 version: "3"

 env:
-  HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_fk=1
+  HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_pragma=busy_timeout=1000&_pragma=journal_mode=WAL&_fk=1
   HBOX_OPTIONS_ALLOW_REGISTRATION: true
   UNSAFE_DISABLE_PASSWORD_PROJECTION: "yes_i_am_sure"
 tasks:
@@ -27,46 +27,47 @@ tasks:
         --modular \
         --path ./backend/app/api/static/docs/swagger.json \
         --output ./frontend/lib/api/types
-      - go run ./scripts/process-types/*.go ./frontend/lib/api/types/data-contracts.ts
+      - go run ./backend/app/tools/typegen/main.go ./frontend/lib/api/types/data-contracts.ts
       - cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json
     sources:
       - "./backend/app/api/**/*"
       - "./backend/internal/data/**"
       - "./backend/internal/services/**/*"
       - "./scripts/process-types.py"
     generates:
       - "./frontend/lib/api/types/data-contracts.ts"
       - "./backend/internal/data/ent/schema"
       - "./backend/app/api/static/docs/swagger.json"
       - "./backend/app/api/static/docs/swagger.yaml"
       - "./backend/internal/core/services/**/*"
       - "./backend/app/tools/typegen/main.go"

   go:run:
     desc: Starts the backend api server (depends on generate task)
+    dir: backend
     deps:
       - generate
     cmds:
-      - cd backend && go run ./app/api/ {{ .CLI_ARGS }}
+      - go run ./app/api/ {{ .CLI_ARGS }}
     silent: false

   go:test:
     desc: Runs all go tests using gotestsum - supports passing gotestsum args
+    dir: backend
     cmds:
-      - cd backend && gotestsum {{ .CLI_ARGS }} ./...
+      - gotestsum {{ .CLI_ARGS }} ./...

   go:coverage:
     desc: Runs all go tests with -race flag and generates a coverage report
+    dir: backend
     cmds:
-      - cd backend && go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover
+      - go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover
     silent: true

   go:tidy:
     desc: Runs go mod tidy on the backend
+    dir: backend
     cmds:
-      - cd backend && go mod tidy
+      - go mod tidy

   go:lint:
     desc: Runs golangci-lint
+    dir: backend
     cmds:
-      - cd backend && golangci-lint run ./...
+      - golangci-lint run ./...

   go:all:
     desc: Runs all go test and lint related tasks
@@ -77,19 +78,19 @@ tasks:

   go:build:
     desc: Builds the backend binary
+    dir: backend
     cmds:
-      - cd backend && go build -o ../build/backend ./app/api
+      - go build -o ../build/backend ./app/api

   db:generate:
     desc: Run Entgo.io Code Generation
+    dir: backend/internal/
     cmds:
       - |
-        cd backend/internal/ && go generate ./... \
+        go generate ./... \
         --template=./data/ent/schema/templates/has_id.tmpl
     sources:
       - "./backend/internal/data/ent/schema/**/*"
     generates:
       - "./backend/internal/ent/"

   db:migration:
     desc: Runs the database diff engine to generate a SQL migration files
@@ -100,23 +101,27 @@ tasks:

   ui:watch:
     desc: Starts the vitest test runner in watch mode
+    dir: frontend
     cmds:
-      - cd frontend && pnpm run test:watch
+      - pnpm run test:watch

   ui:dev:
     desc: Run frontend development server
+    dir: frontend
     cmds:
-      - cd frontend && pnpm dev
+      - pnpm dev

   ui:fix:
     desc: Runs prettier and eslint on the frontend
+    dir: frontend
     cmds:
-      - cd frontend && pnpm run lint:fix
+      - pnpm run lint:fix

   ui:check:
     desc: Runs type checking
+    dir: frontend
     cmds:
-      - cd frontend && pnpm run typecheck
+      - pnpm run typecheck

   test:ci:
     desc: Runs end-to-end test on a live server (only for use in CI)
```
backend/.gitignore (new file, 2 lines)

```

dist/
```
backend/.goreleaser.yaml (new file, 54 lines)

```yaml
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com
before:
  hooks:
    # you may remove this if you don't need go generate
    - go generate ./...
builds:
  - main: ./app/api
    env:
      - CGO_ENABLED=0
    goos:
      - linux
      - windows
      - darwin
    goarch:
      - amd64
      - "386"
      - arm
      - arm64
    ignore:
      - goos: windows
        goarch: arm
      - goos: windows
        goarch: "386"

archives:
  - format: tar.gz
    # this name template makes the OS and Arch compatible with the results of uname.
    name_template: >-
      {{ .ProjectName }}_
      {{- title .Os }}_
      {{- if eq .Arch "amd64" }}x86_64
      {{- else if eq .Arch "386" }}i386
      {{- else }}{{ .Arch }}{{ end }}
      {{- if .Arm }}v{{ .Arm }}{{ end }}
    # use zip for windows archives
    format_overrides:
      - goos: windows
        format: zip
checksum:
  name_template: 'checksums.txt'
snapshot:
  name_template: "{{ incpatch .Version }}-next"
changelog:
  sort: asc
  filters:
    exclude:
      - '^docs:'
      - '^test:'

# The lines beneath this are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
```
```diff
@@ -8,7 +8,7 @@ import (
     "github.com/hay-kot/homebox/backend/internal/data/repo"
     "github.com/hay-kot/homebox/backend/internal/sys/config"
     "github.com/hay-kot/homebox/backend/pkgs/mailer"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/safeserve/server"
 )

 type app struct {
@@ -37,8 +37,11 @@ func new(conf *config.Config) *app {
 }

 func (a *app) startBgTask(t time.Duration, fn func()) {
+    timer := time.NewTimer(t)
+
     for {
+        timer.Reset(t)
         a.server.Background(fn)
-        time.Sleep(t)
+        <-timer.C
     }
 }
```
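The rewritten `startBgTask` reuses a single timer and blocks on its channel instead of sleeping. A self-contained sketch of the same loop, outside the app/server types used above:

```go
package main

import (
    "fmt"
    "time"
)

// runEvery runs fn on a fixed interval using one reusable timer, mirroring
// the startBgTask loop above: reset the timer, do the work, then block on
// the timer channel rather than calling time.Sleep.
func runEvery(interval time.Duration, fn func()) {
    timer := time.NewTimer(interval)
    defer timer.Stop()

    for {
        timer.Reset(interval)
        fn()
        <-timer.C
    }
}

func main() {
    runEvery(time.Second, func() { fmt.Println("tick") })
}
```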
```diff
@@ -25,7 +25,7 @@ func (a *app) SetupDemo() {
     }

     // First check if we've already setup a demo user and skip if so
-    _, err := a.services.User.Login(context.Background(), registration.Email, registration.Password)
+    _, err := a.services.User.Login(context.Background(), registration.Email, registration.Password, false)
     if err == nil {
         return
     }
@@ -36,7 +36,7 @@ func (a *app) SetupDemo() {
         log.Fatal().Msg("Failed to setup demo")
     }

-    token, _ := a.services.User.Login(context.Background(), registration.Email, registration.Password)
+    token, _ := a.services.User.Login(context.Background(), registration.Email, registration.Password, false)
     self, _ := a.services.User.GetSelf(context.Background(), token.Raw)

     _, err = a.services.Items.CsvImport(context.Background(), self.GroupID, strings.NewReader(csvText))
```
```diff
@@ -5,9 +5,26 @@ import (
     "github.com/hay-kot/homebox/backend/internal/core/services"
     "github.com/hay-kot/homebox/backend/internal/data/repo"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/safeserve/errchain"
+    "github.com/hay-kot/safeserve/server"
 )

+type Results[T any] struct {
+    Items []T `json:"items"`
+}
+
+func WrapResults[T any](items []T) Results[T] {
+    return Results[T]{Items: items}
+}
+
+type Wrapped struct {
+    Item interface{} `json:"item"`
+}
+
+func Wrap(v any) Wrapped {
+    return Wrapped{Item: v}
+}
+
 func WithMaxUploadSize(maxUploadSize int64) func(*V1Controller) {
     return func(ctrl *V1Controller) {
         ctrl.maxUploadSize = maxUploadSize
@@ -75,17 +92,18 @@ func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ..
 }

 // HandleBase godoc
-// @Summary  Retrieves the basic information about the API
-// @Tags     Base
-// @Produce  json
-// @Success  200 {object} ApiSummary
-// @Router   /v1/status [GET]
-func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) server.HandlerFunc {
+//
+// @Summary  Application Info
+// @Tags     Base
+// @Produce  json
+// @Success  200 {object} ApiSummary
+// @Router   /v1/status [GET]
+func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
-        return server.Respond(w, http.StatusOK, ApiSummary{
+        return server.JSON(w, http.StatusOK, ApiSummary{
             Healthy:           ready(),
-            Title:             "Go API Template",
-            Message:           "Welcome to the Go API Template Application!",
+            Title:             "Homebox",
+            Message:           "Track, Manage, and Organize your Things",
             Build:             build,
             Demo:              ctrl.isDemo,
             AllowRegistration: ctrl.allowRegistration,
```
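A small, self-contained illustration of the `Wrap` and `WrapResults` helpers added above, showing the JSON envelopes they produce. The helpers are copied verbatim so the example runs on its own; the item values are placeholders.

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Copies of the helpers introduced in the controller diff above.
type Results[T any] struct {
    Items []T `json:"items"`
}

func WrapResults[T any](items []T) Results[T] { return Results[T]{Items: items} }

type Wrapped struct {
    Item interface{} `json:"item"`
}

func Wrap(v any) Wrapped { return Wrapped{Item: v} }

func main() {
    one, _ := json.Marshal(Wrap(map[string]string{"name": "Drill"}))
    many, _ := json.Marshal(WrapResults([]string{"a", "b"}))
    fmt.Println(string(one))  // {"item":{"name":"Drill"}}
    fmt.Println(string(many)) // {"items":["a","b"]}
}
```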
```diff
@@ -21,7 +21,7 @@ func (ctrl *V1Controller) routeID(r *http.Request) (uuid.UUID, error) {
 func (ctrl *V1Controller) routeUUID(r *http.Request, key string) (uuid.UUID, error) {
     ID, err := uuid.Parse(chi.URLParam(r, key))
     if err != nil {
-        return uuid.Nil, validate.NewInvalidRouteKeyError(key)
+        return uuid.Nil, validate.NewRouteKeyError(key)
     }
     return ID, nil
 }
```
```diff
@@ -7,7 +7,8 @@ import (
     "github.com/google/uuid"
     "github.com/hay-kot/homebox/backend/internal/core/services"
     "github.com/hay-kot/homebox/backend/internal/sys/validate"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/safeserve/errchain"
+    "github.com/hay-kot/safeserve/server"
     "github.com/rs/zerolog/log"
 )
@@ -15,7 +16,7 @@ type ActionAmountResult struct {
     Completed int `json:"completed"`
 }

-func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int, error)) server.HandlerFunc {
+func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int, error)) errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         ctx := services.NewContext(r.Context())

@@ -25,39 +26,45 @@ func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int,
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted})
+        return server.JSON(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted})
     }
 }

-// HandleGroupInvitationsCreate godoc
-// @Summary  Ensures all items in the database have an asset id
-// @Tags     Group
-// @Produce  json
-// @Success  200 {object} ActionAmountResult
-// @Router   /v1/actions/ensure-asset-ids [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleEnsureAssetID() server.HandlerFunc {
+// HandleEnsureAssetID godoc
+//
+// @Summary     Ensure Asset IDs
+// @Description Ensures all items in the database have an asset ID
+// @Tags        Actions
+// @Produce     json
+// @Success     200 {object} ActionAmountResult
+// @Router      /v1/actions/ensure-asset-ids [Post]
+// @Security    Bearer
+func (ctrl *V1Controller) HandleEnsureAssetID() errchain.HandlerFunc {
     return actionHandlerFactory("ensure asset IDs", ctrl.svc.Items.EnsureAssetID)
 }

 // HandleEnsureImportRefs godoc
-// @Summary  Ensures all items in the database have an import ref
-// @Tags     Group
-// @Produce  json
-// @Success  200 {object} ActionAmountResult
-// @Router   /v1/actions/ensure-import-refs [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleEnsureImportRefs() server.HandlerFunc {
+//
+// @Summary     Ensures Import Refs
+// @Description Ensures all items in the database have an import ref
+// @Tags        Actions
+// @Produce     json
+// @Success     200 {object} ActionAmountResult
+// @Router      /v1/actions/ensure-import-refs [Post]
+// @Security    Bearer
+func (ctrl *V1Controller) HandleEnsureImportRefs() errchain.HandlerFunc {
     return actionHandlerFactory("ensure import refs", ctrl.svc.Items.EnsureImportRef)
 }

 // HandleItemDateZeroOut godoc
-// @Summary  Resets all item date fields to the beginning of the day
-// @Tags     Group
-// @Produce  json
-// @Success  200 {object} ActionAmountResult
-// @Router   /v1/actions/zero-item-time-fields [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemDateZeroOut() server.HandlerFunc {
+//
+// @Summary     Zero Out Time Fields
+// @Description Resets all item date fields to the beginning of the day
+// @Tags        Actions
+// @Produce     json
+// @Success     200 {object} ActionAmountResult
+// @Router      /v1/actions/zero-item-time-fields [Post]
+// @Security    Bearer
+func (ctrl *V1Controller) HandleItemDateZeroOut() errchain.HandlerFunc {
     return actionHandlerFactory("zero out date time", ctrl.repo.Items.ZeroOutTimeFields)
 }
```
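The switch from `server.HandlerFunc` to `errchain.HandlerFunc` across these handlers keeps the same shape: a handler that returns an `error` and leaves error rendering to shared middleware. A minimal sketch of that idea, assuming nothing about the real safeserve package beyond the signature visible above:

```go
package main

import (
    "errors"
    "net/http"
)

// HandlerFunc mirrors the errchain.HandlerFunc shape used above: handlers
// return an error instead of writing error responses themselves.
type HandlerFunc func(w http.ResponseWriter, r *http.Request) error

// toStdHandler adapts a HandlerFunc to net/http, turning a returned error
// into a 500 response. The real error chain is richer; this only sketches
// the pattern.
func toStdHandler(h HandlerFunc) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if err := h(w, r); err != nil {
            http.Error(w, err.Error(), http.StatusInternalServerError)
        }
    })
}

func main() {
    var failing HandlerFunc = func(w http.ResponseWriter, r *http.Request) error {
        return errors.New("something went wrong")
    }
    http.Handle("/demo", toStdHandler(failing))
    _ = http.ListenAndServe(":8080", nil)
}
```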
```diff
@@ -9,20 +9,22 @@ import (
     "github.com/hay-kot/homebox/backend/internal/core/services"
     "github.com/hay-kot/homebox/backend/internal/data/repo"
     "github.com/hay-kot/homebox/backend/internal/sys/validate"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/safeserve/errchain"
+    "github.com/hay-kot/safeserve/server"

     "github.com/rs/zerolog/log"
 )

-// HandleItemGet godocs
-// @Summary  Gets an item by Asset ID
-// @Tags     Assets
-// @Produce  json
-// @Param    id path string true "Asset ID"
-// @Success  200 {object} repo.PaginationResult[repo.ItemSummary]{}
-// @Router   /v1/assets/{id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
+// HandleAssetGet godocs
+//
+// @Summary  Get Item by Asset ID
+// @Tags     Items
+// @Produce  json
+// @Param    id path string true "Asset ID"
+// @Success  200 {object} repo.PaginationResult[repo.ItemSummary]{}
+// @Router   /v1/assets/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleAssetGet() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         ctx := services.NewContext(r.Context())
         assetIdParam := chi.URLParam(r, "id")
@@ -37,7 +39,7 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
         if pageParam != "" {
             page, err = strconv.ParseInt(pageParam, 10, 64)
             if err != nil {
-                return server.Respond(w, http.StatusBadRequest, "Invalid page number")
+                return server.JSON(w, http.StatusBadRequest, "Invalid page number")
             }
         }

@@ -46,7 +48,7 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
         if pageSizeParam != "" {
             pageSize, err = strconv.ParseInt(pageSizeParam, 10, 64)
             if err != nil {
-                return server.Respond(w, http.StatusBadRequest, "Invalid page size")
+                return server.JSON(w, http.StatusBadRequest, "Invalid page size")
             }
         }

@@ -55,6 +57,6 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
             log.Err(err).Msg("failed to get item")
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }
-        return server.Respond(w, http.StatusOK, items)
+        return server.JSON(w, http.StatusOK, items)
     }
 }
```
```diff
@@ -8,7 +8,8 @@ import (
     "github.com/hay-kot/homebox/backend/internal/core/services"
     "github.com/hay-kot/homebox/backend/internal/sys/validate"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/safeserve/errchain"
+    "github.com/hay-kot/safeserve/server"
     "github.com/rs/zerolog/log"
 )
@@ -20,41 +21,46 @@ type (
     }

     LoginForm struct {
-        Username string `json:"username"`
-        Password string `json:"password"`
+        Username     string `json:"username"`
+        Password     string `json:"password"`
+        StayLoggedIn bool   `json:"stayLoggedIn"`
     }
 )

 // HandleAuthLogin godoc
-// @Summary User Login
-// @Tags    Authentication
-// @Accept  x-www-form-urlencoded
-// @Accept  application/json
-// @Param   username formData string false "string" example(admin@admin.com)
-// @Param   password formData string false "string" example(admin)
-// @Produce json
-// @Success 200 {object} TokenResponse
-// @Router  /v1/users/login [POST]
-func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
+//
+// @Summary User Login
+// @Tags    Authentication
+// @Accept  x-www-form-urlencoded
+// @Accept  application/json
+// @Param   username formData string false "string" example(admin@admin.com)
+// @Param   password formData string false "string" example(admin)
+// @Param   payload  body     LoginForm true "Login Data"
+// @Produce json
+// @Success 200 {object} TokenResponse
+// @Router  /v1/users/login [POST]
+func (ctrl *V1Controller) HandleAuthLogin() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         loginForm := &LoginForm{}

         switch r.Header.Get("Content-Type") {
-        case server.ContentFormUrlEncoded:
+        case "application/x-www-form-urlencoded":
             err := r.ParseForm()
             if err != nil {
-                return server.Respond(w, http.StatusBadRequest, server.Wrap(err))
+                return errors.New("failed to parse form")
             }

             loginForm.Username = r.PostFormValue("username")
             loginForm.Password = r.PostFormValue("password")
+            loginForm.StayLoggedIn = r.PostFormValue("stayLoggedIn") == "true"
-        case server.ContentJSON:
+        case "application/json":
             err := server.Decode(r, loginForm)
             if err != nil {
                 log.Err(err).Msg("failed to decode login form")
+                return errors.New("failed to decode login form")
             }
         default:
-            return server.Respond(w, http.StatusBadRequest, errors.New("invalid content type"))
+            return server.JSON(w, http.StatusBadRequest, errors.New("invalid content type"))
         }

         if loginForm.Username == "" || loginForm.Password == "" {
@@ -70,12 +76,12 @@ func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
             )
         }

-        newToken, err := ctrl.svc.User.Login(r.Context(), strings.ToLower(loginForm.Username), loginForm.Password)
+        newToken, err := ctrl.svc.User.Login(r.Context(), strings.ToLower(loginForm.Username), loginForm.Password, loginForm.StayLoggedIn)
         if err != nil {
             return validate.NewRequestError(errors.New("authentication failed"), http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusOK, TokenResponse{
+        return server.JSON(w, http.StatusOK, TokenResponse{
             Token:           "Bearer " + newToken.Raw,
             ExpiresAt:       newToken.ExpiresAt,
             AttachmentToken: newToken.AttachmentToken,
@@ -84,12 +90,13 @@ func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
 }

 // HandleAuthLogout godoc
-// @Summary  User Logout
-// @Tags     Authentication
-// @Success  204
-// @Router   /v1/users/logout [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
+//
+// @Summary  User Logout
+// @Tags     Authentication
+// @Success  204
+// @Router   /v1/users/logout [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleAuthLogout() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         token := services.UseTokenCtx(r.Context())
         if token == "" {
@@ -101,19 +108,20 @@ func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusNoContent, nil)
+        return server.JSON(w, http.StatusNoContent, nil)
     }
 }

 // HandleAuthLogout godoc
-// @Summary     User Token Refresh
-// @Description handleAuthRefresh returns a handler that will issue a new token from an existing token.
-// @Description This does not validate that the user still exists within the database.
-// @Tags        Authentication
-// @Success     200
-// @Router      /v1/users/refresh [GET]
-// @Security    Bearer
-func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc {
+//
+// @Summary     User Token Refresh
+// @Description handleAuthRefresh returns a handler that will issue a new token from an existing token.
+// @Description This does not validate that the user still exists within the database.
+// @Tags        Authentication
+// @Success     200
+// @Router      /v1/users/refresh [GET]
+// @Security    Bearer
+func (ctrl *V1Controller) HandleAuthRefresh() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         requestToken := services.UseTokenCtx(r.Context())
         if requestToken == "" {
@@ -125,6 +133,6 @@ func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc {
             return validate.NewUnauthorizedError()
         }

-        return server.Respond(w, http.StatusOK, newToken)
+        return server.JSON(w, http.StatusOK, newToken)
     }
 }
```
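With `StayLoggedIn` added to `LoginForm`, a JSON login request now carries three fields. A short sketch of building that payload; the struct mirrors the form in the diff, and the credential values are the example values from the swagger annotations, not real ones.

```go
package main

import (
    "encoding/json"
    "fmt"
)

// LoginForm mirrors the request body accepted by POST /v1/users/login
// with Content-Type: application/json.
type LoginForm struct {
    Username     string `json:"username"`
    Password     string `json:"password"`
    StayLoggedIn bool   `json:"stayLoggedIn"`
}

func main() {
    body, _ := json.Marshal(LoginForm{
        Username:     "admin@admin.com",
        Password:     "admin",
        StayLoggedIn: true,
    })
    fmt.Println(string(body)) // {"username":"admin@admin.com","password":"admin","stayLoggedIn":true}
}
```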
```diff
@@ -6,14 +6,13 @@ import (
     "github.com/hay-kot/homebox/backend/internal/core/services"
     "github.com/hay-kot/homebox/backend/internal/data/repo"
-    "github.com/hay-kot/homebox/backend/internal/sys/validate"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
-    "github.com/rs/zerolog/log"
+    "github.com/hay-kot/homebox/backend/internal/web/adapters"
+    "github.com/hay-kot/safeserve/errchain"
 )

 type (
     GroupInvitationCreate struct {
-        Uses      int       `json:"uses"`
+        Uses      int       `json:"uses" validate:"required,min=1,max=100"`
         ExpiresAt time.Time `json:"expiresAt"`
     }

@@ -25,93 +24,65 @@ type (
 )

 // HandleGroupGet godoc
-// @Summary  Get the current user's group
-// @Tags     Group
-// @Produce  json
-// @Success  200 {object} repo.Group
-// @Router   /v1/groups [Get]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupGet() server.HandlerFunc {
-    return ctrl.handleGroupGeneral()
+//
+// @Summary  Get Group
+// @Tags     Group
+// @Produce  json
+// @Success  200 {object} repo.Group
+// @Router   /v1/groups [Get]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupGet() errchain.HandlerFunc {
+    fn := func(r *http.Request) (repo.Group, error) {
+        auth := services.NewContext(r.Context())
+        return ctrl.repo.Groups.GroupByID(auth, auth.GID)
+    }
+
+    return adapters.Command(fn, http.StatusOK)
 }

 // HandleGroupUpdate godoc
-// @Summary  Updates some fields of the current users group
-// @Tags     Group
-// @Produce  json
-// @Param    payload body repo.GroupUpdate true "User Data"
-// @Success  200 {object} repo.Group
-// @Router   /v1/groups [Put]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupUpdate() server.HandlerFunc {
-    return ctrl.handleGroupGeneral()
-}
-
-func (ctrl *V1Controller) handleGroupGeneral() server.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) error {
-        ctx := services.NewContext(r.Context())
-
-        switch r.Method {
-        case http.MethodGet:
-            group, err := ctrl.repo.Groups.GroupByID(ctx, ctx.GID)
-            if err != nil {
-                log.Err(err).Msg("failed to get group")
-                return validate.NewRequestError(err, http.StatusInternalServerError)
-            }
-
-            return server.Respond(w, http.StatusOK, group)
-
-        case http.MethodPut:
-            data := repo.GroupUpdate{}
-            if err := server.Decode(r, &data); err != nil {
-                return validate.NewRequestError(err, http.StatusBadRequest)
-            }
-
-            group, err := ctrl.svc.Group.UpdateGroup(ctx, data)
-            if err != nil {
-                log.Err(err).Msg("failed to update group")
-                return validate.NewRequestError(err, http.StatusInternalServerError)
-            }
-
-            return server.Respond(w, http.StatusOK, group)
-        }
-
-        return nil
+//
+// @Summary  Update Group
+// @Tags     Group
+// @Produce  json
+// @Param    payload body repo.GroupUpdate true "User Data"
+// @Success  200 {object} repo.Group
+// @Router   /v1/groups [Put]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupUpdate() errchain.HandlerFunc {
+    fn := func(r *http.Request, body repo.GroupUpdate) (repo.Group, error) {
+        auth := services.NewContext(r.Context())
+        return ctrl.svc.Group.UpdateGroup(auth, body)
+    }
+
+    return adapters.Action(fn, http.StatusOK)
 }

 // HandleGroupInvitationsCreate godoc
-// @Summary  Get the current user
-// @Tags     Group
-// @Produce  json
-// @Param    payload body GroupInvitationCreate true "User Data"
-// @Success  200 {object} GroupInvitation
-// @Router   /v1/groups/invitations [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleGroupInvitationsCreate() server.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) error {
-        data := GroupInvitationCreate{}
-        if err := server.Decode(r, &data); err != nil {
-            log.Err(err).Msg("failed to decode user registration data")
-            return validate.NewRequestError(err, http.StatusBadRequest)
-        }
-
-        if data.ExpiresAt.IsZero() {
-            data.ExpiresAt = time.Now().Add(time.Hour * 24)
-        }
+//
+// @Summary  Create Group Invitation
+// @Tags     Group
+// @Produce  json
+// @Param    payload body GroupInvitationCreate true "User Data"
+// @Success  200 {object} GroupInvitation
+// @Router   /v1/groups/invitations [Post]
+// @Security Bearer
+func (ctrl *V1Controller) HandleGroupInvitationsCreate() errchain.HandlerFunc {
+    fn := func(r *http.Request, body GroupInvitationCreate) (GroupInvitation, error) {
+        if body.ExpiresAt.IsZero() {
+            body.ExpiresAt = time.Now().Add(time.Hour * 24)
+        }

-        ctx := services.NewContext(r.Context())
+        auth := services.NewContext(r.Context())

-        token, err := ctrl.svc.Group.NewInvitation(ctx, data.Uses, data.ExpiresAt)
+        token, err := ctrl.svc.Group.NewInvitation(auth, body.Uses, body.ExpiresAt)
         if err != nil {
             log.Err(err).Msg("failed to create new token")
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusCreated, GroupInvitation{
+        return GroupInvitation{
             Token:     token,
-            ExpiresAt: data.ExpiresAt,
-            Uses:      data.Uses,
-        })
+            ExpiresAt: body.ExpiresAt,
+            Uses:      body.Uses,
+        }, err
     }

+    return adapters.Action(fn, http.StatusCreated)
 }
```
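The group handlers above now funnel through an `adapters` package whose source is not part of this compare, so the sketch below is an assumption about its shape rather than the real implementation: `Command` wraps a request-only function, `Action` additionally decodes a typed body, and both encode the result as JSON at a fixed status code.

```go
package adapters

import (
    "encoding/json"
    "net/http"
)

// HandlerFunc stands in for errchain.HandlerFunc in this sketch.
type HandlerFunc func(w http.ResponseWriter, r *http.Request) error

// Command adapts a request-only function into a handler that writes the
// result as JSON with the given status code.
func Command[T any](fn func(*http.Request) (T, error), status int) HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) error {
        v, err := fn(r)
        if err != nil {
            return err
        }
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(status)
        return json.NewEncoder(w).Encode(v)
    }
}

// Action additionally decodes a typed JSON body before calling fn.
func Action[B, T any](fn func(*http.Request, B) (T, error), status int) HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) error {
        var body B
        if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
            return err
        }
        v, err := fn(r, body)
        if err != nil {
            return err
        }
        w.Header().Set("Content-Type", "application/json")
        w.WriteHeader(status)
        return json.NewEncoder(w).Encode(v)
    }
}
```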
```diff
@@ -7,26 +7,30 @@ import (
     "net/http"
     "strings"

     "github.com/google/uuid"
     "github.com/hay-kot/homebox/backend/internal/core/services"
     "github.com/hay-kot/homebox/backend/internal/data/repo"
     "github.com/hay-kot/homebox/backend/internal/sys/validate"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/homebox/backend/internal/web/adapters"
+    "github.com/hay-kot/safeserve/errchain"
+    "github.com/hay-kot/safeserve/server"
     "github.com/rs/zerolog/log"
 )

 // HandleItemsGetAll godoc
-// @Summary  Get All Items
-// @Tags     Items
-// @Produce  json
-// @Param    q query string false "search string"
-// @Param    page query int false "page number"
-// @Param    pageSize query int false "items per page"
-// @Param    labels query []string false "label Ids" collectionFormat(multi)
-// @Param    locations query []string false "location Ids" collectionFormat(multi)
-// @Success  200 {object} repo.PaginationResult[repo.ItemSummary]{}
-// @Router   /v1/items [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
+//
+// @Summary  Query All Items
+// @Tags     Items
+// @Produce  json
+// @Param    q query string false "search string"
+// @Param    page query int false "page number"
+// @Param    pageSize query int false "items per page"
+// @Param    labels query []string false "label Ids" collectionFormat(multi)
+// @Param    locations query []string false "location Ids" collectionFormat(multi)
+// @Success  200 {object} repo.PaginationResult[repo.ItemSummary]{}
+// @Router   /v1/items [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsGetAll() errchain.HandlerFunc {
     extractQuery := func(r *http.Request) repo.ItemQuery {
         params := r.URL.Query()

@@ -75,174 +79,144 @@ func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
         items, err := ctrl.repo.Items.QueryByGroup(ctx, ctx.GID, extractQuery(r))
         if err != nil {
             if errors.Is(err, sql.ErrNoRows) {
-                return server.Respond(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{
+                return server.JSON(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{
                     Items: []repo.ItemSummary{},
                 })
             }
             log.Err(err).Msg("failed to get items")
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }
-        return server.Respond(w, http.StatusOK, items)
+        return server.JSON(w, http.StatusOK, items)
     }
 }

 // HandleItemsCreate godoc
-// @Summary  Create a new item
-// @Tags     Items
-// @Produce  json
-// @Param    payload body repo.ItemCreate true "Item Data"
-// @Success  200 {object} repo.ItemSummary
-// @Router   /v1/items [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsCreate() server.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) error {
-        createData := repo.ItemCreate{}
-        if err := server.Decode(r, &createData); err != nil {
-            log.Err(err).Msg("failed to decode request body")
-            return validate.NewRequestError(err, http.StatusInternalServerError)
-        }
-
-        ctx := services.NewContext(r.Context())
-        item, err := ctrl.svc.Items.Create(ctx, createData)
-        if err != nil {
-            log.Err(err).Msg("failed to create item")
-            return validate.NewRequestError(err, http.StatusInternalServerError)
-        }
-
-        return server.Respond(w, http.StatusCreated, item)
+//
+// @Summary  Create Item
+// @Tags     Items
+// @Produce  json
+// @Param    payload body repo.ItemCreate true "Item Data"
+// @Success  201 {object} repo.ItemSummary
+// @Router   /v1/items [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsCreate() errchain.HandlerFunc {
+    fn := func(r *http.Request, body repo.ItemCreate) (repo.ItemOut, error) {
+        return ctrl.svc.Items.Create(services.NewContext(r.Context()), body)
+    }
+
+    return adapters.Action(fn, http.StatusCreated)
 }

 // HandleItemGet godocs
-// @Summary  Gets a item and fields
-// @Tags     Items
-// @Produce  json
-// @Param    id path string true "Item ID"
-// @Success  200 {object} repo.ItemOut
-// @Router   /v1/items/{id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemGet() server.HandlerFunc {
-    return ctrl.handleItemsGeneral()
+//
+// @Summary  Get Item
+// @Tags     Items
+// @Produce  json
+// @Param    id path string true "Item ID"
+// @Success  200 {object} repo.ItemOut
+// @Router   /v1/items/{id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemGet() errchain.HandlerFunc {
+    fn := func(r *http.Request, ID uuid.UUID) (repo.ItemOut, error) {
+        auth := services.NewContext(r.Context())

+        return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
+    }
+
+    return adapters.CommandID("id", fn, http.StatusOK)
 }

 // HandleItemDelete godocs
-// @Summary  deletes a item
-// @Tags     Items
-// @Produce  json
-// @Param    id path string true "Item ID"
-// @Success  204
-// @Router   /v1/items/{id} [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemDelete() server.HandlerFunc {
-    return ctrl.handleItemsGeneral()
+//
+// @Summary  Delete Item
+// @Tags     Items
+// @Produce  json
+// @Param    id path string true "Item ID"
+// @Success  204
+// @Router   /v1/items/{id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemDelete() errchain.HandlerFunc {
+    fn := func(r *http.Request, ID uuid.UUID) (any, error) {
+        auth := services.NewContext(r.Context())
+        err := ctrl.repo.Items.DeleteByGroup(auth, auth.GID, ID)
+        return nil, err
+    }
+
+    return adapters.CommandID("id", fn, http.StatusNoContent)
 }

 // HandleItemUpdate godocs
-// @Summary  updates a item
-// @Tags     Items
-// @Produce  json
-// @Param    id path string true "Item ID"
-// @Param    payload body repo.ItemUpdate true "Item Data"
-// @Success  200 {object} repo.ItemOut
-// @Router   /v1/items/{id} [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemUpdate() server.HandlerFunc {
-    return ctrl.handleItemsGeneral()
-}
+//
+// @Summary  Update Item
+// @Tags     Items
+// @Produce  json
+// @Param    id path string true "Item ID"
+// @Param    payload body repo.ItemUpdate true "Item Data"
+// @Success  200 {object} repo.ItemOut
+// @Router   /v1/items/{id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemUpdate() errchain.HandlerFunc {
+    fn := func(r *http.Request, ID uuid.UUID, body repo.ItemUpdate) (repo.ItemOut, error) {
+        auth := services.NewContext(r.Context())

-func (ctrl *V1Controller) handleItemsGeneral() server.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) error {
-        ctx := services.NewContext(r.Context())
-        ID, err := ctrl.routeID(r)
-        if err != nil {
-            return err
-        }
-
-        switch r.Method {
-        case http.MethodGet:
-            items, err := ctrl.repo.Items.GetOneByGroup(r.Context(), ctx.GID, ID)
-            if err != nil {
-                log.Err(err).Msg("failed to get item")
-                return validate.NewRequestError(err, http.StatusInternalServerError)
-            }
-            return server.Respond(w, http.StatusOK, items)
-        case http.MethodDelete:
-            err = ctrl.repo.Items.DeleteByGroup(r.Context(), ctx.GID, ID)
-            if err != nil {
-                log.Err(err).Msg("failed to delete item")
-                return validate.NewRequestError(err, http.StatusInternalServerError)
-            }
-            return server.Respond(w, http.StatusNoContent, nil)
-        case http.MethodPut:
-            body := repo.ItemUpdate{}
-            if err := server.Decode(r, &body); err != nil {
-                log.Err(err).Msg("failed to decode request body")
-                return validate.NewRequestError(err, http.StatusInternalServerError)
-            }
-            body.ID = ID
-            result, err := ctrl.repo.Items.UpdateByGroup(r.Context(), ctx.GID, body)
-            if err != nil {
-                log.Err(err).Msg("failed to update item")
-                return validate.NewRequestError(err, http.StatusInternalServerError)
-            }
-            return server.Respond(w, http.StatusOK, result)
-        }
-
-        return nil
+        body.ID = ID
+        return ctrl.repo.Items.UpdateByGroup(auth, auth.GID, body)
     }

+    return adapters.ActionID("id", fn, http.StatusOK)
 }

 // HandleGetAllCustomFieldNames godocs
-// @Summary  imports items into the database
-// @Tags     Items
-// @Produce  json
-// @Success  200
-// @Router   /v1/items/fields [GET]
-// @Success  200 {object} []string
-// @Security Bearer
-func (ctrl *V1Controller) HandleGetAllCustomFieldNames() server.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) error {
-        ctx := services.NewContext(r.Context())
-
-        v, err := ctrl.repo.Items.GetAllCustomFieldNames(r.Context(), ctx.GID)
-        if err != nil {
-            return err
-        }
-
-        return server.Respond(w, http.StatusOK, v)
+//
+// @Summary  Get All Custom Field Names
+// @Tags     Items
+// @Produce  json
+// @Success  200
+// @Router   /v1/items/fields [GET]
+// @Success  200 {object} []string
+// @Security Bearer
+func (ctrl *V1Controller) HandleGetAllCustomFieldNames() errchain.HandlerFunc {
+    fn := func(r *http.Request) ([]string, error) {
+        auth := services.NewContext(r.Context())
+        return ctrl.repo.Items.GetAllCustomFieldNames(auth, auth.GID)
     }

+    return adapters.Command(fn, http.StatusOK)
 }

 // HandleGetAllCustomFieldValues godocs
-// @Summary  imports items into the database
-// @Tags     Items
-// @Produce  json
-// @Success  200
-// @Router   /v1/items/fields/values [GET]
-// @Success  200 {object} []string
-// @Security Bearer
-func (ctrl *V1Controller) HandleGetAllCustomFieldValues() server.HandlerFunc {
-    return func(w http.ResponseWriter, r *http.Request) error {
-        ctx := services.NewContext(r.Context())
-
-        v, err := ctrl.repo.Items.GetAllCustomFieldValues(r.Context(), ctx.GID, r.URL.Query().Get("field"))
-        if err != nil {
-            return err
-        }
-
-        return server.Respond(w, http.StatusOK, v)
+//
+// @Summary  Get All Custom Field Values
+// @Tags     Items
+// @Produce  json
+// @Success  200
+// @Router   /v1/items/fields/values [GET]
+// @Success  200 {object} []string
+// @Security Bearer
+func (ctrl *V1Controller) HandleGetAllCustomFieldValues() errchain.HandlerFunc {
+    type query struct {
+        Field string `schema:"field" validate:"required"`
+    }
+
+    fn := func(r *http.Request, q query) ([]string, error) {
+        auth := services.NewContext(r.Context())
+        return ctrl.repo.Items.GetAllCustomFieldValues(auth, auth.GID, q.Field)
     }

+    return adapters.Action(fn, http.StatusOK)
+
 }

 // HandleItemsImport godocs
-// @Summary  imports items into the database
-// @Tags     Items
-// @Produce  json
-// @Success  204
-// @Param    csv formData file true "Image to upload"
-// @Router   /v1/items/import [Post]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
+//
+// @Summary  Import Items
+// @Tags     Items
+// @Produce  json
+// @Success  204
+// @Param    csv formData file true "Image to upload"
+// @Router   /v1/items/import [Post]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsImport() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         err := r.ParseMultipartForm(ctrl.maxUploadSize << 20)
         if err != nil {
@@ -264,17 +238,18 @@ func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusNoContent, nil)
+        return server.JSON(w, http.StatusNoContent, nil)
     }
 }

-// HandleItemsImport godocs
-// @Summary  exports items into the database
-// @Tags     Items
-// @Success  200 {string} string "text/csv"
-// @Router   /v1/items/export [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemsExport() server.HandlerFunc {
+// HandleItemsExport godocs
+//
+// @Summary  Export Items
+// @Tags     Items
+// @Success  200 {string} string "text/csv"
+// @Router   /v1/items/export [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemsExport() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         ctx := services.NewContext(r.Context())

@@ -286,7 +261,9 @@ func (ctrl *V1Controller) HandleItemsExport() server.HandlerFunc {

         w.Header().Set("Content-Type", "text/tsv")
         w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv")
+
+        writer := csv.NewWriter(w)
+        writer.Comma = '\t'
         return writer.WriteAll(csvData)
     }
 }
```
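The export hunk switches the response to tab-separated output by pointing `encoding/csv` at a tab comma. A standalone sketch of that write path with placeholder rows:

```go
package main

import (
    "encoding/csv"
    "net/http"
)

// exportTSV mirrors the export path above: set TSV headers, then stream
// rows through encoding/csv with a tab separator. The row data is a
// placeholder for illustration.
func exportTSV(w http.ResponseWriter, rows [][]string) error {
    w.Header().Set("Content-Type", "text/tsv")
    w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv")

    writer := csv.NewWriter(w)
    writer.Comma = '\t'
    return writer.WriteAll(rows) // WriteAll flushes internally
}

func main() {
    http.HandleFunc("/v1/items/export", func(w http.ResponseWriter, r *http.Request) {
        _ = exportTSV(w, [][]string{
            {"Name", "Quantity"},
            {"Screwdriver", "3"},
        })
    })
    _ = http.ListenAndServe(":8080", nil)
}
```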
```diff
@@ -8,7 +8,8 @@ import (
     "github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
     "github.com/hay-kot/homebox/backend/internal/data/repo"
     "github.com/hay-kot/homebox/backend/internal/sys/validate"
-    "github.com/hay-kot/homebox/backend/pkgs/server"
+    "github.com/hay-kot/safeserve/errchain"
+    "github.com/hay-kot/safeserve/server"
     "github.com/rs/zerolog/log"
 )
@@ -18,19 +19,20 @@ type (
     }
 )

-// HandleItemsImport godocs
-// @Summary  imports items into the database
-// @Tags     Items Attachments
-// @Produce  json
-// @Param    id path string true "Item ID"
-// @Param    file formData file true "File attachment"
-// @Param    type formData string true "Type of file"
-// @Param    name formData string true "name of the file including extension"
-// @Success  200 {object} repo.ItemOut
-// @Failure  422 {object} server.ErrorResponse
-// @Router   /v1/items/{id}/attachments [POST]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
+// HandleItemAttachmentCreate godocs
+//
+// @Summary  Create Item Attachment
+// @Tags     Items Attachments
+// @Produce  json
+// @Param    id path string true "Item ID"
+// @Param    file formData file true "File attachment"
+// @Param    type formData string true "Type of file"
+// @Param    name formData string true "name of the file including extension"
+// @Success  200 {object} repo.ItemOut
+// @Failure  422 {object} mid.ErrorResponse
+// @Router   /v1/items/{id}/attachments [POST]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentCreate() errchain.HandlerFunc {
     return func(w http.ResponseWriter, r *http.Request) error {
         err := r.ParseMultipartForm(ctrl.maxUploadSize << 20)
         if err != nil {
@@ -60,7 +62,7 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
         }

         if !errs.Nil() {
-            return server.Respond(w, http.StatusUnprocessableEntity, errs)
+            return server.JSON(w, http.StatusUnprocessableEntity, errs)
         }

         attachmentType := r.FormValue("type")
@@ -87,45 +89,48 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusCreated, item)
+        return server.JSON(w, http.StatusCreated, item)
     }
 }

 // HandleItemAttachmentGet godocs
-// @Summary  retrieves an attachment for an item
-// @Tags     Items Attachments
-// @Produce  application/octet-stream
-// @Param    id path string true "Item ID"
-// @Param    attachment_id path string true "Attachment ID"
-// @Success  200 {object} ItemAttachmentToken
-// @Router   /v1/items/{id}/attachments/{attachment_id} [GET]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentGet() server.HandlerFunc {
+//
+// @Summary  Get Item Attachment
+// @Tags     Items Attachments
+// @Produce  application/octet-stream
+// @Param    id path string true "Item ID"
+// @Param    attachment_id path string true "Attachment ID"
+// @Success  200 {object} ItemAttachmentToken
+// @Router   /v1/items/{id}/attachments/{attachment_id} [GET]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentGet() errchain.HandlerFunc {
     return ctrl.handleItemAttachmentsHandler
 }

 // HandleItemAttachmentDelete godocs
-// @Summary  retrieves an attachment for an item
-// @Tags     Items Attachments
-// @Param    id path string true "Item ID"
-// @Param    attachment_id path string true "Attachment ID"
-// @Success  204
-// @Router   /v1/items/{id}/attachments/{attachment_id} [DELETE]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentDelete() server.HandlerFunc {
+//
+// @Summary  Delete Item Attachment
+// @Tags     Items Attachments
+// @Param    id path string true "Item ID"
+// @Param    attachment_id path string true "Attachment ID"
+// @Success  204
+// @Router   /v1/items/{id}/attachments/{attachment_id} [DELETE]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentDelete() errchain.HandlerFunc {
     return ctrl.handleItemAttachmentsHandler
 }

 // HandleItemAttachmentUpdate godocs
-// @Summary  retrieves an attachment for an item
-// @Tags     Items Attachments
-// @Param    id path string true "Item ID"
-// @Param    attachment_id path string true "Attachment ID"
-// @Param    payload body repo.ItemAttachmentUpdate true "Attachment Update"
-// @Success  200 {object} repo.ItemOut
-// @Router   /v1/items/{id}/attachments/{attachment_id} [PUT]
-// @Security Bearer
-func (ctrl *V1Controller) HandleItemAttachmentUpdate() server.HandlerFunc {
+//
+// @Summary  Update Item Attachment
+// @Tags     Items Attachments
+// @Param    id path string true "Item ID"
+// @Param    attachment_id path string true "Attachment ID"
+// @Param    payload body repo.ItemAttachmentUpdate true "Attachment Update"
+// @Success  200 {object} repo.ItemOut
+// @Router   /v1/items/{id}/attachments/{attachment_id} [PUT]
+// @Security Bearer
+func (ctrl *V1Controller) HandleItemAttachmentUpdate() errchain.HandlerFunc {
     return ctrl.handleItemAttachmentsHandler
 }

@@ -160,7 +165,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusNoContent, nil)
+        return server.JSON(w, http.StatusNoContent, nil)

     // Update Attachment Handler
     case http.MethodPut:
@@ -178,7 +183,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
             return validate.NewRequestError(err, http.StatusInternalServerError)
         }

-        return server.Respond(w, http.StatusOK, val)
+        return server.JSON(w, http.StatusOK, val)
     }

     return nil
```
@@ -3,141 +3,100 @@ package v1
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/hay-kot/homebox/backend/internal/web/adapters"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
)
|
||||
|
||||
// HandleLabelsGetAll godoc
|
||||
// @Summary Get All Labels
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Success 200 {object} server.Results{items=[]repo.LabelOut}
|
||||
// @Router /v1/labels [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelsGetAll() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
user := services.UseUserCtx(r.Context())
|
||||
labels, err := ctrl.repo.Labels.GetAll(r.Context(), user.GroupID)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("error getting labels")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusOK, server.Results{Items: labels})
|
||||
//
|
||||
// @Summary Get All Labels
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Success 200 {object} []repo.LabelOut
|
||||
// @Router /v1/labels [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelsGetAll() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request) ([]repo.LabelSummary, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Labels.GetAll(auth, auth.GID)
|
||||
}
|
||||
|
||||
return adapters.Command(fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleLabelsCreate godoc
|
||||
// @Summary Create a new label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param payload body repo.LabelCreate true "Label Data"
|
||||
// @Success 200 {object} repo.LabelSummary
|
||||
// @Router /v1/labels [POST]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelsCreate() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
createData := repo.LabelCreate{}
|
||||
if err := server.Decode(r, &createData); err != nil {
|
||||
log.Err(err).Msg("error decoding label create data")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
user := services.UseUserCtx(r.Context())
|
||||
label, err := ctrl.repo.Labels.Create(r.Context(), user.GroupID, createData)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("error creating label")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusCreated, label)
|
||||
//
|
||||
// @Summary Create Label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param payload body repo.LabelCreate true "Label Data"
|
||||
// @Success 200 {object} repo.LabelSummary
|
||||
// @Router /v1/labels [POST]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelsCreate() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, data repo.LabelCreate) (repo.LabelOut, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Labels.Create(auth, auth.GID, data)
|
||||
}
|
||||
|
||||
return adapters.Action(fn, http.StatusCreated)
|
||||
}
|
||||
|
||||
// HandleLabelDelete godocs
|
||||
// @Summary deletes a label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param id path string true "Label ID"
|
||||
// @Success 204
|
||||
// @Router /v1/labels/{id} [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelDelete() server.HandlerFunc {
|
||||
return ctrl.handleLabelsGeneral()
|
||||
//
|
||||
// @Summary Delete Label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param id path string true "Label ID"
|
||||
// @Success 204
|
||||
// @Router /v1/labels/{id} [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelDelete() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID) (any, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
err := ctrl.repo.Labels.DeleteByGroup(auth, auth.GID, ID)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return adapters.CommandID("id", fn, http.StatusNoContent)
|
||||
}
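HandleLabelsGetAll, HandleLabelsCreate, and HandleLabelDelete all delegate to the internal/web/adapters helpers (Command, Action, CommandID), whose source is not shown in this diff. As a hedged sketch only, a generic CommandID-style helper could look roughly like this; the real homebox implementation may differ.

package sketch

import (
	"encoding/json"
	"net/http"

	"github.com/go-chi/chi/v5"
	"github.com/google/uuid"
)

// commandID is an illustrative stand-in for adapters.CommandID: read a UUID
// route parameter, run the domain function, and JSON-encode its result.
func commandID[T any](param string, fn func(*http.Request, uuid.UUID) (T, error), status int) func(http.ResponseWriter, *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
		id, err := uuid.Parse(chi.URLParam(r, param))
		if err != nil {
			return err
		}

		result, err := fn(r, id)
		if err != nil {
			return err
		}

		if status == http.StatusNoContent {
			w.WriteHeader(status)
			return nil
		}

		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(status)
		return json.NewEncoder(w).Encode(result)
	}
}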
|
||||
|
||||
// HandleLabelGet godocs
|
||||
// @Summary Gets a label and fields
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param id path string true "Label ID"
|
||||
// @Success 200 {object} repo.LabelOut
|
||||
// @Router /v1/labels/{id} [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelGet() server.HandlerFunc {
|
||||
return ctrl.handleLabelsGeneral()
|
||||
//
|
||||
// @Summary Get Label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param id path string true "Label ID"
|
||||
// @Success 200 {object} repo.LabelOut
|
||||
// @Router /v1/labels/{id} [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelGet() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID) (repo.LabelOut, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Labels.GetOneByGroup(auth, auth.GID, ID)
|
||||
}
|
||||
|
||||
return adapters.CommandID("id", fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleLabelUpdate godocs
|
||||
// @Summary updates a label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param id path string true "Label ID"
|
||||
// @Success 200 {object} repo.LabelOut
|
||||
// @Router /v1/labels/{id} [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelUpdate() server.HandlerFunc {
|
||||
return ctrl.handleLabelsGeneral()
|
||||
}
|
||||
|
||||
func (ctrl *V1Controller) handleLabelsGeneral() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := services.NewContext(r.Context())
|
||||
ID, err := ctrl.routeID(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
labels, err := ctrl.repo.Labels.GetOneByGroup(r.Context(), ctx.GID, ID)
|
||||
if err != nil {
|
||||
if ent.IsNotFound(err) {
|
||||
log.Err(err).
|
||||
Str("id", ID.String()).
|
||||
Msg("label not found")
|
||||
return validate.NewRequestError(err, http.StatusNotFound)
|
||||
}
|
||||
log.Err(err).Msg("error getting label")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusOK, labels)
|
||||
|
||||
case http.MethodDelete:
|
||||
err = ctrl.repo.Labels.DeleteByGroup(ctx, ctx.GID, ID)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("error deleting label")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusNoContent, nil)
|
||||
|
||||
case http.MethodPut:
|
||||
body := repo.LabelUpdate{}
|
||||
if err := server.Decode(r, &body); err != nil {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
body.ID = ID
|
||||
result, err := ctrl.repo.Labels.UpdateByGroup(ctx, ctx.GID, body)
|
||||
if err != nil {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusOK, result)
|
||||
}
|
||||
|
||||
return nil
|
||||
//
|
||||
// @Summary Update Label
|
||||
// @Tags Labels
|
||||
// @Produce json
|
||||
// @Param id path string true "Label ID"
|
||||
// @Success 200 {object} repo.LabelOut
|
||||
// @Router /v1/labels/{id} [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLabelUpdate() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID, data repo.LabelUpdate) (repo.LabelOut, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
data.ID = ID
|
||||
return ctrl.repo.Labels.UpdateByGroup(auth, auth.GID, data)
|
||||
}
|
||||
|
||||
return adapters.ActionID("id", fn, http.StatusOK)
|
||||
}
|
||||
|
||||
@@ -3,186 +3,120 @@ package v1
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/hay-kot/homebox/backend/internal/web/adapters"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
)
|
||||
|
||||
// HandleLocationTreeQuery godoc
|
||||
// @Summary Get All Locations
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param withItems query bool false "include items in response tree"
|
||||
// @Success 200 {object} server.Results{items=[]repo.TreeItem}
|
||||
// @Router /v1/locations/tree [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationTreeQuery() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
user := services.UseUserCtx(r.Context())
|
||||
|
||||
q := r.URL.Query()
|
||||
|
||||
withItems := queryBool(q.Get("withItems"))
|
||||
|
||||
locTree, err := ctrl.repo.Locations.Tree(
|
||||
r.Context(),
|
||||
user.GroupID,
|
||||
repo.TreeQuery{
|
||||
WithItems: withItems,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to get locations tree")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, server.Results{Items: locTree})
|
||||
// HandleLocationTreeQuery
|
||||
//
|
||||
// @Summary Get Locations Tree
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param withItems query bool false "include items in response tree"
|
||||
// @Success 200 {object} []repo.TreeItem
|
||||
// @Router /v1/locations/tree [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationTreeQuery() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, query repo.TreeQuery) ([]repo.TreeItem, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Locations.Tree(auth, auth.GID, query)
|
||||
}
|
||||
|
||||
return adapters.Query(fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleLocationGetAll godoc
|
||||
// @Summary Get All Locations
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param filterChildren query bool false "Filter locations with parents"
|
||||
// @Success 200 {object} server.Results{items=[]repo.LocationOutCount}
|
||||
// @Router /v1/locations [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationGetAll() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
user := services.UseUserCtx(r.Context())
|
||||
|
||||
q := r.URL.Query()
|
||||
|
||||
filter := repo.LocationQuery{
|
||||
FilterChildren: queryBool(q.Get("filterChildren")),
|
||||
}
|
||||
|
||||
locations, err := ctrl.repo.Locations.GetAll(r.Context(), user.GroupID, filter)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to get locations")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, server.Results{Items: locations})
|
||||
// HandleLocationGetAll
|
||||
//
|
||||
// @Summary Get All Locations
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param filterChildren query bool false "Filter locations with parents"
|
||||
// @Success 200 {object} []repo.LocationOutCount
|
||||
// @Router /v1/locations [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationGetAll() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, q repo.LocationQuery) ([]repo.LocationOutCount, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Locations.GetAll(auth, auth.GID, q)
|
||||
}
|
||||
|
||||
return adapters.Query(fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleLocationCreate godoc
|
||||
// @Summary Create a new location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param payload body repo.LocationCreate true "Location Data"
|
||||
// @Success 200 {object} repo.LocationSummary
|
||||
// @Router /v1/locations [POST]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationCreate() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
createData := repo.LocationCreate{}
|
||||
if err := server.Decode(r, &createData); err != nil {
|
||||
log.Err(err).Msg("failed to decode location create data")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
user := services.UseUserCtx(r.Context())
|
||||
location, err := ctrl.repo.Locations.Create(r.Context(), user.GroupID, createData)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to create location")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusCreated, location)
|
||||
// HandleLocationCreate
|
||||
//
|
||||
// @Summary Create Location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param payload body repo.LocationCreate true "Location Data"
|
||||
// @Success 200 {object} repo.LocationSummary
|
||||
// @Router /v1/locations [POST]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationCreate() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, createData repo.LocationCreate) (repo.LocationOut, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Locations.Create(auth, auth.GID, createData)
|
||||
}
|
||||
|
||||
return adapters.Action(fn, http.StatusCreated)
|
||||
}
|
||||
|
||||
// HandleLocationDelete godocs
|
||||
// @Summary deletes a location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param id path string true "Location ID"
|
||||
// @Success 204
|
||||
// @Router /v1/locations/{id} [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationDelete() server.HandlerFunc {
|
||||
return ctrl.handleLocationGeneral()
|
||||
}
|
||||
|
||||
// HandleLocationGet godocs
|
||||
// @Summary Gets a location and fields
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param id path string true "Location ID"
|
||||
// @Success 200 {object} repo.LocationOut
|
||||
// @Router /v1/locations/{id} [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationGet() server.HandlerFunc {
|
||||
return ctrl.handleLocationGeneral()
|
||||
}
|
||||
|
||||
// HandleLocationUpdate godocs
|
||||
// @Summary updates a location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param id path string true "Location ID"
|
||||
// @Param payload body repo.LocationUpdate true "Location Data"
|
||||
// @Success 200 {object} repo.LocationOut
|
||||
// @Router /v1/locations/{id} [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationUpdate() server.HandlerFunc {
|
||||
return ctrl.handleLocationGeneral()
|
||||
}
|
||||
|
||||
func (ctrl *V1Controller) handleLocationGeneral() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := services.NewContext(r.Context())
|
||||
ID, err := ctrl.routeID(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
location, err := ctrl.repo.Locations.GetOneByGroup(r.Context(), ctx.GID, ID)
|
||||
if err != nil {
|
||||
l := log.Err(err).
|
||||
Str("ID", ID.String()).
|
||||
Str("GID", ctx.GID.String())
|
||||
|
||||
if ent.IsNotFound(err) {
|
||||
l.Msg("location not found")
|
||||
return validate.NewRequestError(err, http.StatusNotFound)
|
||||
}
|
||||
|
||||
l.Msg("failed to get location")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusOK, location)
|
||||
case http.MethodPut:
|
||||
body := repo.LocationUpdate{}
|
||||
if err := server.Decode(r, &body); err != nil {
|
||||
log.Err(err).Msg("failed to decode location update data")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
body.ID = ID
|
||||
|
||||
result, err := ctrl.repo.Locations.UpdateOneByGroup(r.Context(), ctx.GID, ID, body)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to update location")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusOK, result)
|
||||
case http.MethodDelete:
|
||||
err = ctrl.repo.Locations.DeleteByGroup(r.Context(), ctx.GID, ID)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to delete location")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusNoContent, nil)
|
||||
}
|
||||
return nil
|
||||
// HandleLocationDelete
|
||||
//
|
||||
// @Summary Delete Location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param id path string true "Location ID"
|
||||
// @Success 204
|
||||
// @Router /v1/locations/{id} [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationDelete() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID) (any, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
err := ctrl.repo.Locations.DeleteByGroup(auth, auth.GID, ID)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return adapters.CommandID("id", fn, http.StatusNoContent)
|
||||
}
|
||||
|
||||
// HandleLocationGet
|
||||
//
|
||||
// @Summary Get Location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param id path string true "Location ID"
|
||||
// @Success 200 {object} repo.LocationOut
|
||||
// @Router /v1/locations/{id} [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationGet() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID) (repo.LocationOut, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Locations.GetOneByGroup(auth, auth.GID, ID)
|
||||
}
|
||||
|
||||
return adapters.CommandID("id", fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleLocationUpdate
|
||||
//
|
||||
// @Summary Update Location
|
||||
// @Tags Locations
|
||||
// @Produce json
|
||||
// @Param id path string true "Location ID"
|
||||
// @Param payload body repo.LocationUpdate true "Location Data"
|
||||
// @Success 200 {object} repo.LocationOut
|
||||
// @Router /v1/locations/{id} [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleLocationUpdate() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID, body repo.LocationUpdate) (repo.LocationOut, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
body.ID = ID
|
||||
return ctrl.repo.Locations.UpdateByGroup(auth, auth.GID, ID, body)
|
||||
}
|
||||
|
||||
return adapters.ActionID("id", fn, http.StatusOK)
|
||||
}
|
||||
|
||||
@@ -2,132 +2,81 @@ package v1
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/hay-kot/homebox/backend/internal/web/adapters"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
)
|
||||
|
||||
// HandleMaintenanceGetLog godoc
|
||||
// @Summary Get Maintenance Log
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Success 200 {object} repo.MaintenanceLog
|
||||
// @Router /v1/items/{id}/maintenance [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceLogGet() server.HandlerFunc {
|
||||
return ctrl.handleMaintenanceLog()
|
||||
//
|
||||
// @Summary Get Maintenance Log
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Success 200 {object} repo.MaintenanceLog
|
||||
// @Router /v1/items/{id}/maintenance [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceLogGet() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, ID uuid.UUID, q repo.MaintenanceLogQuery) (repo.MaintenanceLog, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.MaintEntry.GetLog(auth, auth.GID, ID, q)
|
||||
}
|
||||
|
||||
return adapters.QueryID("id", fn, http.StatusOK)
|
||||
}
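The old combined handler further down in this diff parsed the completed and scheduled flags by hand with strconv.ParseBool; the adapter version decodes them into repo.MaintenanceLogQuery instead. A small sketch of an equivalent query type and a manual decoder follows; the field names and tags here are assumptions, not the repo package's actual definition.

package sketch

import (
	"net/http"
	"strconv"
)

// maintenanceLogQuery approximates the query object the adapter decodes.
type maintenanceLogQuery struct {
	Completed bool `schema:"completed"`
	Scheduled bool `schema:"scheduled"`
}

// decodeMaintenanceLogQuery does by hand what the adapter presumably automates.
func decodeMaintenanceLogQuery(r *http.Request) maintenanceLogQuery {
	q := r.URL.Query()
	completed, _ := strconv.ParseBool(q.Get("completed"))
	scheduled, _ := strconv.ParseBool(q.Get("scheduled"))
	return maintenanceLogQuery{Completed: completed, Scheduled: scheduled}
}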
|
||||
|
||||
// HandleMaintenanceEntryCreate godoc
|
||||
// @Summary Create Maintenance Entry
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Param payload body repo.MaintenanceEntryCreate true "Entry Data"
|
||||
// @Success 200 {object} repo.MaintenanceEntry
|
||||
// @Router /v1/items/{id}/maintenance [POST]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceEntryCreate() server.HandlerFunc {
|
||||
return ctrl.handleMaintenanceLog()
|
||||
//
|
||||
// @Summary Create Maintenance Entry
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Param payload body repo.MaintenanceEntryCreate true "Entry Data"
|
||||
// @Success 201 {object} repo.MaintenanceEntry
|
||||
// @Router /v1/items/{id}/maintenance [POST]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceEntryCreate() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, itemID uuid.UUID, body repo.MaintenanceEntryCreate) (repo.MaintenanceEntry, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.MaintEntry.Create(auth, itemID, body)
|
||||
}
|
||||
|
||||
return adapters.ActionID("id", fn, http.StatusCreated)
|
||||
}
|
||||
|
||||
// HandleMaintenanceEntryDelete godoc
|
||||
// @Summary Delete Maintenance Entry
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Success 204
|
||||
// @Router /v1/items/{id}/maintenance/{entry_id} [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceEntryDelete() server.HandlerFunc {
|
||||
return ctrl.handleMaintenanceLog()
|
||||
//
|
||||
// @Summary Delete Maintenance Entry
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Success 204
|
||||
// @Router /v1/items/{id}/maintenance/{entry_id} [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceEntryDelete() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, entryID uuid.UUID) (any, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
err := ctrl.repo.MaintEntry.Delete(auth, entryID)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return adapters.CommandID("entry_id", fn, http.StatusNoContent)
|
||||
}
|
||||
|
||||
// HandleMaintenanceEntryUpdate godoc
|
||||
// @Summary Update Maintenance Entry
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Param payload body repo.MaintenanceEntryUpdate true "Entry Data"
|
||||
// @Success 200 {object} repo.MaintenanceEntry
|
||||
// @Router /v1/items/{id}/maintenance/{entry_id} [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() server.HandlerFunc {
|
||||
return ctrl.handleMaintenanceLog()
|
||||
}
|
||||
|
||||
func (ctrl *V1Controller) handleMaintenanceLog() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := services.NewContext(r.Context())
|
||||
itemID, err := ctrl.routeID(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch r.Method {
|
||||
case http.MethodGet:
|
||||
completed, _ := strconv.ParseBool(r.URL.Query().Get("completed"))
|
||||
scheduled, _ := strconv.ParseBool(r.URL.Query().Get("scheduled"))
|
||||
query := repo.MaintenanceLogQuery{
|
||||
Completed: completed,
|
||||
Scheduled: scheduled,
|
||||
}
|
||||
|
||||
mlog, err := ctrl.repo.MaintEntry.GetLog(ctx, itemID, query)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to get items")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
return server.Respond(w, http.StatusOK, mlog)
|
||||
case http.MethodPost:
|
||||
var create repo.MaintenanceEntryCreate
|
||||
err := server.Decode(r, &create)
|
||||
if err != nil {
|
||||
return validate.NewRequestError(err, http.StatusBadRequest)
|
||||
}
|
||||
|
||||
entry, err := ctrl.repo.MaintEntry.Create(ctx, itemID, create)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to create item")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusCreated, entry)
|
||||
case http.MethodPut:
|
||||
entryID, err := ctrl.routeUUID(r, "entry_id")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var update repo.MaintenanceEntryUpdate
|
||||
err = server.Decode(r, &update)
|
||||
if err != nil {
|
||||
return validate.NewRequestError(err, http.StatusBadRequest)
|
||||
}
|
||||
|
||||
entry, err := ctrl.repo.MaintEntry.Update(ctx, entryID, update)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to update item")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, entry)
|
||||
case http.MethodDelete:
|
||||
entryID, err := ctrl.routeUUID(r, "entry_id")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = ctrl.repo.MaintEntry.Delete(ctx, entryID)
|
||||
if err != nil {
|
||||
log.Err(err).Msg("failed to delete item")
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusNoContent, nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
//
|
||||
// @Summary Update Maintenance Entry
|
||||
// @Tags Maintenance
|
||||
// @Produce json
|
||||
// @Param payload body repo.MaintenanceEntryUpdate true "Entry Data"
|
||||
// @Success 200 {object} repo.MaintenanceEntry
|
||||
// @Router /v1/items/{id}/maintenance/{entry_id} [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request, entryID uuid.UUID, body repo.MaintenanceEntryUpdate) (repo.MaintenanceEntry, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.MaintEntry.Update(auth, entryID, body)
|
||||
}
|
||||
|
||||
return adapters.ActionID("entry_id", fn, http.StatusOK)
|
||||
}
|
||||
|
||||
105
backend/app/api/handlers/v1/v1_ctrl_notifiers.go
Normal file
@@ -0,0 +1,105 @@
package v1

import (
	"net/http"

	"github.com/containrrr/shoutrrr"
	"github.com/google/uuid"
	"github.com/hay-kot/homebox/backend/internal/core/services"
	"github.com/hay-kot/homebox/backend/internal/data/repo"
	"github.com/hay-kot/homebox/backend/internal/web/adapters"
	"github.com/hay-kot/safeserve/errchain"
)

// HandleGetUserNotifiers godoc
//
// @Summary Get Notifiers
// @Tags Notifiers
// @Produce json
// @Success 200 {object} []repo.NotifierOut
// @Router /v1/notifiers [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGetUserNotifiers() errchain.HandlerFunc {
	fn := func(r *http.Request, _ struct{}) ([]repo.NotifierOut, error) {
		user := services.UseUserCtx(r.Context())
		return ctrl.repo.Notifiers.GetByUser(r.Context(), user.ID)
	}

	return adapters.Query(fn, http.StatusOK)
}

// HandleCreateNotifier godoc
//
// @Summary Create Notifier
// @Tags Notifiers
// @Produce json
// @Param payload body repo.NotifierCreate true "Notifier Data"
// @Success 200 {object} repo.NotifierOut
// @Router /v1/notifiers [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleCreateNotifier() errchain.HandlerFunc {
	fn := func(r *http.Request, in repo.NotifierCreate) (repo.NotifierOut, error) {
		auth := services.NewContext(r.Context())
		return ctrl.repo.Notifiers.Create(auth, auth.GID, auth.UID, in)
	}

	return adapters.Action(fn, http.StatusCreated)
}

// HandleDeleteNotifier godocs
//
// @Summary Delete a Notifier
// @Tags Notifiers
// @Param id path string true "Notifier ID"
// @Success 204
// @Router /v1/notifiers/{id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleDeleteNotifier() errchain.HandlerFunc {
	fn := func(r *http.Request, ID uuid.UUID) (any, error) {
		auth := services.NewContext(r.Context())
		return nil, ctrl.repo.Notifiers.Delete(auth, auth.UID, ID)
	}

	return adapters.CommandID("id", fn, http.StatusNoContent)
}

// HandleUpdateNotifier godocs
//
// @Summary Update Notifier
// @Tags Notifiers
// @Param id path string true "Notifier ID"
// @Param payload body repo.NotifierUpdate true "Notifier Data"
// @Success 200 {object} repo.NotifierOut
// @Router /v1/notifiers/{id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleUpdateNotifier() errchain.HandlerFunc {
	fn := func(r *http.Request, ID uuid.UUID, in repo.NotifierUpdate) (repo.NotifierOut, error) {
		auth := services.NewContext(r.Context())
		return ctrl.repo.Notifiers.Update(auth, auth.UID, ID, in)
	}

	return adapters.ActionID("id", fn, http.StatusOK)
}

// HandlerNotifierTest godoc
//
// @Summary Test Notifier
// @Tags Notifiers
// @Produce json
// @Param id path string true "Notifier ID"
// @Param url query string true "URL"
// @Success 204
// @Router /v1/notifiers/test [POST]
// @Security Bearer
func (ctrl *V1Controller) HandlerNotifierTest() errchain.HandlerFunc {
	type body struct {
		URL string `json:"url" validate:"required"`
	}

	fn := func(r *http.Request, q body) (any, error) {
		err := shoutrrr.Send(q.URL, "Test message from Homebox")
		return nil, err
	}

	return adapters.Action(fn, http.StatusOK)
}
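HandlerNotifierTest hands the user-supplied URL straight to shoutrrr. For reference, the same call in isolation looks like this; the gotify URL is a made-up example, and real deployments would substitute whatever shoutrrr service URL they actually use.

package main

import (
	"log"

	"github.com/containrrr/shoutrrr"
)

func main() {
	// Hypothetical service URL in shoutrrr's URL format.
	url := "gotify://gotify.example.com/app-token"
	if err := shoutrrr.Send(url, "Test message from Homebox"); err != nil {
		log.Fatal(err)
	}
}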
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/hay-kot/homebox/backend/internal/web/adapters"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
"github.com/yeqown/go-qrcode/v2"
|
||||
"github.com/yeqown/go-qrcode/writer/standard"
|
||||
|
||||
@@ -19,32 +19,31 @@ var qrcodeLogo []byte
// HandleGenerateQRCode godoc
//
// @Summary Encode data into QRCode
// @Summary Create QR Code
// @Tags Items
// @Produce json
// @Param data query string false "data to be encoded into qrcode"
// @Success 200 {string} string "image/jpeg"
// @Router /v1/qrcode [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGenerateQRCode() server.HandlerFunc {
const MaxLength = 4_296 // assume alphanumeric characters only
func (ctrl *V1Controller) HandleGenerateQRCode() errchain.HandlerFunc {
type query struct {
// 4,296 characters is the maximum length of a QR code
Data string `schema:"data" validate:"required,max=4296"`
}

return func(w http.ResponseWriter, r *http.Request) error {
data := r.URL.Query().Get("data")
q, err := adapters.DecodeQuery[query](r)
if err != nil {
return err
}

image, err := png.Decode(bytes.NewReader(qrcodeLogo))
if err != nil {
panic(err)
}

if len(data) > MaxLength {
return validate.NewFieldErrors(validate.FieldError{
Field: "data",
Error: "max length is 4,296 characters exceeded",
})
}

qrc, err := qrcode.New(data)
qrc, err := qrcode.New(q.Data)
if err != nil {
return err
}
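adapters.DecodeQuery is used above to populate the query struct from its schema tags; its implementation is not part of this diff. One plausible shape for such a helper, assuming gorilla/schema underneath (an assumption, not the actual homebox code), is sketched here.

package sketch

import (
	"net/http"

	"github.com/gorilla/schema"
)

var queryDecoder = schema.NewDecoder()

// decodeQuery maps URL query parameters onto a struct via its `schema` tags.
func decodeQuery[T any](r *http.Request) (T, error) {
	var v T
	err := queryDecoder.Decode(&v, r.URL.Query())
	return v, err
}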
|
||||
|
||||
@@ -4,18 +4,18 @@ import (
|
||||
"net/http"
|
||||
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
)
|
||||
|
||||
// HandleBillOfMaterialsExport godoc
|
||||
//
|
||||
// @Summary Generates a Bill of Materials CSV
|
||||
// @Summary Export Bill of Materials
|
||||
// @Tags Reporting
|
||||
// @Produce json
|
||||
// @Success 200 {string} string "text/csv"
|
||||
// @Router /v1/reporting/bill-of-materials [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleBillOfMaterialsExport() server.HandlerFunc {
|
||||
func (ctrl *V1Controller) HandleBillOfMaterialsExport() errchain.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
actor := services.UseUserCtx(r.Context())
|
||||
|
||||
|
||||
@@ -5,80 +5,75 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/hay-kot/homebox/backend/internal/web/adapters"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
"github.com/hay-kot/safeserve/server"
|
||||
)
|
||||
|
||||
// HandleGroupGet godoc
|
||||
// @Summary Get the current user's group statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} []repo.TotalsByOrganizer
|
||||
// @Router /v1/groups/statistics/locations [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatisticsLocations() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := services.NewContext(r.Context())
|
||||
|
||||
stats, err := ctrl.repo.Groups.StatsLocationsByPurchasePrice(ctx, ctx.GID)
|
||||
if err != nil {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, stats)
|
||||
//
|
||||
// @Summary Get Location Statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} []repo.TotalsByOrganizer
|
||||
// @Router /v1/groups/statistics/locations [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatisticsLocations() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Groups.StatsLocationsByPurchasePrice(auth, auth.GID)
|
||||
}
|
||||
|
||||
return adapters.Command(fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleGroupGet godoc
|
||||
// @Summary Get the current user's group statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} []repo.TotalsByOrganizer
|
||||
// @Router /v1/groups/statistics/labels [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatisticsLabels() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := services.NewContext(r.Context())
|
||||
|
||||
stats, err := ctrl.repo.Groups.StatsLabelsByPurchasePrice(ctx, ctx.GID)
|
||||
if err != nil {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, stats)
|
||||
// HandleGroupStatisticsLabels godoc
|
||||
//
|
||||
// @Summary Get Label Statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} []repo.TotalsByOrganizer
|
||||
// @Router /v1/groups/statistics/labels [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatisticsLabels() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Groups.StatsLabelsByPurchasePrice(auth, auth.GID)
|
||||
}
|
||||
|
||||
return adapters.Command(fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleGroupGet godoc
|
||||
// @Summary Get the current user's group statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} repo.GroupStatistics
|
||||
// @Router /v1/groups/statistics [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatistics() server.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := services.NewContext(r.Context())
|
||||
|
||||
stats, err := ctrl.repo.Groups.StatsGroup(ctx, ctx.GID)
|
||||
if err != nil {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, stats)
|
||||
// HandleGroupStatistics godoc
|
||||
//
|
||||
// @Summary Get Group Statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} repo.GroupStatistics
|
||||
// @Router /v1/groups/statistics [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatistics() errchain.HandlerFunc {
|
||||
fn := func(r *http.Request) (repo.GroupStatistics, error) {
|
||||
auth := services.NewContext(r.Context())
|
||||
return ctrl.repo.Groups.StatsGroup(auth, auth.GID)
|
||||
}
|
||||
|
||||
return adapters.Command(fn, http.StatusOK)
|
||||
}
|
||||
|
||||
// HandleGroupGet godoc
|
||||
// @Summary Queries the changes overtime of the purchase price over time
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} repo.ValueOverTime
|
||||
// @Param start query string false "start date"
|
||||
// @Param end query string false "end date"
|
||||
// @Router /v1/groups/statistics/purchase-price [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() server.HandlerFunc {
|
||||
// HandleGroupStatisticsPriceOverTime godoc
|
||||
//
|
||||
// @Summary Get Purchase Price Statistics
|
||||
// @Tags Statistics
|
||||
// @Produce json
|
||||
// @Success 200 {object} repo.ValueOverTime
|
||||
// @Param start query string false "start date"
|
||||
// @Param end query string false "end date"
|
||||
// @Router /v1/groups/statistics/purchase-price [GET]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() errchain.HandlerFunc {
|
||||
parseDate := func(datestr string, defaultDate time.Time) (time.Time, error) {
|
||||
if datestr == "" {
|
||||
return defaultDate, nil
|
||||
@@ -104,6 +99,6 @@ func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() server.HandlerFun
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, stats)
|
||||
return server.JSON(w, http.StatusOK, stats)
|
||||
}
|
||||
}
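The hunk above cuts away after parseDate's empty-string branch. Assuming the start and end query parameters arrive as plain YYYY-MM-DD strings (the handler's real layout is outside this excerpt), the helper presumably continues along these lines.

parseDate := func(datestr string, defaultDate time.Time) (time.Time, error) {
	if datestr == "" {
		return defaultDate, nil
	}
	// Assumed layout; the actual format string is not visible in this diff.
	return time.Parse("2006-01-02", datestr)
}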
|
||||
|
||||
@@ -8,18 +8,20 @@ import (
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
"github.com/hay-kot/safeserve/server"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// HandleUserSelf godoc
|
||||
// @Summary Get the current user
|
||||
// @Tags User
|
||||
// @Produce json
|
||||
// @Param payload body services.UserRegistration true "User Data"
|
||||
// @Success 204
|
||||
// @Router /v1/users/register [Post]
|
||||
func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
|
||||
// HandleUserRegistration godoc
|
||||
//
|
||||
// @Summary Register New User
|
||||
// @Tags User
|
||||
// @Produce json
|
||||
// @Param payload body services.UserRegistration true "User Data"
|
||||
// @Success 204
|
||||
// @Router /v1/users/register [Post]
|
||||
func (ctrl *V1Controller) HandleUserRegistration() errchain.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
regData := services.UserRegistration{}
|
||||
|
||||
@@ -38,18 +40,19 @@ func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusNoContent, nil)
|
||||
return server.JSON(w, http.StatusNoContent, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// HandleUserSelf godoc
// @Summary Get the current user
// @Tags User
// @Produce json
// @Success 200 {object} server.Result{item=repo.UserOut}
// @Router /v1/users/self [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
//
// @Summary Get User Self
// @Tags User
// @Produce json
// @Success 200 {object} Wrapped{item=repo.UserOut}
// @Router /v1/users/self [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleUserSelf() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
token := services.UseTokenCtx(r.Context())
usr, err := ctrl.svc.User.GetSelf(r.Context(), token)
@@ -58,19 +61,20 @@ func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}

return server.Respond(w, http.StatusOK, server.Wrap(usr))
return server.JSON(w, http.StatusOK, Wrap(usr))
}
}
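HandleUserSelf (and the self-update handler below) now wraps its payload with a package-local Wrap/Wrapped pair instead of server.Wrap. Judging from the Wrapped{item=repo.UserOut} swagger annotation it is most likely a thin envelope like the sketch below, though its actual definition sits elsewhere in the v1 package.

// Wrapped is a guessed response envelope matching the swagger annotations above.
type Wrapped struct {
	Item any `json:"item"`
}

// Wrap boxes a payload so responses keep the {"item": ...} shape.
func Wrap(v any) Wrapped {
	return Wrapped{Item: v}
}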
|
||||
|
||||
// HandleUserSelfUpdate godoc
|
||||
// @Summary Update the current user
|
||||
// @Tags User
|
||||
// @Produce json
|
||||
// @Param payload body repo.UserUpdate true "User Data"
|
||||
// @Success 200 {object} server.Result{item=repo.UserUpdate}
|
||||
// @Router /v1/users/self [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
|
||||
//
|
||||
// @Summary Update Account
|
||||
// @Tags User
|
||||
// @Produce json
|
||||
// @Param payload body repo.UserUpdate true "User Data"
|
||||
// @Success 200 {object} Wrapped{item=repo.UserUpdate}
|
||||
// @Router /v1/users/self [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleUserSelfUpdate() errchain.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
updateData := repo.UserUpdate{}
|
||||
if err := server.Decode(r, &updateData); err != nil {
|
||||
@@ -84,18 +88,19 @@ func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusOK, server.Wrap(newData))
|
||||
return server.JSON(w, http.StatusOK, Wrap(newData))
|
||||
}
|
||||
}
|
||||
|
||||
// HandleUserSelfDelete godoc
|
||||
// @Summary Deletes the user account
|
||||
// @Tags User
|
||||
// @Produce json
|
||||
// @Success 204
|
||||
// @Router /v1/users/self [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc {
|
||||
//
|
||||
// @Summary Delete Account
|
||||
// @Tags User
|
||||
// @Produce json
|
||||
// @Success 204
|
||||
// @Router /v1/users/self [DELETE]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleUserSelfDelete() errchain.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
if ctrl.isDemo {
|
||||
return validate.NewRequestError(nil, http.StatusForbidden)
|
||||
@@ -106,7 +111,7 @@ func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusNoContent, nil)
|
||||
return server.JSON(w, http.StatusNoContent, nil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -118,13 +123,14 @@ type (
|
||||
)
|
||||
|
||||
// HandleUserSelfChangePassword godoc
|
||||
// @Summary Updates the users password
|
||||
// @Tags User
|
||||
// @Success 204
|
||||
// @Param payload body ChangePassword true "Password Payload"
|
||||
// @Router /v1/users/change-password [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc {
|
||||
//
|
||||
// @Summary Change Password
|
||||
// @Tags User
|
||||
// @Success 204
|
||||
// @Param payload body ChangePassword true "Password Payload"
|
||||
// @Router /v1/users/change-password [PUT]
|
||||
// @Security Bearer
|
||||
func (ctrl *V1Controller) HandleUserSelfChangePassword() errchain.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) error {
|
||||
if ctrl.isDemo {
|
||||
return validate.NewRequestError(nil, http.StatusForbidden)
|
||||
@@ -143,6 +149,6 @@ func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc {
|
||||
return validate.NewRequestError(err, http.StatusInternalServerError)
|
||||
}
|
||||
|
||||
return server.Respond(w, http.StatusNoContent, nil)
|
||||
return server.JSON(w, http.StatusNoContent, nil)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -9,6 +10,9 @@ import (
|
||||
|
||||
atlas "ariga.io/atlas/sql/migrate"
|
||||
"entgo.io/ent/dialect/sql/schema"
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
|
||||
"github.com/hay-kot/homebox/backend/app/api/static/docs"
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent"
|
||||
@@ -16,9 +20,13 @@ import (
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/config"
|
||||
"github.com/hay-kot/homebox/backend/internal/web/mid"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
"github.com/hay-kot/safeserve/server"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/rs/zerolog/pkgerrors"
|
||||
|
||||
_ "github.com/hay-kot/homebox/backend/pkgs/cgofreesqlite"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -27,9 +35,9 @@ var (
|
||||
buildTime = "now"
|
||||
)
|
||||
|
||||
// @title Go API Templates
|
||||
// @title Homebox API
|
||||
// @version 1.0
|
||||
// @description This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.
|
||||
// @description Track, Manage, and Organize your Things.
|
||||
// @contact.name Don't
|
||||
// @license.name MIT
|
||||
// @BasePath /api
|
||||
@@ -38,6 +46,8 @@ var (
|
||||
// @name Authorization
|
||||
// @description "Type 'Bearer TOKEN' to correctly set the API Key"
|
||||
func main() {
|
||||
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
|
||||
|
||||
cfg, err := config.New()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -118,26 +128,27 @@ func run(cfg *config.Config) error {
|
||||
)
|
||||
|
||||
// =========================================================================
|
||||
// Start Server\
|
||||
// Start Server
|
||||
|
||||
logger := log.With().Caller().Logger()
|
||||
|
||||
mwLogger := mid.Logger(logger)
|
||||
if app.conf.Mode == config.ModeDevelopment {
|
||||
mwLogger = mid.SugarLogger(logger)
|
||||
}
|
||||
router := chi.NewMux()
|
||||
router.Use(
|
||||
middleware.RequestID,
|
||||
middleware.RealIP,
|
||||
mid.Logger(logger),
|
||||
middleware.Recoverer,
|
||||
middleware.StripSlashes,
|
||||
)
|
||||
|
||||
chain := errchain.New(mid.Errors(app.server, logger))
|
||||
|
||||
app.mountRoutes(router, chain, app.repos)
|
||||
|
||||
app.server = server.NewServer(
|
||||
server.WithHost(app.conf.Web.Host),
|
||||
server.WithPort(app.conf.Web.Port),
|
||||
server.WithMiddleware(
|
||||
mwLogger,
|
||||
mid.Errors(logger),
|
||||
mid.Panic(app.conf.Mode == config.ModeDevelopment),
|
||||
),
|
||||
)
|
||||
|
||||
app.mountRoutes(app.repos)
|
||||
|
||||
log.Info().Msgf("Starting HTTP Server on %s:%s", app.server.Host, app.server.Port)
|
||||
|
||||
// =========================================================================
|
||||
@@ -159,6 +170,19 @@ func run(cfg *config.Config) error {
Msg("failed to purge expired invitations")
}
})
go app.startBgTask(time.Duration(1)*time.Hour, func() {
now := time.Now()

if now.Hour() == 8 {
fmt.Println("run notifiers")
err := app.services.BackgroundService.SendNotifiersToday(context.Background())
if err != nil {
log.Error().
Err(err).
Msg("failed to send notifiers")
}
}
})
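app.startBgTask is defined outside this diff; from the call sites above it takes an interval and a callback. A minimal sketch of such a scheduler, assuming it simply runs the callback and then ticks forever (the real helper may also honor shutdown signals), could look like this.

// startBgTask runs fn immediately and then once per interval (sketch only).
func (a *app) startBgTask(interval time.Duration, fn func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		fn()
		<-ticker.C
	}
}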
|
||||
|
||||
// TODO: Remove through external API that does setup
|
||||
if cfg.Demo {
|
||||
@@ -175,5 +199,5 @@ func run(cfg *config.Config) error {
|
||||
}()
|
||||
}
|
||||
|
||||
return app.server.Start()
|
||||
return app.server.Start(router)
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
|
||||
"github.com/hay-kot/homebox/backend/internal/core/services"
|
||||
"github.com/hay-kot/homebox/backend/internal/sys/validate"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
)
|
||||
|
||||
type tokenHasKey struct {
|
||||
@@ -30,9 +30,9 @@ const (
|
||||
// the required roles, a 403 Forbidden will be returned.
|
||||
//
|
||||
// WARNING: This middleware _MUST_ be called after mwAuthToken or else it will panic
|
||||
func (a *app) mwRoles(rm RoleMode, required ...string) server.Middleware {
|
||||
return func(next server.Handler) server.Handler {
|
||||
return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
func (a *app) mwRoles(rm RoleMode, required ...string) errchain.Middleware {
|
||||
return func(next errchain.Handler) errchain.Handler {
|
||||
return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
ctx := r.Context()
|
||||
|
||||
maybeToken := ctx.Value(hashedToken)
|
||||
@@ -116,8 +116,8 @@ func getCookie(r *http.Request) (string, error) {
|
||||
// - header = "Bearer 1234567890"
|
||||
// - query = "?access_token=1234567890"
|
||||
// - cookie = hb.auth.token = 1234567890
|
||||
func (a *app) mwAuthToken(next server.Handler) server.Handler {
|
||||
return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
func (a *app) mwAuthToken(next errchain.Handler) errchain.Handler {
|
||||
return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
|
||||
keyFuncs := [...]KeyFunc{
|
||||
getBearer,
|
||||
getCookie,
|
||||
|
||||
@@ -10,12 +10,13 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/hay-kot/homebox/backend/app/api/handlers/debughandlers"
|
||||
v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1"
|
||||
_ "github.com/hay-kot/homebox/backend/app/api/static/docs"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/repo"
|
||||
"github.com/hay-kot/homebox/backend/pkgs/server"
|
||||
"github.com/hay-kot/safeserve/errchain"
|
||||
httpSwagger "github.com/swaggo/http-swagger" // http-swagger middleware
|
||||
)
|
||||
|
||||
@@ -36,12 +37,12 @@ func (a *app) debugRouter() *http.ServeMux {
|
||||
}
|
||||
|
||||
// registerRoutes registers all the routes for the API
|
||||
func (a *app) mountRoutes(repos *repo.AllRepos) {
|
||||
func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllRepos) {
|
||||
registerMimes()
|
||||
|
||||
a.server.Get("/swagger/*", server.ToHandler(httpSwagger.Handler(
|
||||
r.Get("/swagger/*", httpSwagger.Handler(
|
||||
httpSwagger.URL(fmt.Sprintf("%s://%s/swagger/doc.json", a.conf.Swagger.Scheme, a.conf.Swagger.Host)),
|
||||
)))
|
||||
))
|
||||
|
||||
// =========================================================================
|
||||
// API Version 1
|
||||
@@ -56,92 +57,103 @@ func (a *app) mountRoutes(repos *repo.AllRepos) {
|
||||
v1.WithDemoStatus(a.conf.Demo), // Disable Password Change in Demo Mode
|
||||
)
|
||||
|
||||
a.server.Get(v1Base("/status"), v1Ctrl.HandleBase(func() bool { return true }, v1.Build{
|
||||
r.Get(v1Base("/status"), chain.ToHandlerFunc(v1Ctrl.HandleBase(func() bool { return true }, v1.Build{
|
||||
Version: version,
|
||||
Commit: commit,
|
||||
BuildTime: buildTime,
|
||||
}))
|
||||
})))
|
||||
|
||||
a.server.Post(v1Base("/users/register"), v1Ctrl.HandleUserRegistration())
|
||||
a.server.Post(v1Base("/users/login"), v1Ctrl.HandleAuthLogin())
|
||||
r.Post(v1Base("/users/register"), chain.ToHandlerFunc(v1Ctrl.HandleUserRegistration()))
|
||||
r.Post(v1Base("/users/login"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogin()))
|
||||
|
||||
userMW := []server.Middleware{
|
||||
userMW := []errchain.Middleware{
|
||||
a.mwAuthToken,
|
||||
a.mwRoles(RoleModeOr, authroles.RoleUser.String()),
|
||||
}
|
||||
|
||||
a.server.Get(v1Base("/users/self"), v1Ctrl.HandleUserSelf(), userMW...)
|
||||
a.server.Put(v1Base("/users/self"), v1Ctrl.HandleUserSelfUpdate(), userMW...)
|
||||
a.server.Delete(v1Base("/users/self"), v1Ctrl.HandleUserSelfDelete(), userMW...)
|
||||
a.server.Post(v1Base("/users/logout"), v1Ctrl.HandleAuthLogout(), userMW...)
|
||||
a.server.Get(v1Base("/users/refresh"), v1Ctrl.HandleAuthRefresh(), userMW...)
|
||||
a.server.Put(v1Base("/users/self/change-password"), v1Ctrl.HandleUserSelfChangePassword(), userMW...)
|
||||
r.Get(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelf(), userMW...))
|
||||
r.Put(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfUpdate(), userMW...))
|
||||
r.Delete(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfDelete(), userMW...))
|
||||
r.Post(v1Base("/users/logout"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogout(), userMW...))
|
||||
r.Get(v1Base("/users/refresh"), chain.ToHandlerFunc(v1Ctrl.HandleAuthRefresh(), userMW...))
|
||||
r.Put(v1Base("/users/self/change-password"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfChangePassword(), userMW...))
|
||||
|
||||
a.server.Post(v1Base("/groups/invitations"), v1Ctrl.HandleGroupInvitationsCreate(), userMW...)
|
||||
a.server.Get(v1Base("/groups/statistics"), v1Ctrl.HandleGroupStatistics(), userMW...)
|
||||
a.server.Get(v1Base("/groups/statistics/purchase-price"), v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...)
|
||||
a.server.Get(v1Base("/groups/statistics/locations"), v1Ctrl.HandleGroupStatisticsLocations(), userMW...)
|
||||
a.server.Get(v1Base("/groups/statistics/labels"), v1Ctrl.HandleGroupStatisticsLabels(), userMW...)
|
||||
r.Post(v1Base("/groups/invitations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupInvitationsCreate(), userMW...))
|
||||
r.Get(v1Base("/groups/statistics"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatistics(), userMW...))
|
||||
r.Get(v1Base("/groups/statistics/purchase-price"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...))
|
||||
r.Get(v1Base("/groups/statistics/locations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLocations(), userMW...))
|
||||
r.Get(v1Base("/groups/statistics/labels"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLabels(), userMW...))
|
||||
|
||||
// TODO: I don't like /groups being the URL for users
|
||||
a.server.Get(v1Base("/groups"), v1Ctrl.HandleGroupGet(), userMW...)
|
||||
a.server.Put(v1Base("/groups"), v1Ctrl.HandleGroupUpdate(), userMW...)
|
||||
r.Get(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupGet(), userMW...))
|
||||
r.Put(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupUpdate(), userMW...))
|
||||
|
||||
a.server.Post(v1Base("/actions/ensure-asset-ids"), v1Ctrl.HandleEnsureAssetID(), userMW...)
|
||||
a.server.Post(v1Base("/actions/zero-item-time-fields"), v1Ctrl.HandleItemDateZeroOut(), userMW...)
|
||||
a.server.Post(v1Base("/actions/ensure-import-refs"), v1Ctrl.HandleEnsureImportRefs(), userMW...)
|
||||
r.Post(v1Base("/actions/ensure-asset-ids"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureAssetID(), userMW...))
|
||||
r.Post(v1Base("/actions/zero-item-time-fields"), chain.ToHandlerFunc(v1Ctrl.HandleItemDateZeroOut(), userMW...))
|
||||
r.Post(v1Base("/actions/ensure-import-refs"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureImportRefs(), userMW...))
|
||||
|
||||
a.server.Get(v1Base("/locations"), v1Ctrl.HandleLocationGetAll(), userMW...)
|
||||
a.server.Post(v1Base("/locations"), v1Ctrl.HandleLocationCreate(), userMW...)
|
||||
a.server.Get(v1Base("/locations/tree"), v1Ctrl.HandleLocationTreeQuery(), userMW...)
|
||||
a.server.Get(v1Base("/locations/{id}"), v1Ctrl.HandleLocationGet(), userMW...)
|
||||
a.server.Put(v1Base("/locations/{id}"), v1Ctrl.HandleLocationUpdate(), userMW...)
|
||||
a.server.Delete(v1Base("/locations/{id}"), v1Ctrl.HandleLocationDelete(), userMW...)
|
||||
r.Get(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGetAll(), userMW...))
|
||||
r.Post(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationCreate(), userMW...))
|
||||
r.Get(v1Base("/locations/tree"), chain.ToHandlerFunc(v1Ctrl.HandleLocationTreeQuery(), userMW...))
|
||||
r.Get(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGet(), userMW...))
|
||||
r.Put(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationUpdate(), userMW...))
|
||||
r.Delete(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationDelete(), userMW...))
|
||||
|
||||
a.server.Get(v1Base("/labels"), v1Ctrl.HandleLabelsGetAll(), userMW...)
|
||||
a.server.Post(v1Base("/labels"), v1Ctrl.HandleLabelsCreate(), userMW...)
|
||||
a.server.Get(v1Base("/labels/{id}"), v1Ctrl.HandleLabelGet(), userMW...)
|
||||
a.server.Put(v1Base("/labels/{id}"), v1Ctrl.HandleLabelUpdate(), userMW...)
|
||||
a.server.Delete(v1Base("/labels/{id}"), v1Ctrl.HandleLabelDelete(), userMW...)
|
||||
r.Get(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsGetAll(), userMW...))
|
||||
r.Post(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsCreate(), userMW...))
|
||||
r.Get(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelGet(), userMW...))
|
||||
r.Put(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelUpdate(), userMW...))
|
||||
r.Delete(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelDelete(), userMW...))
|
||||
|
||||
a.server.Get(v1Base("/items"), v1Ctrl.HandleItemsGetAll(), userMW...)
|
||||
a.server.Post(v1Base("/items"), v1Ctrl.HandleItemsCreate(), userMW...)
|
||||
a.server.Post(v1Base("/items/import"), v1Ctrl.HandleItemsImport(), userMW...)
|
||||
a.server.Get(v1Base("/items/export"), v1Ctrl.HandleItemsExport(), userMW...)
|
||||
a.server.Get(v1Base("/items/fields"), v1Ctrl.HandleGetAllCustomFieldNames(), userMW...)
|
||||
a.server.Get(v1Base("/items/fields/values"), v1Ctrl.HandleGetAllCustomFieldValues(), userMW...)
|
||||
r.Get(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsGetAll(), userMW...))
|
||||
r.Post(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsCreate(), userMW...))
|
||||
r.Post(v1Base("/items/import"), chain.ToHandlerFunc(v1Ctrl.HandleItemsImport(), userMW...))
|
||||
r.Get(v1Base("/items/export"), chain.ToHandlerFunc(v1Ctrl.HandleItemsExport(), userMW...))
|
||||
r.Get(v1Base("/items/fields"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldNames(), userMW...))
|
||||
r.Get(v1Base("/items/fields/values"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldValues(), userMW...))
|
||||
|
||||
a.server.Get(v1Base("/items/{id}"), v1Ctrl.HandleItemGet(), userMW...)
|
||||
a.server.Put(v1Base("/items/{id}"), v1Ctrl.HandleItemUpdate(), userMW...)
|
||||
a.server.Delete(v1Base("/items/{id}"), v1Ctrl.HandleItemDelete(), userMW...)
|
||||
r.Get(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemGet(), userMW...))
|
||||
r.Put(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemUpdate(), userMW...))
|
||||
r.Delete(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemDelete(), userMW...))
|
||||
|
||||
a.server.Post(v1Base("/items/{id}/attachments"), v1Ctrl.HandleItemAttachmentCreate(), userMW...)
|
||||
a.server.Put(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentUpdate(), userMW...)
|
||||
a.server.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentDelete(), userMW...)
|
||||
r.Post(v1Base("/items/{id}/attachments"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentCreate(), userMW...))
|
||||
r.Put(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentUpdate(), userMW...))
|
||||
r.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentDelete(), userMW...))
|
||||
|
||||
a.server.Get(v1Base("/items/{id}/maintenance"), v1Ctrl.HandleMaintenanceEntryCreate(), userMW...)
|
||||
a.server.Post(v1Base("/items/{id}/maintenance"), v1Ctrl.HandleMaintenanceEntryCreate(), userMW...)
|
||||
a.server.Put(v1Base("/items/{id}/maintenance/{entry_id}"), v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...)
|
||||
a.server.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), v1Ctrl.HandleMaintenanceEntryDelete(), userMW...)
|
||||
r.Get(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceLogGet(), userMW...))
|
||||
r.Post(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryCreate(), userMW...))
|
||||
r.Put(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...))
|
||||
r.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryDelete(), userMW...))
|
||||
|
||||
a.server.Get(v1Base("/asset/{id}"), v1Ctrl.HandleAssetGet(), userMW...)
|
||||
r.Get(v1Base("/asset/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleAssetGet(), userMW...))
|
||||
|
||||
// Notifiers
|
||||
r.Get(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleGetUserNotifiers(), userMW...))
|
||||
r.Post(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleCreateNotifier(), userMW...))
|
||||
r.Put(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleUpdateNotifier(), userMW...))
|
||||
r.Delete(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleDeleteNotifier(), userMW...))
|
||||
r.Post(v1Base("/notifiers/test"), chain.ToHandlerFunc(v1Ctrl.HandlerNotifierTest(), userMW...))
|
||||
|
||||
// Asset-Like endpoints
a.server.Get(
assetMW := []errchain.Middleware{
a.mwAuthToken,
a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
}

r.Get(
v1Base("/qrcode"),
v1Ctrl.HandleGenerateQRCode(),
a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
chain.ToHandlerFunc(v1Ctrl.HandleGenerateQRCode(), assetMW...),
)
a.server.Get(
r.Get(
v1Base("/items/{id}/attachments/{attachment_id}"),
v1Ctrl.HandleItemAttachmentGet(),
a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentGet(), assetMW...),
)

// Reporting Services
a.server.Get(v1Base("/reporting/bill-of-materials"), v1Ctrl.HandleBillOfMaterialsExport(), userMW...)
r.Get(v1Base("/reporting/bill-of-materials"), chain.ToHandlerFunc(v1Ctrl.HandleBillOfMaterialsExport(), userMW...))

r.NotFound(chain.ToHandlerFunc(notFoundHandler()))

a.server.NotFound(notFoundHandler())
}

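The asset-like endpoints above share an "or" role check (regular users or attachment-token holders). A minimal sketch of how such a middleware could look, assuming roles are placed on the request context by an earlier auth middleware; names and context plumbing are illustrative, not the project's code.

```go
package sketch // hypothetical; role storage and error rendering are assumptions

import (
	"errors"
	"net/http"
)

type HandlerFunc func(http.ResponseWriter, *http.Request) error
type Middleware func(HandlerFunc) HandlerFunc

type RoleMode int

const (
	RoleModeOr  RoleMode = iota // any one of the listed roles is enough
	RoleModeAnd                 // every listed role is required
)

type ctxKey string

const rolesKey ctxKey = "roles"

var errForbidden = errors.New("forbidden")

// mwRoles checks the roles attached to the request context (e.g. by an auth
// token middleware) against the required set.
func mwRoles(mode RoleMode, required ...string) Middleware {
	return func(next HandlerFunc) HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) error {
			have, _ := r.Context().Value(rolesKey).([]string)
			haveSet := make(map[string]bool, len(have))
			for _, role := range have {
				haveSet[role] = true
			}
			satisfied := mode == RoleModeAnd // AND starts true, OR starts false
			for _, want := range required {
				if mode == RoleModeOr && haveSet[want] {
					satisfied = true
					break
				}
				if mode == RoleModeAnd && !haveSet[want] {
					satisfied = false
					break
				}
			}
			if !satisfied {
				return errForbidden
			}
			return next(w, r)
		}
	}
}
```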
func registerMimes() {
@@ -158,7 +170,7 @@ func registerMimes() {

// notFoundHandler perform the main logic around handling the internal SPA embed and ensuring that
// the client side routing is handled correctly.
func notFoundHandler() server.HandlerFunc {
func notFoundHandler() errchain.HandlerFunc {
tryRead := func(fs embed.FS, prefix, requestedPath string, w http.ResponseWriter) error {
f, err := fs.Open(path.Join(prefix, requestedPath))
if err != nil {

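The hunk above only changes the handler's return type, but the surrounding lines show the embedded-SPA fallback it implements. A rough, simplified sketch of that behavior (the embed source, mime handling, and error paths are assumptions):

```go
package sketch // rough shape only; real code also sets content types via registerMimes

import (
	"embed"
	"io"
	"net/http"
	"path"
)

// notFoundHandler serves files from the embedded frontend build and falls back
// to index.html so the SPA's client-side router can handle unknown paths.
func notFoundHandler(assets embed.FS, prefix string) func(http.ResponseWriter, *http.Request) error {
	tryRead := func(fs embed.FS, prefix, requested string, w http.ResponseWriter) error {
		f, err := fs.Open(path.Join(prefix, requested))
		if err != nil {
			return err
		}
		defer f.Close()
		_, err = io.Copy(w, f)
		return err
	}
	return func(w http.ResponseWriter, r *http.Request) error {
		if err := tryRead(assets, prefix, r.URL.Path, w); err != nil {
			// Unknown path: hand back the SPA shell and let the browser route it.
			return tryRead(assets, prefix, "index.html", w)
		}
		return nil
	}
}
```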
@@ -28,13 +28,14 @@ const docTemplate = `{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Ensures all items in the database have an asset ID",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Group"
|
||||
"Actions"
|
||||
],
|
||||
"summary": "Ensures all items in the database have an asset id",
|
||||
"summary": "Ensure Asset IDs",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
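The retagged "Actions" summaries in these generated-spec hunks come from swag comment annotations on the handlers. The annotation names below are standard swaggo; the handler body and the ActionAmountResult field are assumptions for illustration only.

```go
package v1 // illustrative; not the project's exact comments or types

import "net/http"

type ActionAmountResult struct {
	Completed int `json:"completed"` // assumed field name
}

// HandleEnsureAssetID godoc
//
//	@Summary  Ensure Asset IDs
//	@Tags     Actions
//	@Produce  json
//	@Success  200 {object} ActionAmountResult
//	@Router   /v1/actions/ensure-asset-ids [Post]
//	@Security Bearer
func HandleEnsureAssetID() http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		// The real handler walks the group's items and assigns missing asset IDs.
		w.Header().Set("Content-Type", "application/json")
		_, _ = w.Write([]byte(`{"completed":0}`))
	}
}
```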
@@ -52,13 +53,14 @@ const docTemplate = `{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Ensures all items in the database have an import ref",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Group"
|
||||
"Actions"
|
||||
],
|
||||
"summary": "Ensures all items in the database have an import ref",
|
||||
"summary": "Ensures Import Refs",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -76,13 +78,14 @@ const docTemplate = `{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Resets all item date fields to the beginning of the day",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Group"
|
||||
"Actions"
|
||||
],
|
||||
"summary": "Resets all item date fields to the beginning of the day",
|
||||
"summary": "Zero Out Time Fields",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -104,9 +107,9 @@ const docTemplate = `{
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Assets"
|
||||
"Items"
|
||||
],
|
||||
"summary": "Gets an item by Asset ID",
|
||||
"summary": "Get Item by Asset ID",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -139,7 +142,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Group"
|
||||
],
|
||||
"summary": "Get the current user's group",
|
||||
"summary": "Get Group",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -161,7 +164,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Group"
|
||||
],
|
||||
"summary": "Updates some fields of the current users group",
|
||||
"summary": "Update Group",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -196,7 +199,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Group"
|
||||
],
|
||||
"summary": "Get the current user",
|
||||
"summary": "Create Group Invitation",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -231,7 +234,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Get the current user's group statistics",
|
||||
"summary": "Get Group Statistics",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -255,7 +258,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Get the current user's group statistics",
|
||||
"summary": "Get Label Statistics",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -282,7 +285,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Get the current user's group statistics",
|
||||
"summary": "Get Location Statistics",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -309,7 +312,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Queries the changes overtime of the purchase price over time",
|
||||
"summary": "Get Purchase Price Statistics",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -347,7 +350,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Get All Items",
|
||||
"summary": "Query All Items",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -409,7 +412,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Create a new item",
|
||||
"summary": "Create Item",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Item Data",
|
||||
@@ -422,8 +425,8 @@ const docTemplate = `{
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.ItemSummary"
|
||||
}
|
||||
@@ -441,7 +444,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "exports items into the database",
|
||||
"summary": "Export Items",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "text/csv",
|
||||
@@ -465,7 +468,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Get All Custom Field Names",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -492,7 +495,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Get All Custom Field Values",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -519,7 +522,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Import Items",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "file",
|
||||
@@ -549,7 +552,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Gets a item and fields",
|
||||
"summary": "Get Item",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -580,7 +583,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "updates a item",
|
||||
"summary": "Update Item",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -620,7 +623,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "deletes a item",
|
||||
"summary": "Delete Item",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -650,7 +653,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Create Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -691,7 +694,7 @@ const docTemplate = `{
|
||||
"422": {
|
||||
"description": "Unprocessable Entity",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/server.ErrorResponse"
|
||||
"$ref": "#/definitions/mid.ErrorResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -710,7 +713,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "retrieves an attachment for an item",
|
||||
"summary": "Get Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -745,7 +748,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "retrieves an attachment for an item",
|
||||
"summary": "Update Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -789,7 +792,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "retrieves an attachment for an item",
|
||||
"summary": "Delete Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -861,8 +864,8 @@ const docTemplate = `{
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.MaintenanceEntry"
|
||||
}
|
||||
@@ -942,22 +945,10 @@ const docTemplate = `{
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Results"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LabelOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LabelOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
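This hunk (and the matching ones for locations and the location tree) drops the old `server.Results` envelope: list endpoints now respond with a bare JSON array. For a client, that means decoding straight into a slice. A small assumed client sketch; the endpoint path and Bearer header follow the spec, the rest is illustrative.

```go
package sketch

import (
	"encoding/json"
	"net/http"
)

type LabelOut struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// fetchLabels decodes GET /api/v1/labels directly into a slice; previously the
// payload was wrapped in an object with an "items" field.
func fetchLabels(c *http.Client, baseURL, token string) ([]LabelOut, error) {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/api/v1/labels", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+token)
	res, err := c.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	var labels []LabelOut
	return labels, json.NewDecoder(res.Body).Decode(&labels)
}
```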
|
||||
@@ -974,7 +965,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "Create a new label",
|
||||
"summary": "Create Label",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Label Data",
|
||||
@@ -1009,7 +1000,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "Gets a label and fields",
|
||||
"summary": "Get Label",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1040,7 +1031,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "updates a label",
|
||||
"summary": "Update Label",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1071,7 +1062,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "deletes a label",
|
||||
"summary": "Delete Label",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1114,22 +1105,10 @@ const docTemplate = `{
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Results"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LocationOutCount"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LocationOutCount"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1146,7 +1125,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "Create a new location",
|
||||
"summary": "Create Location",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Location Data",
|
||||
@@ -1181,7 +1160,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "Get All Locations",
|
||||
"summary": "Get Locations Tree",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "boolean",
|
||||
@@ -1194,22 +1173,10 @@ const docTemplate = `{
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Results"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.TreeItem"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.TreeItem"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1228,7 +1195,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "Gets a location and fields",
|
||||
"summary": "Get Location",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1259,7 +1226,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "updates a location",
|
||||
"summary": "Update Location",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1299,7 +1266,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "deletes a location",
|
||||
"summary": "Delete Location",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1316,6 +1283,167 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/notifiers": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Get Notifiers",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.NotifierOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Create Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Notifier Data",
|
||||
"name": "payload",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierCreate"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/notifiers/test": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Test Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notifier ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "URL",
|
||||
"name": "url",
|
||||
"in": "query",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/notifiers/{id}": {
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Update Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notifier ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Notifier Data",
|
||||
"name": "payload",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierUpdate"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"delete": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Delete a Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notifier ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/qrcode": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -1329,7 +1457,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Encode data into QRCode",
|
||||
"summary": "Create QR Code",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1361,7 +1489,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Reporting"
|
||||
],
|
||||
"summary": "Generates a Bill of Materials CSV",
|
||||
"summary": "Export Bill of Materials",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "text/csv",
|
||||
@@ -1380,7 +1508,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"Base"
|
||||
],
|
||||
"summary": "Retrieves the basic information about the API",
|
||||
"summary": "Application Info",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -1401,7 +1529,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Updates the users password",
|
||||
"summary": "Change Password",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Password Payload",
|
||||
@@ -1447,6 +1575,15 @@ const docTemplate = `{
|
||||
"description": "string",
|
||||
"name": "password",
|
||||
"in": "formData"
|
||||
},
|
||||
{
|
||||
"description": "Login Data",
|
||||
"name": "payload",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1.LoginForm"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1504,7 +1641,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Get the current user",
|
||||
"summary": "Register New User",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -1536,14 +1673,14 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Get the current user",
|
||||
"summary": "Get User Self",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Result"
|
||||
"$ref": "#/definitions/v1.Wrapped"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
@@ -1570,7 +1707,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Update the current user",
|
||||
"summary": "Update Account",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -1588,7 +1725,7 @@ const docTemplate = `{
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Result"
|
||||
"$ref": "#/definitions/v1.Wrapped"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
@@ -1615,7 +1752,7 @@ const docTemplate = `{
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Deletes the user account",
|
||||
"summary": "Delete Account",
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
@@ -1625,6 +1762,20 @@ const docTemplate = `{
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"mid.ErrorResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string"
|
||||
},
|
||||
"fields": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.DocumentOut": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1726,9 +1877,13 @@ const docTemplate = `{
|
||||
},
|
||||
"repo.ItemCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 1000
|
||||
},
|
||||
"labelIds": {
|
||||
"type": "array",
|
||||
@@ -1741,7 +1896,9 @@ const docTemplate = `{
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
},
|
||||
"parentId": {
|
||||
"type": "string",
|
||||
@@ -2032,15 +2189,21 @@ const docTemplate = `{
|
||||
},
|
||||
"repo.LabelCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"color": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 255
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
}
|
||||
}
|
||||
},
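The new required/maxLength/minLength attributes on schemas like repo.LabelCreate typically come from validation tags on the create DTOs, which swag folds into the generated spec. A representative, assumed struct shape (tag syntax follows the common swaggo/validator convention, not copied from the repository):

```go
package repo // illustrative DTO only

type LabelCreate struct {
	Name        string `json:"name" validate:"required,min=1,max=255"`
	Description string `json:"description" validate:"max=255"`
	Color       string `json:"color"`
}
```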
|
||||
@@ -2229,6 +2392,9 @@ const docTemplate = `{
|
||||
},
|
||||
"repo.MaintenanceEntryCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"completedDate": {
|
||||
"description": "Sold",
|
||||
@@ -2293,6 +2459,72 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.NotifierCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"url"
|
||||
],
|
||||
"properties": {
|
||||
"isActive": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.NotifierOut": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"createdAt": {
|
||||
"type": "string"
|
||||
},
|
||||
"groupId": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"isActive": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"updatedAt": {
|
||||
"type": "string"
|
||||
},
|
||||
"userId": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.NotifierUpdate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"isActive": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
},
|
||||
"url": {
|
||||
"type": "string",
|
||||
"x-nullable": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.PaginationResult-repo_ItemSummary": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2421,39 +2653,6 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"server.ErrorResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string"
|
||||
},
|
||||
"fields": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"server.Result": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"details": {},
|
||||
"error": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"item": {},
|
||||
"message": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"server.Results": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {}
|
||||
}
|
||||
},
|
||||
"services.UserRegistration": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2549,12 +2748,17 @@ const docTemplate = `{
|
||||
},
|
||||
"v1.GroupInvitationCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"uses"
|
||||
],
|
||||
"properties": {
|
||||
"expiresAt": {
|
||||
"type": "string"
|
||||
},
|
||||
"uses": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"maximum": 100,
|
||||
"minimum": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -2566,6 +2770,20 @@ const docTemplate = `{
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.LoginForm": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"password": {
|
||||
"type": "string"
|
||||
},
|
||||
"stayLoggedIn": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"username": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.TokenResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2579,6 +2797,12 @@ const docTemplate = `{
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.Wrapped": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"item": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"securityDefinitions": {
|
||||
@@ -2597,8 +2821,8 @@ var SwaggerInfo = &swag.Spec{
Host: "",
BasePath: "/api",
Schemes: []string{},
Title: "Go API Templates",
Description: "This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.",
Title: "Homebox API",
Description: "Track, Manage, and Organize your Things.",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
}

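For context, the retitled spec above is registered with the swag runtime and then served by the httpSwagger handler mounted in mountRoutes. A minimal sketch of that wiring under the usual swaggo convention (the registration call may differ by swag version; this is not code from the diff):

```go
package docs // sketch of standard swaggo wiring, assumptions noted inline

import "github.com/swaggo/swag"

var swaggerInfo = &swag.Spec{
	Version:          "1.0",
	BasePath:         "/api",
	Title:            "Homebox API",
	Description:      "Track, Manage, and Organize your Things.",
	InfoInstanceName: "swagger",
	SwaggerTemplate:  `{"swagger": "2.0"}`, // stand-in for the full docTemplate
}

func init() {
	// Registering makes /swagger/doc.json resolvable by the mounted UI handler.
	swag.Register(swaggerInfo.InstanceName(), swaggerInfo)
}
```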
@@ -1,8 +1,8 @@
|
||||
{
|
||||
"swagger": "2.0",
|
||||
"info": {
|
||||
"description": "This is a simple Rest API Server Template that implements some basic User and Authentication patterns to help you get started and bootstrap your next project!.",
|
||||
"title": "Go API Templates",
|
||||
"description": "Track, Manage, and Organize your Things.",
|
||||
"title": "Homebox API",
|
||||
"contact": {
|
||||
"name": "Don't"
|
||||
},
|
||||
@@ -20,13 +20,14 @@
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Ensures all items in the database have an asset ID",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Group"
|
||||
"Actions"
|
||||
],
|
||||
"summary": "Ensures all items in the database have an asset id",
|
||||
"summary": "Ensure Asset IDs",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -44,13 +45,14 @@
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Ensures all items in the database have an import ref",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Group"
|
||||
"Actions"
|
||||
],
|
||||
"summary": "Ensures all items in the database have an import ref",
|
||||
"summary": "Ensures Import Refs",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -68,13 +70,14 @@
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"description": "Resets all item date fields to the beginning of the day",
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Group"
|
||||
"Actions"
|
||||
],
|
||||
"summary": "Resets all item date fields to the beginning of the day",
|
||||
"summary": "Zero Out Time Fields",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -96,9 +99,9 @@
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Assets"
|
||||
"Items"
|
||||
],
|
||||
"summary": "Gets an item by Asset ID",
|
||||
"summary": "Get Item by Asset ID",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -131,7 +134,7 @@
|
||||
"tags": [
|
||||
"Group"
|
||||
],
|
||||
"summary": "Get the current user's group",
|
||||
"summary": "Get Group",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -153,7 +156,7 @@
|
||||
"tags": [
|
||||
"Group"
|
||||
],
|
||||
"summary": "Updates some fields of the current users group",
|
||||
"summary": "Update Group",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -188,7 +191,7 @@
|
||||
"tags": [
|
||||
"Group"
|
||||
],
|
||||
"summary": "Get the current user",
|
||||
"summary": "Create Group Invitation",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -223,7 +226,7 @@
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Get the current user's group statistics",
|
||||
"summary": "Get Group Statistics",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -247,7 +250,7 @@
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Get the current user's group statistics",
|
||||
"summary": "Get Label Statistics",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -274,7 +277,7 @@
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Get the current user's group statistics",
|
||||
"summary": "Get Location Statistics",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -301,7 +304,7 @@
|
||||
"tags": [
|
||||
"Statistics"
|
||||
],
|
||||
"summary": "Queries the changes overtime of the purchase price over time",
|
||||
"summary": "Get Purchase Price Statistics",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -339,7 +342,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Get All Items",
|
||||
"summary": "Query All Items",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -401,7 +404,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Create a new item",
|
||||
"summary": "Create Item",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Item Data",
|
||||
@@ -414,8 +417,8 @@
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.ItemSummary"
|
||||
}
|
||||
@@ -433,7 +436,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "exports items into the database",
|
||||
"summary": "Export Items",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "text/csv",
|
||||
@@ -457,7 +460,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Get All Custom Field Names",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -484,7 +487,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Get All Custom Field Values",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -511,7 +514,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Import Items",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "file",
|
||||
@@ -541,7 +544,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Gets a item and fields",
|
||||
"summary": "Get Item",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -572,7 +575,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "updates a item",
|
||||
"summary": "Update Item",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -612,7 +615,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "deletes a item",
|
||||
"summary": "Delete Item",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -642,7 +645,7 @@
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "imports items into the database",
|
||||
"summary": "Create Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -683,7 +686,7 @@
|
||||
"422": {
|
||||
"description": "Unprocessable Entity",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/server.ErrorResponse"
|
||||
"$ref": "#/definitions/mid.ErrorResponse"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -702,7 +705,7 @@
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "retrieves an attachment for an item",
|
||||
"summary": "Get Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -737,7 +740,7 @@
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "retrieves an attachment for an item",
|
||||
"summary": "Update Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -781,7 +784,7 @@
|
||||
"tags": [
|
||||
"Items Attachments"
|
||||
],
|
||||
"summary": "retrieves an attachment for an item",
|
||||
"summary": "Delete Item Attachment",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -853,8 +856,8 @@
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.MaintenanceEntry"
|
||||
}
|
||||
@@ -934,22 +937,10 @@
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Results"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LabelOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LabelOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -966,7 +957,7 @@
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "Create a new label",
|
||||
"summary": "Create Label",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Label Data",
|
||||
@@ -1001,7 +992,7 @@
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "Gets a label and fields",
|
||||
"summary": "Get Label",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1032,7 +1023,7 @@
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "updates a label",
|
||||
"summary": "Update Label",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1063,7 +1054,7 @@
|
||||
"tags": [
|
||||
"Labels"
|
||||
],
|
||||
"summary": "deletes a label",
|
||||
"summary": "Delete Label",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1106,22 +1097,10 @@
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Results"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LocationOutCount"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.LocationOutCount"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1138,7 +1117,7 @@
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "Create a new location",
|
||||
"summary": "Create Location",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Location Data",
|
||||
@@ -1173,7 +1152,7 @@
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "Get All Locations",
|
||||
"summary": "Get Locations Tree",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "boolean",
|
||||
@@ -1186,22 +1165,10 @@
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Results"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.TreeItem"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.TreeItem"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1220,7 +1187,7 @@
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "Gets a location and fields",
|
||||
"summary": "Get Location",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1251,7 +1218,7 @@
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "updates a location",
|
||||
"summary": "Update Location",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1291,7 +1258,7 @@
|
||||
"tags": [
|
||||
"Locations"
|
||||
],
|
||||
"summary": "deletes a location",
|
||||
"summary": "Delete Location",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1308,6 +1275,167 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/notifiers": {
|
||||
"get": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Get Notifiers",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/definitions/repo.NotifierOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Create Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Notifier Data",
|
||||
"name": "payload",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierCreate"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/notifiers/test": {
|
||||
"post": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json"
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Test Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notifier ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"type": "string",
|
||||
"description": "URL",
|
||||
"name": "url",
|
||||
"in": "query",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/notifiers/{id}": {
|
||||
"put": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Update Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notifier ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
},
|
||||
{
|
||||
"description": "Notifier Data",
|
||||
"name": "payload",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierUpdate"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/repo.NotifierOut"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"delete": {
|
||||
"security": [
|
||||
{
|
||||
"Bearer": []
|
||||
}
|
||||
],
|
||||
"tags": [
|
||||
"Notifiers"
|
||||
],
|
||||
"summary": "Delete a Notifier",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
"description": "Notifier ID",
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/v1/qrcode": {
|
||||
"get": {
|
||||
"security": [
|
||||
@@ -1321,7 +1449,7 @@
|
||||
"tags": [
|
||||
"Items"
|
||||
],
|
||||
"summary": "Encode data into QRCode",
|
||||
"summary": "Create QR Code",
|
||||
"parameters": [
|
||||
{
|
||||
"type": "string",
|
||||
@@ -1353,7 +1481,7 @@
|
||||
"tags": [
|
||||
"Reporting"
|
||||
],
|
||||
"summary": "Generates a Bill of Materials CSV",
|
||||
"summary": "Export Bill of Materials",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "text/csv",
|
||||
@@ -1372,7 +1500,7 @@
|
||||
"tags": [
|
||||
"Base"
|
||||
],
|
||||
"summary": "Retrieves the basic information about the API",
|
||||
"summary": "Application Info",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
@@ -1393,7 +1521,7 @@
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Updates the users password",
|
||||
"summary": "Change Password",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "Password Payload",
|
||||
@@ -1439,6 +1567,15 @@
|
||||
"description": "string",
|
||||
"name": "password",
|
||||
"in": "formData"
|
||||
},
|
||||
{
|
||||
"description": "Login Data",
|
||||
"name": "payload",
|
||||
"in": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/v1.LoginForm"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
@@ -1496,7 +1633,7 @@
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Get the current user",
|
||||
"summary": "Register New User",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -1528,14 +1665,14 @@
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Get the current user",
|
||||
"summary": "Get User Self",
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Result"
|
||||
"$ref": "#/definitions/v1.Wrapped"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
@@ -1562,7 +1699,7 @@
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Update the current user",
|
||||
"summary": "Update Account",
|
||||
"parameters": [
|
||||
{
|
||||
"description": "User Data",
|
||||
@@ -1580,7 +1717,7 @@
|
||||
"schema": {
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/definitions/server.Result"
|
||||
"$ref": "#/definitions/v1.Wrapped"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
@@ -1607,7 +1744,7 @@
|
||||
"tags": [
|
||||
"User"
|
||||
],
|
||||
"summary": "Deletes the user account",
|
||||
"summary": "Delete Account",
|
||||
"responses": {
|
||||
"204": {
|
||||
"description": "No Content"
|
||||
@@ -1617,6 +1754,20 @@
|
||||
}
|
||||
},
|
||||
"definitions": {
|
||||
"mid.ErrorResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string"
|
||||
},
|
||||
"fields": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.DocumentOut": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -1718,9 +1869,13 @@
|
||||
},
|
||||
"repo.ItemCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"description": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 1000
|
||||
},
|
||||
"labelIds": {
|
||||
"type": "array",
|
||||
@@ -1733,7 +1888,9 @@
|
||||
"type": "string"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
},
|
||||
"parentId": {
|
||||
"type": "string",
|
||||
@@ -2024,15 +2181,21 @@
|
||||
},
|
||||
"repo.LabelCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"color": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 255
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -2221,6 +2384,9 @@
|
||||
},
|
||||
"repo.MaintenanceEntryCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"completedDate": {
|
||||
"description": "Sold",
|
||||
@@ -2285,6 +2451,72 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.NotifierCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name",
|
||||
"url"
|
||||
],
|
||||
"properties": {
|
||||
"isActive": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
},
|
||||
"url": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.NotifierOut": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"createdAt": {
|
||||
"type": "string"
|
||||
},
|
||||
"groupId": {
|
||||
"type": "string"
|
||||
},
|
||||
"id": {
|
||||
"type": "string"
|
||||
},
|
||||
"isActive": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string"
|
||||
},
|
||||
"updatedAt": {
|
||||
"type": "string"
|
||||
},
|
||||
"userId": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.NotifierUpdate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"name"
|
||||
],
|
||||
"properties": {
|
||||
"isActive": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"maxLength": 255,
|
||||
"minLength": 1
|
||||
},
|
||||
"url": {
|
||||
"type": "string",
|
||||
"x-nullable": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"repo.PaginationResult-repo_ItemSummary": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2413,39 +2645,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"server.ErrorResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string"
|
||||
},
|
||||
"fields": {
|
||||
"type": "object",
|
||||
"additionalProperties": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"server.Result": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"details": {},
|
||||
"error": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"item": {},
|
||||
"message": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"server.Results": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"items": {}
|
||||
}
|
||||
},
|
||||
"services.UserRegistration": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2541,12 +2740,17 @@
|
||||
},
|
||||
"v1.GroupInvitationCreate": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"uses"
|
||||
],
|
||||
"properties": {
|
||||
"expiresAt": {
|
||||
"type": "string"
|
||||
},
|
||||
"uses": {
|
||||
"type": "integer"
|
||||
"type": "integer",
|
||||
"maximum": 100,
|
||||
"minimum": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -2558,6 +2762,20 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.LoginForm": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"password": {
|
||||
"type": "string"
|
||||
},
|
||||
"stayLoggedIn": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"username": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.TokenResponse": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
@@ -2571,6 +2789,12 @@
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"v1.Wrapped": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"item": {}
|
||||
}
|
||||
}
|
||||
},
|
||||
"securityDefinitions": {
|
||||
|
||||
@@ -1,5 +1,14 @@
|
||||
basePath: /api
|
||||
definitions:
|
||||
mid.ErrorResponse:
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
fields:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
repo.DocumentOut:
|
||||
properties:
|
||||
id:
|
||||
@@ -67,6 +76,7 @@ definitions:
|
||||
repo.ItemCreate:
|
||||
properties:
|
||||
description:
|
||||
maxLength: 1000
|
||||
type: string
|
||||
labelIds:
|
||||
items:
|
||||
@@ -76,10 +86,14 @@ definitions:
|
||||
description: Edges
|
||||
type: string
|
||||
name:
|
||||
maxLength: 255
|
||||
minLength: 1
|
||||
type: string
|
||||
parentId:
|
||||
type: string
|
||||
x-nullable: true
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
repo.ItemField:
|
||||
properties:
|
||||
@@ -281,9 +295,14 @@ definitions:
|
||||
color:
|
||||
type: string
|
||||
description:
|
||||
maxLength: 255
|
||||
type: string
|
||||
name:
|
||||
maxLength: 255
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
repo.LabelOut:
|
||||
properties:
|
||||
@@ -421,6 +440,8 @@ definitions:
|
||||
scheduledDate:
|
||||
description: Sold
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
repo.MaintenanceEntryUpdate:
|
||||
properties:
|
||||
@@ -451,6 +472,51 @@ definitions:
|
||||
itemId:
|
||||
type: string
|
||||
type: object
|
||||
repo.NotifierCreate:
|
||||
properties:
|
||||
isActive:
|
||||
type: boolean
|
||||
name:
|
||||
maxLength: 255
|
||||
minLength: 1
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
required:
|
||||
- name
|
||||
- url
|
||||
type: object
|
||||
repo.NotifierOut:
|
||||
properties:
|
||||
createdAt:
|
||||
type: string
|
||||
groupId:
|
||||
type: string
|
||||
id:
|
||||
type: string
|
||||
isActive:
|
||||
type: boolean
|
||||
name:
|
||||
type: string
|
||||
updatedAt:
|
||||
type: string
|
||||
userId:
|
||||
type: string
|
||||
type: object
|
||||
repo.NotifierUpdate:
|
||||
properties:
|
||||
isActive:
|
||||
type: boolean
|
||||
name:
|
||||
maxLength: 255
|
||||
minLength: 1
|
||||
type: string
|
||||
url:
|
||||
type: string
|
||||
x-nullable: true
|
||||
required:
|
||||
- name
|
||||
type: object
|
||||
repo.PaginationResult-repo_ItemSummary:
|
||||
properties:
|
||||
items:
|
||||
@@ -534,28 +600,6 @@ definitions:
|
||||
value:
|
||||
type: number
|
||||
type: object
|
||||
server.ErrorResponse:
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
fields:
|
||||
additionalProperties:
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
server.Result:
|
||||
properties:
|
||||
details: {}
|
||||
error:
|
||||
type: boolean
|
||||
item: {}
|
||||
message:
|
||||
type: string
|
||||
type: object
|
||||
server.Results:
|
||||
properties:
|
||||
items: {}
|
||||
type: object
|
||||
services.UserRegistration:
|
||||
properties:
|
||||
email:
|
||||
@@ -621,13 +665,26 @@ definitions:
|
||||
expiresAt:
|
||||
type: string
|
||||
uses:
|
||||
maximum: 100
|
||||
minimum: 1
|
||||
type: integer
|
||||
required:
|
||||
- uses
|
||||
type: object
|
||||
v1.ItemAttachmentToken:
|
||||
properties:
|
||||
token:
|
||||
type: string
|
||||
type: object
|
||||
v1.LoginForm:
|
||||
properties:
|
||||
password:
|
||||
type: string
|
||||
stayLoggedIn:
|
||||
type: boolean
|
||||
username:
|
||||
type: string
|
||||
type: object
|
||||
v1.TokenResponse:
|
||||
properties:
|
||||
attachmentToken:
|
||||
@@ -637,19 +694,22 @@ definitions:
|
||||
token:
|
||||
type: string
|
||||
type: object
|
||||
v1.Wrapped:
|
||||
properties:
|
||||
item: {}
|
||||
type: object
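v1.Wrapped is the success envelope the spec now references in place of the removed server.Result definition: a single "item" field holding the payload. A matching, assumed helper on the server side (not taken from the diff):

```go
package v1 // assumed helper; only the Wrapped shape comes from the spec above

import (
	"encoding/json"
	"net/http"
)

// Wrapped mirrors the v1.Wrapped definition: {"item": ...}.
type Wrapped struct {
	Item any `json:"item"`
}

// respond writes a payload inside the Wrapped envelope.
func respond(w http.ResponseWriter, status int, item any) error {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	return json.NewEncoder(w).Encode(Wrapped{Item: item})
}
```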
info:
contact:
name: Don't
description: This is a simple Rest API Server Template that implements some basic
User and Authentication patterns to help you get started and bootstrap your next
project!.
description: Track, Manage, and Organize your Things.
license:
name: MIT
title: Go API Templates
title: Homebox API
version: "1.0"
paths:
/v1/actions/ensure-asset-ids:
post:
description: Ensures all items in the database have an asset ID
produces:
- application/json
responses:
@@ -659,11 +719,12 @@ paths:
|
||||
$ref: '#/definitions/v1.ActionAmountResult'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Ensures all items in the database have an asset id
|
||||
summary: Ensure Asset IDs
|
||||
tags:
|
||||
- Group
|
||||
- Actions
|
||||
/v1/actions/ensure-import-refs:
|
||||
post:
|
||||
description: Ensures all items in the database have an import ref
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
@@ -673,11 +734,12 @@ paths:
|
||||
$ref: '#/definitions/v1.ActionAmountResult'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Ensures all items in the database have an import ref
|
||||
summary: Ensures Import Refs
|
||||
tags:
|
||||
- Group
|
||||
- Actions
|
||||
/v1/actions/zero-item-time-fields:
|
||||
post:
|
||||
description: Resets all item date fields to the beginning of the day
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
@@ -687,9 +749,9 @@ paths:
|
||||
$ref: '#/definitions/v1.ActionAmountResult'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Resets all item date fields to the beginning of the day
|
||||
summary: Zero Out Time Fields
|
||||
tags:
|
||||
- Group
|
||||
- Actions
|
||||
/v1/assets/{id}:
|
||||
get:
|
||||
parameters:
|
||||
@@ -707,9 +769,9 @@ paths:
|
||||
$ref: '#/definitions/repo.PaginationResult-repo_ItemSummary'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Gets an item by Asset ID
|
||||
summary: Get Item by Asset ID
|
||||
tags:
|
||||
- Assets
|
||||
- Items
|
||||
/v1/groups:
|
||||
get:
|
||||
produces:
|
||||
@@ -721,7 +783,7 @@ paths:
|
||||
$ref: '#/definitions/repo.Group'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get the current user's group
|
||||
summary: Get Group
|
||||
tags:
|
||||
- Group
|
||||
put:
|
||||
@@ -741,7 +803,7 @@ paths:
|
||||
$ref: '#/definitions/repo.Group'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Updates some fields of the current users group
|
||||
summary: Update Group
|
||||
tags:
|
||||
- Group
|
||||
/v1/groups/invitations:
|
||||
@@ -762,7 +824,7 @@ paths:
|
||||
$ref: '#/definitions/v1.GroupInvitation'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get the current user
|
||||
summary: Create Group Invitation
|
||||
tags:
|
||||
- Group
|
||||
/v1/groups/statistics:
|
||||
@@ -776,7 +838,7 @@ paths:
|
||||
$ref: '#/definitions/repo.GroupStatistics'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get the current user's group statistics
|
||||
summary: Get Group Statistics
|
||||
tags:
|
||||
- Statistics
|
||||
/v1/groups/statistics/labels:
|
||||
@@ -792,7 +854,7 @@ paths:
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get the current user's group statistics
|
||||
summary: Get Label Statistics
|
||||
tags:
|
||||
- Statistics
|
||||
/v1/groups/statistics/locations:
|
||||
@@ -808,7 +870,7 @@ paths:
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get the current user's group statistics
|
||||
summary: Get Location Statistics
|
||||
tags:
|
||||
- Statistics
|
||||
/v1/groups/statistics/purchase-price:
|
||||
@@ -831,7 +893,7 @@ paths:
|
||||
$ref: '#/definitions/repo.ValueOverTime'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Queries the changes overtime of the purchase price over time
|
||||
summary: Get Purchase Price Statistics
|
||||
tags:
|
||||
- Statistics
|
||||
/v1/items:
|
||||
@@ -872,7 +934,7 @@ paths:
|
||||
$ref: '#/definitions/repo.PaginationResult-repo_ItemSummary'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get All Items
|
||||
summary: Query All Items
|
||||
tags:
|
||||
- Items
|
||||
post:
|
||||
@@ -886,13 +948,13 @@ paths:
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
"201":
|
||||
description: Created
|
||||
schema:
|
||||
$ref: '#/definitions/repo.ItemSummary'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Create a new item
|
||||
summary: Create Item
|
||||
tags:
|
||||
- Items
|
||||
/v1/items/{id}:
|
||||
@@ -910,7 +972,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: deletes a item
|
||||
summary: Delete Item
|
||||
tags:
|
||||
- Items
|
||||
get:
|
||||
@@ -929,7 +991,7 @@ paths:
|
||||
$ref: '#/definitions/repo.ItemOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Gets a item and fields
|
||||
summary: Get Item
|
||||
tags:
|
||||
- Items
|
||||
put:
|
||||
@@ -954,7 +1016,7 @@ paths:
|
||||
$ref: '#/definitions/repo.ItemOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: updates a item
|
||||
summary: Update Item
|
||||
tags:
|
||||
- Items
|
||||
/v1/items/{id}/attachments:
|
||||
@@ -990,10 +1052,10 @@ paths:
|
||||
"422":
|
||||
description: Unprocessable Entity
|
||||
schema:
|
||||
$ref: '#/definitions/server.ErrorResponse'
|
||||
$ref: '#/definitions/mid.ErrorResponse'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: imports items into the database
|
||||
summary: Create Item Attachment
|
||||
tags:
|
||||
- Items Attachments
|
||||
/v1/items/{id}/attachments/{attachment_id}:
|
||||
@@ -1014,7 +1076,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: retrieves an attachment for an item
|
||||
summary: Delete Item Attachment
|
||||
tags:
|
||||
- Items Attachments
|
||||
get:
|
||||
@@ -1038,7 +1100,7 @@ paths:
|
||||
$ref: '#/definitions/v1.ItemAttachmentToken'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: retrieves an attachment for an item
|
||||
summary: Get Item Attachment
|
||||
tags:
|
||||
- Items Attachments
|
||||
put:
|
||||
@@ -1066,7 +1128,7 @@ paths:
|
||||
$ref: '#/definitions/repo.ItemOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: retrieves an attachment for an item
|
||||
summary: Update Item Attachment
|
||||
tags:
|
||||
- Items Attachments
|
||||
/v1/items/{id}/maintenance:
|
||||
@@ -1094,8 +1156,8 @@ paths:
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
"201":
|
||||
description: Created
|
||||
schema:
|
||||
$ref: '#/definitions/repo.MaintenanceEntry'
|
||||
security:
|
||||
@@ -1144,7 +1206,7 @@ paths:
|
||||
type: string
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: exports items into the database
|
||||
summary: Export Items
|
||||
tags:
|
||||
- Items
|
||||
/v1/items/fields:
|
||||
@@ -1160,7 +1222,7 @@ paths:
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: imports items into the database
|
||||
summary: Get All Custom Field Names
|
||||
tags:
|
||||
- Items
|
||||
/v1/items/fields/values:
|
||||
@@ -1176,7 +1238,7 @@ paths:
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: imports items into the database
|
||||
summary: Get All Custom Field Values
|
||||
tags:
|
||||
- Items
|
||||
/v1/items/import:
|
||||
@@ -1194,7 +1256,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: imports items into the database
|
||||
summary: Import Items
|
||||
tags:
|
||||
- Items
|
||||
/v1/labels:
|
||||
@@ -1205,14 +1267,9 @@ paths:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/server.Results'
|
||||
- properties:
|
||||
items:
|
||||
items:
|
||||
$ref: '#/definitions/repo.LabelOut'
|
||||
type: array
|
||||
type: object
|
||||
items:
|
||||
$ref: '#/definitions/repo.LabelOut'
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get All Labels
|
||||
@@ -1235,7 +1292,7 @@ paths:
|
||||
$ref: '#/definitions/repo.LabelSummary'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Create a new label
|
||||
summary: Create Label
|
||||
tags:
|
||||
- Labels
|
||||
/v1/labels/{id}:
|
||||
@@ -1253,7 +1310,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: deletes a label
|
||||
summary: Delete Label
|
||||
tags:
|
||||
- Labels
|
||||
get:
|
||||
@@ -1272,7 +1329,7 @@ paths:
|
||||
$ref: '#/definitions/repo.LabelOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Gets a label and fields
|
||||
summary: Get Label
|
||||
tags:
|
||||
- Labels
|
||||
put:
|
||||
@@ -1291,7 +1348,7 @@ paths:
|
||||
$ref: '#/definitions/repo.LabelOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: updates a label
|
||||
summary: Update Label
|
||||
tags:
|
||||
- Labels
|
||||
/v1/locations:
|
||||
@@ -1307,14 +1364,9 @@ paths:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/server.Results'
|
||||
- properties:
|
||||
items:
|
||||
items:
|
||||
$ref: '#/definitions/repo.LocationOutCount'
|
||||
type: array
|
||||
type: object
|
||||
items:
|
||||
$ref: '#/definitions/repo.LocationOutCount'
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get All Locations
|
||||
@@ -1337,7 +1389,7 @@ paths:
|
||||
$ref: '#/definitions/repo.LocationSummary'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Create a new location
|
||||
summary: Create Location
|
||||
tags:
|
||||
- Locations
|
||||
/v1/locations/{id}:
|
||||
@@ -1355,7 +1407,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: deletes a location
|
||||
summary: Delete Location
|
||||
tags:
|
||||
- Locations
|
||||
get:
|
||||
@@ -1374,7 +1426,7 @@ paths:
|
||||
$ref: '#/definitions/repo.LocationOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Gets a location and fields
|
||||
summary: Get Location
|
||||
tags:
|
||||
- Locations
|
||||
put:
|
||||
@@ -1399,7 +1451,7 @@ paths:
|
||||
$ref: '#/definitions/repo.LocationOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: updates a location
|
||||
summary: Update Location
|
||||
tags:
|
||||
- Locations
|
||||
/v1/locations/tree:
|
||||
@@ -1415,19 +1467,112 @@ paths:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/server.Results'
|
||||
- properties:
|
||||
items:
|
||||
items:
|
||||
$ref: '#/definitions/repo.TreeItem'
|
||||
type: array
|
||||
type: object
|
||||
items:
|
||||
$ref: '#/definitions/repo.TreeItem'
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get All Locations
|
||||
summary: Get Locations Tree
|
||||
tags:
|
||||
- Locations
|
||||
/v1/notifiers:
|
||||
get:
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
items:
|
||||
$ref: '#/definitions/repo.NotifierOut'
|
||||
type: array
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get Notifiers
|
||||
tags:
|
||||
- Notifiers
|
||||
post:
|
||||
parameters:
|
||||
- description: Notifier Data
|
||||
in: body
|
||||
name: payload
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/repo.NotifierCreate'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/repo.NotifierOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Create Notifier
|
||||
tags:
|
||||
- Notifiers
|
||||
/v1/notifiers/{id}:
|
||||
delete:
|
||||
parameters:
|
||||
- description: Notifier ID
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
responses:
|
||||
"204":
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Delete a Notifier
|
||||
tags:
|
||||
- Notifiers
|
||||
put:
|
||||
parameters:
|
||||
- description: Notifier ID
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: Notifier Data
|
||||
in: body
|
||||
name: payload
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/repo.NotifierUpdate'
|
||||
responses:
|
||||
"200":
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/repo.NotifierOut'
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Update Notifier
|
||||
tags:
|
||||
- Notifiers
|
||||
/v1/notifiers/test:
|
||||
post:
|
||||
parameters:
|
||||
- description: Notifier ID
|
||||
in: path
|
||||
name: id
|
||||
required: true
|
||||
type: string
|
||||
- description: URL
|
||||
in: query
|
||||
name: url
|
||||
required: true
|
||||
type: string
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
"204":
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Test Notifier
|
||||
tags:
|
||||
- Notifiers
|
||||
/v1/qrcode:
|
||||
get:
|
||||
parameters:
|
||||
@@ -1444,7 +1589,7 @@ paths:
|
||||
type: string
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Encode data into QRCode
|
||||
summary: Create QR Code
|
||||
tags:
|
||||
- Items
|
||||
/v1/reporting/bill-of-materials:
|
||||
@@ -1458,7 +1603,7 @@ paths:
|
||||
type: string
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Generates a Bill of Materials CSV
|
||||
summary: Export Bill of Materials
|
||||
tags:
|
||||
- Reporting
|
||||
/v1/status:
|
||||
@@ -1470,7 +1615,7 @@ paths:
|
||||
description: OK
|
||||
schema:
|
||||
$ref: '#/definitions/v1.ApiSummary'
|
||||
summary: Retrieves the basic information about the API
|
||||
summary: Application Info
|
||||
tags:
|
||||
- Base
|
||||
/v1/users/change-password:
|
||||
@@ -1487,7 +1632,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Updates the users password
|
||||
summary: Change Password
|
||||
tags:
|
||||
- User
|
||||
/v1/users/login:
|
||||
@@ -1506,6 +1651,12 @@ paths:
|
||||
in: formData
|
||||
name: password
|
||||
type: string
|
||||
- description: Login Data
|
||||
in: body
|
||||
name: payload
|
||||
required: true
|
||||
schema:
|
||||
$ref: '#/definitions/v1.LoginForm'
|
||||
produces:
|
||||
- application/json
|
||||
responses:
|
||||
@@ -1553,7 +1704,7 @@ paths:
|
||||
responses:
|
||||
"204":
|
||||
description: No Content
|
||||
summary: Get the current user
|
||||
summary: Register New User
|
||||
tags:
|
||||
- User
|
||||
/v1/users/self:
|
||||
@@ -1565,7 +1716,7 @@ paths:
|
||||
description: No Content
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Deletes the user account
|
||||
summary: Delete Account
|
||||
tags:
|
||||
- User
|
||||
get:
|
||||
@@ -1576,14 +1727,14 @@ paths:
|
||||
description: OK
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/server.Result'
|
||||
- $ref: '#/definitions/v1.Wrapped'
|
||||
- properties:
|
||||
item:
|
||||
$ref: '#/definitions/repo.UserOut'
|
||||
type: object
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Get the current user
|
||||
summary: Get User Self
|
||||
tags:
|
||||
- User
|
||||
put:
|
||||
@@ -1601,14 +1752,14 @@ paths:
|
||||
description: OK
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: '#/definitions/server.Result'
|
||||
- $ref: '#/definitions/v1.Wrapped'
|
||||
- properties:
|
||||
item:
|
||||
$ref: '#/definitions/repo.UserUpdate'
|
||||
type: object
|
||||
security:
|
||||
- Bearer: []
|
||||
summary: Update the current user
|
||||
summary: Update Account
|
||||
tags:
|
||||
- User
|
||||
securityDefinitions:
|
||||
|
||||
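The login endpoint above now accepts a JSON body (v1.LoginForm), including the new stayLoggedIn flag, in addition to the existing form fields. A minimal client-side sketch in Go is shown below; the base URL and the /api prefix are assumptions for illustration, since the API basePath is not visible in this excerpt.

// Illustrative sketch only: posting the new v1.LoginForm JSON payload.
// The "/api" prefix and the base URL are assumptions, not taken from this diff.
package main

import (
	"bytes"
	"encoding/json"
	"net/http"
)

func login(baseURL, username, password string, stayLoggedIn bool) (*http.Response, error) {
	payload, err := json.Marshal(map[string]any{
		"username":     username,
		"password":     password,
		"stayLoggedIn": stayLoggedIn, // extends the session from one week to four (see service_user.go below)
	})
	if err != nil {
		return nil, err
	}
	return http.Post(baseURL+"/api/v1/users/login", "application/json", bytes.NewReader(payload))
}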
@@ -1,23 +1,28 @@
module github.com/hay-kot/homebox/backend

go 1.19
go 1.20

require (
ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb
entgo.io/ent v0.11.8
github.com/ardanlabs/conf/v3 v3.1.4
ariga.io/atlas v0.10.0
entgo.io/ent v0.11.10
github.com/ardanlabs/conf/v3 v3.1.5
github.com/containrrr/shoutrrr v0.7.1
github.com/go-chi/chi/v5 v5.0.8
github.com/go-playground/validator/v10 v10.11.2
github.com/gocarina/gocsv v0.0.0-20230219202803-bcce7dc8d0bb
github.com/go-playground/validator/v10 v10.12.0
github.com/gocarina/gocsv v0.0.0-20230226133904-70c27cb2918a
github.com/google/uuid v1.3.0
github.com/gorilla/schema v1.2.0
github.com/hay-kot/safeserve v0.0.1
github.com/mattn/go-sqlite3 v1.14.16
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.29.0
github.com/stretchr/testify v1.8.2
github.com/swaggo/http-swagger v1.3.3
github.com/swaggo/swag v1.8.10
github.com/swaggo/http-swagger v1.3.4
github.com/swaggo/swag v1.8.11
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1
golang.org/x/crypto v0.7.0
modernc.org/sqlite v1.21.0
)

require (
@@ -25,6 +30,8 @@ require (
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.0 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/fogleman/gg v1.3.0 // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
@@ -37,13 +44,14 @@ require (
github.com/google/go-cmp v0.5.9 // indirect
github.com/hashicorp/hcl/v2 v2.15.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/leodido/go-urn v1.2.2 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/swaggo/files v1.0.0 // indirect
github.com/yeqown/reedsolomon v1.0.0 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
@@ -52,6 +60,15 @@ require (
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/tools v0.6.0 // indirect
golang.org/x/tools v0.6.1-0.20230222164832-25d2519c8696 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/uint128 v1.2.0 // indirect
modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect
modernc.org/libc v1.22.3 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.5.0 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/strutil v1.1.3 // indirect
modernc.org/token v1.0.1 // indirect
)
backend/go.sum (1161 lines changed): file diff suppressed because it is too large
@@ -5,9 +5,10 @@ import (
)

type AllServices struct {
User *UserService
Group *GroupService
Items *ItemService
User *UserService
Group *GroupService
Items *ItemService
BackgroundService *BackgroundService
}

type OptionsFunc func(*options)
@@ -42,5 +43,6 @@ func New(repos *repo.AllRepos, opts ...OptionsFunc) *AllServices {
repo: repos,
autoIncrementAssetID: options.autoIncrementAssetID,
},
BackgroundService: &BackgroundService{repos},
}
}

@@ -3,10 +3,8 @@ package services
import (
"context"
"log"
"math/rand"
"os"
"testing"
"time"

"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
@@ -49,8 +47,6 @@ func bootstrap() {
}

func TestMain(m *testing.M) {
rand.Seed(int64(time.Now().Unix()))

client, err := ent.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
if err != nil {
log.Fatalf("failed opening connection to sqlite: %v", err)
backend/internal/core/services/service_background.go (new file, 81 lines)
@@ -0,0 +1,81 @@
package services

import (
"context"
"strings"
"time"

"github.com/containrrr/shoutrrr"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/data/types"
"github.com/rs/zerolog/log"
)

type BackgroundService struct {
repos *repo.AllRepos
}

func (svc *BackgroundService) SendNotifiersToday(ctx context.Context) error {
// Get All Groups
groups, err := svc.repos.Groups.GetAllGroups(ctx)
if err != nil {
return err
}

today := types.DateFromTime(time.Now())

for i := range groups {
group := groups[i]

entries, err := svc.repos.MaintEntry.GetScheduled(ctx, group.ID, today)
if err != nil {
return err
}

if len(entries) == 0 {
log.Debug().
Str("group_name", group.Name).
Str("group_id", group.ID.String()).
Msg("No scheduled maintenance for today")
continue
}

notifiers, err := svc.repos.Notifiers.GetByGroup(ctx, group.ID)
if err != nil {
return err
}

urls := make([]string, len(notifiers))
for i := range notifiers {
urls[i] = notifiers[i].URL
}

bldr := strings.Builder{}

bldr.WriteString("Homebox Maintenance for (")
bldr.WriteString(today.String())
bldr.WriteString("):\n")

for i := range entries {
entry := entries[i]
bldr.WriteString(" - ")
bldr.WriteString(entry.Name)
bldr.WriteString("\n")
}

var sendErrs []error
for i := range urls {
err := shoutrrr.Send(urls[i], bldr.String())

if err != nil {
sendErrs = append(sendErrs, err)
}
}

if len(sendErrs) > 0 {
return sendErrs[0]
}
}

return nil
}
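For context, the new BackgroundService above is a plain struct over the repository layer; a minimal sketch of how it might be driven once per day is shown below. The ticker loop and the way the service is constructed here are illustrative assumptions and are not part of this change set.

// Illustrative sketch only: running SendNotifiersToday on a daily ticker.
// The scheduling mechanism shown here is an assumption, not part of this diff.
package main

import (
	"context"
	"time"

	"github.com/hay-kot/homebox/backend/internal/core/services"
	"github.com/hay-kot/homebox/backend/internal/data/repo"
	"github.com/rs/zerolog/log"
)

func runNotifierLoop(ctx context.Context, repos *repo.AllRepos) {
	svc := services.New(repos) // AllServices now exposes BackgroundService
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := svc.BackgroundService.SendNotifiersToday(ctx); err != nil {
				log.Err(err).Msg("failed to send scheduled maintenance notifications")
			}
		}
	}
}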
@@ -61,6 +61,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)

switch data.GroupToken {
case "":
log.Debug().Msg("creating new group")
creatingGroup = true
group, err = svc.repos.Groups.GroupCreate(ctx, "Home")
if err != nil {
@@ -68,6 +69,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
return repo.UserOut{}, err
}
default:
log.Debug().Msg("joining existing group")
token, err = svc.repos.Groups.InvitationGet(ctx, hasher.HashToken(data.GroupToken))
if err != nil {
log.Err(err).Msg("Failed to get invitation token")
@@ -94,14 +96,14 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
// Create the default labels and locations for the group.
if creatingGroup {
for _, label := range defaultLabels() {
_, err := svc.repos.Labels.Create(ctx, group.ID, label)
_, err := svc.repos.Labels.Create(ctx, usr.GroupID, label)
if err != nil {
return repo.UserOut{}, err
}
}

for _, location := range defaultLocations() {
_, err := svc.repos.Locations.Create(ctx, group.ID, location)
_, err := svc.repos.Locations.Create(ctx, usr.GroupID, location)
if err != nil {
return repo.UserOut{}, err
}
@@ -138,12 +140,18 @@ func (svc *UserService) UpdateSelf(ctx context.Context, ID uuid.UUID, data repo.
// ============================================================================
// User Authentication

func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID) (UserAuthTokenDetail, error) {
func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID, extendedSession bool) (UserAuthTokenDetail, error) {
attachmentToken := hasher.GenerateToken()

expiresAt := time.Now().Add(oneWeek)
if extendedSession {
expiresAt = time.Now().Add(oneWeek * 4)
}

attachmentData := repo.UserAuthTokenCreate{
UserID: userId,
TokenHash: attachmentToken.Hash,
ExpiresAt: time.Now().Add(oneWeek),
ExpiresAt: expiresAt,
}

_, err := svc.repos.AuthTokens.CreateToken(ctx, attachmentData, authroles.RoleAttachments)
@@ -155,7 +163,7 @@ func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID
data := repo.UserAuthTokenCreate{
UserID: userId,
TokenHash: userToken.Hash,
ExpiresAt: time.Now().Add(oneWeek),
ExpiresAt: expiresAt,
}

created, err := svc.repos.AuthTokens.CreateToken(ctx, data, authroles.RoleUser)
@@ -170,7 +178,7 @@ func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID
}, nil
}

func (svc *UserService) Login(ctx context.Context, username, password string) (UserAuthTokenDetail, error) {
func (svc *UserService) Login(ctx context.Context, username, password string, extendedSession bool) (UserAuthTokenDetail, error) {
usr, err := svc.repos.Users.GetOneEmail(ctx, username)
if err != nil {
// SECURITY: Perform hash to ensure response times are the same
@@ -182,7 +190,7 @@ func (svc *UserService) Login(ctx context.Context, username, password string) (U
return UserAuthTokenDetail{}, ErrorInvalidLogin
}

return svc.createSessionToken(ctx, usr.ID)
return svc.createSessionToken(ctx, usr.ID, extendedSession)
}

func (svc *UserService) Logout(ctx context.Context, token string) error {
@@ -199,7 +207,7 @@ func (svc *UserService) RenewToken(ctx context.Context, token string) (UserAuthT
return UserAuthTokenDetail{}, ErrorInvalidToken
}

return svc.createSessionToken(ctx, dbToken.ID)
return svc.createSessionToken(ctx, dbToken.ID, false)
}

// DeleteSelf deletes the user that is currently logged based of the provided UUID
@@ -231,10 +231,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{attachment.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -251,10 +248,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{attachment.DocumentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -168,10 +168,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{attachment.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -184,10 +181,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{attachment.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -203,10 +197,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{attachment.DocumentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -219,10 +210,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{attachment.DocumentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -417,10 +405,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
|
||||
Columns: []string{attachment.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -433,10 +418,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
|
||||
Columns: []string{attachment.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -452,10 +434,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
|
||||
Columns: []string{attachment.DocumentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -468,10 +447,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
|
||||
Columns: []string{attachment.DocumentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -143,10 +143,7 @@ func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{authroles.TokenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: authtokens.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -133,10 +133,7 @@ func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{authroles.TokenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: authtokens.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -149,10 +146,7 @@ func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{authroles.TokenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: authtokens.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -314,10 +308,7 @@ func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles,
|
||||
Columns: []string{authroles.TokenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: authtokens.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -330,10 +321,7 @@ func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles,
|
||||
Columns: []string{authroles.TokenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: authtokens.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -249,10 +249,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{authtokens.UserColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -269,10 +266,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{authtokens.RolesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeInt,
|
||||
Column: authroles.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -174,10 +174,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{authtokens.UserColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -190,10 +187,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{authtokens.UserColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -209,10 +203,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{authtokens.RolesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeInt,
|
||||
Column: authroles.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -225,10 +216,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{authtokens.RolesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeInt,
|
||||
Column: authroles.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -429,10 +417,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
|
||||
Columns: []string{authtokens.UserColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -445,10 +430,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
|
||||
Columns: []string{authtokens.UserColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -464,10 +446,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
|
||||
Columns: []string{authtokens.RolesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeInt,
|
||||
Column: authroles.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -480,10 +459,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
|
||||
Columns: []string{authtokens.RolesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeInt,
|
||||
Column: authroles.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -11,6 +11,10 @@ import (
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/migrate"
|
||||
|
||||
"entgo.io/ent"
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
|
||||
@@ -22,11 +26,8 @@ import (
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
|
||||
"entgo.io/ent/dialect"
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
)
|
||||
|
||||
// Client is the client that holds all ent builders.
|
||||
@@ -56,6 +57,8 @@ type Client struct {
|
||||
Location *LocationClient
|
||||
// MaintenanceEntry is the client for interacting with the MaintenanceEntry builders.
|
||||
MaintenanceEntry *MaintenanceEntryClient
|
||||
// Notifier is the client for interacting with the Notifier builders.
|
||||
Notifier *NotifierClient
|
||||
// User is the client for interacting with the User builders.
|
||||
User *UserClient
|
||||
}
|
||||
@@ -82,9 +85,59 @@ func (c *Client) init() {
|
||||
c.Label = NewLabelClient(c.config)
|
||||
c.Location = NewLocationClient(c.config)
|
||||
c.MaintenanceEntry = NewMaintenanceEntryClient(c.config)
|
||||
c.Notifier = NewNotifierClient(c.config)
|
||||
c.User = NewUserClient(c.config)
|
||||
}
|
||||
|
||||
type (
|
||||
// config is the configuration for the client and its builder.
|
||||
config struct {
|
||||
// driver used for executing database requests.
|
||||
driver dialect.Driver
|
||||
// debug enable a debug logging.
|
||||
debug bool
|
||||
// log used for logging on debug mode.
|
||||
log func(...any)
|
||||
// hooks to execute on mutations.
|
||||
hooks *hooks
|
||||
// interceptors to execute on queries.
|
||||
inters *inters
|
||||
}
|
||||
// Option function to configure the client.
|
||||
Option func(*config)
|
||||
)
|
||||
|
||||
// options applies the options on the config object.
|
||||
func (c *config) options(opts ...Option) {
|
||||
for _, opt := range opts {
|
||||
opt(c)
|
||||
}
|
||||
if c.debug {
|
||||
c.driver = dialect.Debug(c.driver, c.log)
|
||||
}
|
||||
}
|
||||
|
||||
// Debug enables debug logging on the ent.Driver.
|
||||
func Debug() Option {
|
||||
return func(c *config) {
|
||||
c.debug = true
|
||||
}
|
||||
}
|
||||
|
||||
// Log sets the logging function for debug mode.
|
||||
func Log(fn func(...any)) Option {
|
||||
return func(c *config) {
|
||||
c.log = fn
|
||||
}
|
||||
}
|
||||
|
||||
// Driver configures the client driver.
|
||||
func Driver(driver dialect.Driver) Option {
|
||||
return func(c *config) {
|
||||
c.driver = driver
|
||||
}
|
||||
}
|
||||
|
||||
// Open opens a database/sql.DB specified by the driver name and
|
||||
// the data source name, and returns a new client attached to it.
|
||||
// Optional parameters can be added for configuring the client.
|
||||
@@ -127,6 +180,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) {
|
||||
Label: NewLabelClient(cfg),
|
||||
Location: NewLocationClient(cfg),
|
||||
MaintenanceEntry: NewMaintenanceEntryClient(cfg),
|
||||
Notifier: NewNotifierClient(cfg),
|
||||
User: NewUserClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
@@ -158,6 +212,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error)
|
||||
Label: NewLabelClient(cfg),
|
||||
Location: NewLocationClient(cfg),
|
||||
MaintenanceEntry: NewMaintenanceEntryClient(cfg),
|
||||
Notifier: NewNotifierClient(cfg),
|
||||
User: NewUserClient(cfg),
|
||||
}, nil
|
||||
}
|
||||
@@ -187,35 +242,25 @@ func (c *Client) Close() error {
|
||||
// Use adds the mutation hooks to all the entity clients.
|
||||
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
|
||||
func (c *Client) Use(hooks ...Hook) {
|
||||
c.Attachment.Use(hooks...)
|
||||
c.AuthRoles.Use(hooks...)
|
||||
c.AuthTokens.Use(hooks...)
|
||||
c.Document.Use(hooks...)
|
||||
c.Group.Use(hooks...)
|
||||
c.GroupInvitationToken.Use(hooks...)
|
||||
c.Item.Use(hooks...)
|
||||
c.ItemField.Use(hooks...)
|
||||
c.Label.Use(hooks...)
|
||||
c.Location.Use(hooks...)
|
||||
c.MaintenanceEntry.Use(hooks...)
|
||||
c.User.Use(hooks...)
|
||||
for _, n := range []interface{ Use(...Hook) }{
|
||||
c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group,
|
||||
c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location,
|
||||
c.MaintenanceEntry, c.Notifier, c.User,
|
||||
} {
|
||||
n.Use(hooks...)
|
||||
}
|
||||
}
|
||||
|
||||
// Intercept adds the query interceptors to all the entity clients.
|
||||
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
|
||||
func (c *Client) Intercept(interceptors ...Interceptor) {
|
||||
c.Attachment.Intercept(interceptors...)
|
||||
c.AuthRoles.Intercept(interceptors...)
|
||||
c.AuthTokens.Intercept(interceptors...)
|
||||
c.Document.Intercept(interceptors...)
|
||||
c.Group.Intercept(interceptors...)
|
||||
c.GroupInvitationToken.Intercept(interceptors...)
|
||||
c.Item.Intercept(interceptors...)
|
||||
c.ItemField.Intercept(interceptors...)
|
||||
c.Label.Intercept(interceptors...)
|
||||
c.Location.Intercept(interceptors...)
|
||||
c.MaintenanceEntry.Intercept(interceptors...)
|
||||
c.User.Intercept(interceptors...)
|
||||
for _, n := range []interface{ Intercept(...Interceptor) }{
|
||||
c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group,
|
||||
c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location,
|
||||
c.MaintenanceEntry, c.Notifier, c.User,
|
||||
} {
|
||||
n.Intercept(interceptors...)
|
||||
}
|
||||
}
|
||||
|
||||
// Mutate implements the ent.Mutator interface.
|
||||
@@ -243,6 +288,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) {
|
||||
return c.Location.mutate(ctx, m)
|
||||
case *MaintenanceEntryMutation:
|
||||
return c.MaintenanceEntry.mutate(ctx, m)
|
||||
case *NotifierMutation:
|
||||
return c.Notifier.mutate(ctx, m)
|
||||
case *UserMutation:
|
||||
return c.User.mutate(ctx, m)
|
||||
default:
|
||||
@@ -1023,6 +1070,22 @@ func (c *GroupClient) QueryInvitationTokens(gr *Group) *GroupInvitationTokenQuer
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryNotifiers queries the notifiers edge of a Group.
|
||||
func (c *GroupClient) QueryNotifiers(gr *Group) *NotifierQuery {
|
||||
query := (&NotifierClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := gr.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(group.Table, group.FieldID, id),
|
||||
sqlgraph.To(notifier.Table, notifier.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, group.NotifiersTable, group.NotifiersColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(gr.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *GroupClient) Hooks() []Hook {
|
||||
return c.hooks.Group
|
||||
@@ -1275,6 +1338,22 @@ func (c *ItemClient) GetX(ctx context.Context, id uuid.UUID) *Item {
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryGroup queries the group edge of a Item.
|
||||
func (c *ItemClient) QueryGroup(i *Item) *GroupQuery {
|
||||
query := (&GroupClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := i.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(item.Table, item.FieldID, id),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(i.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryParent queries the parent edge of a Item.
|
||||
func (c *ItemClient) QueryParent(i *Item) *ItemQuery {
|
||||
query := (&ItemClient{config: c.config}).Query()
|
||||
@@ -1307,22 +1386,6 @@ func (c *ItemClient) QueryChildren(i *Item) *ItemQuery {
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryGroup queries the group edge of a Item.
|
||||
func (c *ItemClient) QueryGroup(i *Item) *GroupQuery {
|
||||
query := (&GroupClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := i.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(item.Table, item.FieldID, id),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(i.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryLabel queries the label edge of a Item.
|
||||
func (c *ItemClient) QueryLabel(i *Item) *LabelQuery {
|
||||
query := (&LabelClient{config: c.config}).Query()
|
||||
@@ -1805,6 +1868,22 @@ func (c *LocationClient) GetX(ctx context.Context, id uuid.UUID) *Location {
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryGroup queries the group edge of a Location.
|
||||
func (c *LocationClient) QueryGroup(l *Location) *GroupQuery {
|
||||
query := (&GroupClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := l.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(location.Table, location.FieldID, id),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryParent queries the parent edge of a Location.
|
||||
func (c *LocationClient) QueryParent(l *Location) *LocationQuery {
|
||||
query := (&LocationClient{config: c.config}).Query()
|
||||
@@ -1837,22 +1916,6 @@ func (c *LocationClient) QueryChildren(l *Location) *LocationQuery {
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryGroup queries the group edge of a Location.
|
||||
func (c *LocationClient) QueryGroup(l *Location) *GroupQuery {
|
||||
query := (&GroupClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := l.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(location.Table, location.FieldID, id),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(l.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryItems queries the items edge of a Location.
|
||||
func (c *LocationClient) QueryItems(l *Location) *ItemQuery {
|
||||
query := (&ItemClient{config: c.config}).Query()
|
||||
@@ -2028,6 +2091,156 @@ func (c *MaintenanceEntryClient) mutate(ctx context.Context, m *MaintenanceEntry
|
||||
}
|
||||
}
|
||||
|
||||
// NotifierClient is a client for the Notifier schema.
|
||||
type NotifierClient struct {
|
||||
config
|
||||
}
|
||||
|
||||
// NewNotifierClient returns a client for the Notifier from the given config.
|
||||
func NewNotifierClient(c config) *NotifierClient {
|
||||
return &NotifierClient{config: c}
|
||||
}
|
||||
|
||||
// Use adds a list of mutation hooks to the hooks stack.
|
||||
// A call to `Use(f, g, h)` equals to `notifier.Hooks(f(g(h())))`.
|
||||
func (c *NotifierClient) Use(hooks ...Hook) {
|
||||
c.hooks.Notifier = append(c.hooks.Notifier, hooks...)
|
||||
}
|
||||
|
||||
// Intercept adds a list of query interceptors to the interceptors stack.
|
||||
// A call to `Intercept(f, g, h)` equals to `notifier.Intercept(f(g(h())))`.
|
||||
func (c *NotifierClient) Intercept(interceptors ...Interceptor) {
|
||||
c.inters.Notifier = append(c.inters.Notifier, interceptors...)
|
||||
}
|
||||
|
||||
// Create returns a builder for creating a Notifier entity.
|
||||
func (c *NotifierClient) Create() *NotifierCreate {
|
||||
mutation := newNotifierMutation(c.config, OpCreate)
|
||||
return &NotifierCreate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// CreateBulk returns a builder for creating a bulk of Notifier entities.
|
||||
func (c *NotifierClient) CreateBulk(builders ...*NotifierCreate) *NotifierCreateBulk {
|
||||
return &NotifierCreateBulk{config: c.config, builders: builders}
|
||||
}
|
||||
|
||||
// Update returns an update builder for Notifier.
|
||||
func (c *NotifierClient) Update() *NotifierUpdate {
|
||||
mutation := newNotifierMutation(c.config, OpUpdate)
|
||||
return &NotifierUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOne returns an update builder for the given entity.
|
||||
func (c *NotifierClient) UpdateOne(n *Notifier) *NotifierUpdateOne {
|
||||
mutation := newNotifierMutation(c.config, OpUpdateOne, withNotifier(n))
|
||||
return &NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// UpdateOneID returns an update builder for the given id.
|
||||
func (c *NotifierClient) UpdateOneID(id uuid.UUID) *NotifierUpdateOne {
|
||||
mutation := newNotifierMutation(c.config, OpUpdateOne, withNotifierID(id))
|
||||
return &NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// Delete returns a delete builder for Notifier.
|
||||
func (c *NotifierClient) Delete() *NotifierDelete {
|
||||
mutation := newNotifierMutation(c.config, OpDelete)
|
||||
return &NotifierDelete{config: c.config, hooks: c.Hooks(), mutation: mutation}
|
||||
}
|
||||
|
||||
// DeleteOne returns a builder for deleting the given entity.
|
||||
func (c *NotifierClient) DeleteOne(n *Notifier) *NotifierDeleteOne {
|
||||
return c.DeleteOneID(n.ID)
|
||||
}
|
||||
|
||||
// DeleteOneID returns a builder for deleting the given entity by its id.
|
||||
func (c *NotifierClient) DeleteOneID(id uuid.UUID) *NotifierDeleteOne {
|
||||
builder := c.Delete().Where(notifier.ID(id))
|
||||
builder.mutation.id = &id
|
||||
builder.mutation.op = OpDeleteOne
|
||||
return &NotifierDeleteOne{builder}
|
||||
}
|
||||
|
||||
// Query returns a query builder for Notifier.
|
||||
func (c *NotifierClient) Query() *NotifierQuery {
|
||||
return &NotifierQuery{
|
||||
config: c.config,
|
||||
ctx: &QueryContext{Type: TypeNotifier},
|
||||
inters: c.Interceptors(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a Notifier entity by its id.
|
||||
func (c *NotifierClient) Get(ctx context.Context, id uuid.UUID) (*Notifier, error) {
|
||||
return c.Query().Where(notifier.ID(id)).Only(ctx)
|
||||
}
|
||||
|
||||
// GetX is like Get, but panics if an error occurs.
|
||||
func (c *NotifierClient) GetX(ctx context.Context, id uuid.UUID) *Notifier {
|
||||
obj, err := c.Get(ctx, id)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
// QueryGroup queries the group edge of a Notifier.
|
||||
func (c *NotifierClient) QueryGroup(n *Notifier) *GroupQuery {
|
||||
query := (&GroupClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := n.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(notifier.Table, notifier.FieldID, id),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, notifier.GroupTable, notifier.GroupColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(n.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryUser queries the user edge of a Notifier.
|
||||
func (c *NotifierClient) QueryUser(n *Notifier) *UserQuery {
|
||||
query := (&UserClient{config: c.config}).Query()
|
||||
query.path = func(context.Context) (fromV *sql.Selector, _ error) {
|
||||
id := n.ID
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(notifier.Table, notifier.FieldID, id),
|
||||
sqlgraph.To(user.Table, user.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, notifier.UserTable, notifier.UserColumn),
|
||||
)
|
||||
fromV = sqlgraph.Neighbors(n.driver.Dialect(), step)
|
||||
return fromV, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// Hooks returns the client hooks.
|
||||
func (c *NotifierClient) Hooks() []Hook {
|
||||
return c.hooks.Notifier
|
||||
}
|
||||
|
||||
// Interceptors returns the client interceptors.
|
||||
func (c *NotifierClient) Interceptors() []Interceptor {
|
||||
return c.inters.Notifier
|
||||
}
|
||||
|
||||
func (c *NotifierClient) mutate(ctx context.Context, m *NotifierMutation) (Value, error) {
|
||||
switch m.Op() {
|
||||
case OpCreate:
|
||||
return (&NotifierCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdate:
|
||||
return (&NotifierUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpUpdateOne:
|
||||
return (&NotifierUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx)
|
||||
case OpDelete, OpDeleteOne:
|
||||
return (&NotifierDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx)
|
||||
default:
|
||||
return nil, fmt.Errorf("ent: unknown Notifier mutation op: %q", m.Op())
|
||||
}
|
||||
}
|
||||
|
||||
// UserClient is a client for the User schema.
|
||||
type UserClient struct {
|
||||
config
|
||||
@@ -2153,6 +2366,22 @@ func (c *UserClient) QueryAuthTokens(u *User) *AuthTokensQuery {
    return query
}

// QueryNotifiers queries the notifiers edge of a User.
func (c *UserClient) QueryNotifiers(u *User) *NotifierQuery {
    query := (&NotifierClient{config: c.config}).Query()
    query.path = func(context.Context) (fromV *sql.Selector, _ error) {
        id := u.ID
        step := sqlgraph.NewStep(
            sqlgraph.From(user.Table, user.FieldID, id),
            sqlgraph.To(notifier.Table, notifier.FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, user.NotifiersTable, user.NotifiersColumn),
        )
        fromV = sqlgraph.Neighbors(u.driver.Dialect(), step)
        return fromV, nil
    }
    return query
}

// Hooks returns the client hooks.
func (c *UserClient) Hooks() []Hook {
    return c.hooks.User
@@ -2177,3 +2406,15 @@ func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error)
    return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op())
    }
}

// hooks and interceptors per client, for fast access.
type (
    hooks struct {
        Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item,
        ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Hook
    }
    inters struct {
        Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item,
        ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Interceptor
    }
)

@@ -1,88 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "entgo.io/ent"
    "entgo.io/ent/dialect"
)

// Option function to configure the client.
type Option func(*config)

// Config is the configuration for the client and its builder.
type config struct {
    // driver used for executing database requests.
    driver dialect.Driver
    // debug enable a debug logging.
    debug bool
    // log used for logging on debug mode.
    log func(...any)
    // hooks to execute on mutations.
    hooks *hooks
    // interceptors to execute on queries.
    inters *inters
}

// hooks and interceptors per client, for fast access.
type (
    hooks struct {
        Attachment           []ent.Hook
        AuthRoles            []ent.Hook
        AuthTokens           []ent.Hook
        Document             []ent.Hook
        Group                []ent.Hook
        GroupInvitationToken []ent.Hook
        Item                 []ent.Hook
        ItemField            []ent.Hook
        Label                []ent.Hook
        Location             []ent.Hook
        MaintenanceEntry     []ent.Hook
        User                 []ent.Hook
    }
    inters struct {
        Attachment           []ent.Interceptor
        AuthRoles            []ent.Interceptor
        AuthTokens           []ent.Interceptor
        Document             []ent.Interceptor
        Group                []ent.Interceptor
        GroupInvitationToken []ent.Interceptor
        Item                 []ent.Interceptor
        ItemField            []ent.Interceptor
        Label                []ent.Interceptor
        Location             []ent.Interceptor
        MaintenanceEntry     []ent.Interceptor
        User                 []ent.Interceptor
    }
)

// Options applies the options on the config object.
func (c *config) options(opts ...Option) {
    for _, opt := range opts {
        opt(c)
    }
    if c.debug {
        c.driver = dialect.Debug(c.driver, c.log)
    }
}

// Debug enables debug logging on the ent.Driver.
func Debug() Option {
    return func(c *config) {
        c.debug = true
    }
}

// Log sets the logging function for debug mode.
func Log(fn func(...any)) Option {
    return func(c *config) {
        c.log = fn
    }
}

// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
    return func(c *config) {
        c.driver = driver
    }
}
@@ -1,33 +0,0 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "context"
)

type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
    c, _ := ctx.Value(clientCtxKey{}).(*Client)
    return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
    return context.WithValue(parent, clientCtxKey{}, c)
}

type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
    tx, _ := ctx.Value(txCtxKey{}).(*Tx)
    return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
    return context.WithValue(parent, txCtxKey{}, tx)
}
@@ -238,10 +238,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{document.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -258,10 +255,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -190,10 +190,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{document.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -206,10 +203,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{document.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -225,10 +219,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -241,10 +232,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -260,10 +248,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -480,10 +465,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
|
||||
Columns: []string{document.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -496,10 +478,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
|
||||
Columns: []string{document.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -515,10 +494,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -531,10 +507,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -550,10 +523,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
|
||||
Columns: []string{document.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
@@ -22,6 +22,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/data/ent/label"
    "github.com/hay-kot/homebox/backend/internal/data/ent/location"
    "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
    "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
    "github.com/hay-kot/homebox/backend/internal/data/ent/user"
)

@@ -44,6 +45,32 @@ type (
    MutateFunc = ent.MutateFunc
)

type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
    c, _ := ctx.Value(clientCtxKey{}).(*Client)
    return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
    return context.WithValue(parent, clientCtxKey{}, c)
}

type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
    tx, _ := ctx.Value(txCtxKey{}).(*Tx)
    return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
    return context.WithValue(parent, txCtxKey{}, tx)
}

// OrderFunc applies an ordering on the sql selector.
type OrderFunc func(*sql.Selector)

@@ -61,6 +88,7 @@ func columnChecker(table string) func(string) error {
    label.Table:            label.ValidColumn,
    location.Table:         location.ValidColumn,
    maintenanceentry.Table: maintenanceentry.ValidColumn,
    notifier.Table:         notifier.ValidColumn,
    user.Table:             user.ValidColumn,
}
check, ok := checks[table]
@@ -501,7 +529,7 @@ func withHooks[V Value, M any, PM interface {
    return exec(ctx)
}
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
    mutationT, ok := m.(PM)
    mutationT, ok := any(m).(PM)
    if !ok {
        return nil, fmt.Errorf("unexpected mutation type %T", m)
    }

@@ -44,9 +44,11 @@ type GroupEdges struct {
    Documents []*Document `json:"documents,omitempty"`
    // InvitationTokens holds the value of the invitation_tokens edge.
    InvitationTokens []*GroupInvitationToken `json:"invitation_tokens,omitempty"`
    // Notifiers holds the value of the notifiers edge.
    Notifiers []*Notifier `json:"notifiers,omitempty"`
    // loadedTypes holds the information for reporting if a
    // type was loaded (or requested) in eager-loading or not.
    loadedTypes [6]bool
    loadedTypes [7]bool
}

// UsersOrErr returns the Users value or an error if the edge
@@ -103,6 +105,15 @@ func (e GroupEdges) InvitationTokensOrErr() ([]*GroupInvitationToken, error) {
    return nil, &NotLoadedError{edge: "invitation_tokens"}
}

// NotifiersOrErr returns the Notifiers value or an error if the edge
// was not loaded in eager-loading.
func (e GroupEdges) NotifiersOrErr() ([]*Notifier, error) {
    if e.loadedTypes[6] {
        return e.Notifiers, nil
    }
    return nil, &NotLoadedError{edge: "notifiers"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*Group) scanValues(columns []string) ([]any, error) {
    values := make([]any, len(columns))
@@ -194,6 +205,11 @@ func (gr *Group) QueryInvitationTokens() *GroupInvitationTokenQuery {
    return NewGroupClient(gr.config).QueryInvitationTokens(gr)
}

// QueryNotifiers queries the "notifiers" edge of the Group entity.
func (gr *Group) QueryNotifiers() *NotifierQuery {
    return NewGroupClient(gr.config).QueryNotifiers(gr)
}

// Update returns a builder for updating this Group.
// Note that you need to call Group.Unwrap() before calling this method if this Group
// was returned from a transaction, and the transaction was committed or rolled back.

@@ -34,6 +34,8 @@ const (
    EdgeDocuments = "documents"
    // EdgeInvitationTokens holds the string denoting the invitation_tokens edge name in mutations.
    EdgeInvitationTokens = "invitation_tokens"
    // EdgeNotifiers holds the string denoting the notifiers edge name in mutations.
    EdgeNotifiers = "notifiers"
    // Table holds the table name of the group in the database.
    Table = "groups"
    // UsersTable is the table that holds the users relation/edge.
@@ -78,6 +80,13 @@ const (
    InvitationTokensInverseTable = "group_invitation_tokens"
    // InvitationTokensColumn is the table column denoting the invitation_tokens relation/edge.
    InvitationTokensColumn = "group_invitation_tokens"
    // NotifiersTable is the table that holds the notifiers relation/edge.
    NotifiersTable = "notifiers"
    // NotifiersInverseTable is the table name for the Notifier entity.
    // It exists in this package in order to avoid circular dependency with the "notifier" package.
    NotifiersInverseTable = "notifiers"
    // NotifiersColumn is the table column denoting the notifiers relation/edge.
    NotifiersColumn = "group_id"
)

// Columns holds all SQL columns for group fields.
@@ -127,6 +136,7 @@ const (
    CurrencyZar Currency = "zar"
    CurrencyAud Currency = "aud"
    CurrencyNok Currency = "nok"
    CurrencyNzd Currency = "nzd"
    CurrencySek Currency = "sek"
    CurrencyDkk Currency = "dkk"
    CurrencyInr Currency = "inr"
@@ -136,6 +146,7 @@ const (
    CurrencyPln Currency = "pln"
    CurrencyTry Currency = "try"
    CurrencyRon Currency = "ron"
    CurrencyCzk Currency = "czk"
)

func (c Currency) String() string {
@@ -145,7 +156,7 @@ func (c Currency) String() string {
// CurrencyValidator is a validator for the "currency" field enum values. It is called by the builders before save.
func CurrencyValidator(c Currency) error {
    switch c {
    case CurrencyUsd, CurrencyEur, CurrencyGbp, CurrencyJpy, CurrencyZar, CurrencyAud, CurrencyNok, CurrencySek, CurrencyDkk, CurrencyInr, CurrencyRmb, CurrencyBgn, CurrencyChf, CurrencyPln, CurrencyTry, CurrencyRon:
    case CurrencyUsd, CurrencyEur, CurrencyGbp, CurrencyJpy, CurrencyZar, CurrencyAud, CurrencyNok, CurrencyNzd, CurrencySek, CurrencyDkk, CurrencyInr, CurrencyRmb, CurrencyBgn, CurrencyChf, CurrencyPln, CurrencyTry, CurrencyRon, CurrencyCzk:
        return nil
    default:
        return fmt.Errorf("group: invalid enum value for currency field: %q", c)

@@ -398,6 +398,33 @@ func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate.
    })
}

// HasNotifiers applies the HasEdge predicate on the "notifiers" edge.
func HasNotifiers() predicate.Group {
    return predicate.Group(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
        )
        sqlgraph.HasNeighbors(s, step)
    })
}

// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates).
func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group {
    return predicate.Group(func(s *sql.Selector) {
        step := sqlgraph.NewStep(
            sqlgraph.From(Table, FieldID),
            sqlgraph.To(NotifiersInverseTable, FieldID),
            sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
        )
        sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
            for _, p := range preds {
                p(s)
            }
        })
    })
}

// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Group) predicate.Group {
    return predicate.Group(func(s *sql.Selector) {

@@ -17,6 +17,7 @@ import (
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
)
|
||||
|
||||
@@ -179,6 +180,21 @@ func (gc *GroupCreate) AddInvitationTokens(g ...*GroupInvitationToken) *GroupCre
|
||||
return gc.AddInvitationTokenIDs(ids...)
|
||||
}
|
||||
|
||||
// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
|
||||
func (gc *GroupCreate) AddNotifierIDs(ids ...uuid.UUID) *GroupCreate {
|
||||
gc.mutation.AddNotifierIDs(ids...)
|
||||
return gc
|
||||
}
|
||||
|
||||
// AddNotifiers adds the "notifiers" edges to the Notifier entity.
|
||||
func (gc *GroupCreate) AddNotifiers(n ...*Notifier) *GroupCreate {
|
||||
ids := make([]uuid.UUID, len(n))
|
||||
for i := range n {
|
||||
ids[i] = n[i].ID
|
||||
}
|
||||
return gc.AddNotifierIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the GroupMutation object of the builder.
|
||||
func (gc *GroupCreate) Mutation() *GroupMutation {
|
||||
return gc.mutation
|
||||
@@ -315,10 +331,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -334,10 +347,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -353,10 +363,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -372,10 +379,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -391,10 +395,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -410,10 +411,23 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
if nodes := gc.mutation.NotifiersIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
)
|
||||
@@ -35,6 +36,7 @@ type GroupQuery struct {
|
||||
withLabels *LabelQuery
|
||||
withDocuments *DocumentQuery
|
||||
withInvitationTokens *GroupInvitationTokenQuery
|
||||
withNotifiers *NotifierQuery
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
@@ -203,6 +205,28 @@ func (gq *GroupQuery) QueryInvitationTokens() *GroupInvitationTokenQuery {
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryNotifiers chains the current query on the "notifiers" edge.
|
||||
func (gq *GroupQuery) QueryNotifiers() *NotifierQuery {
|
||||
query := (&NotifierClient{config: gq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := gq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := gq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(group.Table, group.FieldID, selector),
|
||||
sqlgraph.To(notifier.Table, notifier.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.O2M, false, group.NotifiersTable, group.NotifiersColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(gq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first Group entity from the query.
|
||||
// Returns a *NotFoundError when no Group was found.
|
||||
func (gq *GroupQuery) First(ctx context.Context) (*Group, error) {
|
||||
@@ -401,6 +425,7 @@ func (gq *GroupQuery) Clone() *GroupQuery {
|
||||
withLabels: gq.withLabels.Clone(),
|
||||
withDocuments: gq.withDocuments.Clone(),
|
||||
withInvitationTokens: gq.withInvitationTokens.Clone(),
|
||||
withNotifiers: gq.withNotifiers.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: gq.sql.Clone(),
|
||||
path: gq.path,
|
||||
@@ -473,6 +498,17 @@ func (gq *GroupQuery) WithInvitationTokens(opts ...func(*GroupInvitationTokenQue
|
||||
return gq
|
||||
}
|
||||
|
||||
// WithNotifiers tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "notifiers" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (gq *GroupQuery) WithNotifiers(opts ...func(*NotifierQuery)) *GroupQuery {
|
||||
query := (&NotifierClient{config: gq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
gq.withNotifiers = query
|
||||
return gq
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
@@ -551,13 +587,14 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
|
||||
var (
|
||||
nodes = []*Group{}
|
||||
_spec = gq.querySpec()
|
||||
loadedTypes = [6]bool{
|
||||
loadedTypes = [7]bool{
|
||||
gq.withUsers != nil,
|
||||
gq.withLocations != nil,
|
||||
gq.withItems != nil,
|
||||
gq.withLabels != nil,
|
||||
gq.withDocuments != nil,
|
||||
gq.withInvitationTokens != nil,
|
||||
gq.withNotifiers != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
@@ -622,6 +659,13 @@ func (gq *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group,
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := gq.withNotifiers; query != nil {
|
||||
if err := gq.loadNotifiers(ctx, query, nodes,
|
||||
func(n *Group) { n.Edges.Notifiers = []*Notifier{} },
|
||||
func(n *Group, e *Notifier) { n.Edges.Notifiers = append(n.Edges.Notifiers, e) }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
@@ -811,6 +855,33 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, nodes []*Group, init func(*Group), assign func(*Group, *Notifier)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID]*Group)
|
||||
for i := range nodes {
|
||||
fks = append(fks, nodes[i].ID)
|
||||
nodeids[nodes[i].ID] = nodes[i]
|
||||
if init != nil {
|
||||
init(nodes[i])
|
||||
}
|
||||
}
|
||||
query.Where(predicate.Notifier(func(s *sql.Selector) {
|
||||
s.Where(sql.InValues(group.NotifiersColumn, fks...))
|
||||
}))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
fk := n.GroupID
|
||||
node, ok := nodeids[fk]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v for node %v`, fk, n.ID)
|
||||
}
|
||||
assign(node, n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gq *GroupQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := gq.querySpec()
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/item"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/label"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/location"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
)
|
||||
@@ -151,6 +152,21 @@ func (gu *GroupUpdate) AddInvitationTokens(g ...*GroupInvitationToken) *GroupUpd
|
||||
return gu.AddInvitationTokenIDs(ids...)
|
||||
}
|
||||
|
||||
// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
|
||||
func (gu *GroupUpdate) AddNotifierIDs(ids ...uuid.UUID) *GroupUpdate {
|
||||
gu.mutation.AddNotifierIDs(ids...)
|
||||
return gu
|
||||
}
|
||||
|
||||
// AddNotifiers adds the "notifiers" edges to the Notifier entity.
|
||||
func (gu *GroupUpdate) AddNotifiers(n ...*Notifier) *GroupUpdate {
|
||||
ids := make([]uuid.UUID, len(n))
|
||||
for i := range n {
|
||||
ids[i] = n[i].ID
|
||||
}
|
||||
return gu.AddNotifierIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the GroupMutation object of the builder.
|
||||
func (gu *GroupUpdate) Mutation() *GroupMutation {
|
||||
return gu.mutation
|
||||
@@ -282,6 +298,27 @@ func (gu *GroupUpdate) RemoveInvitationTokens(g ...*GroupInvitationToken) *Group
|
||||
return gu.RemoveInvitationTokenIDs(ids...)
|
||||
}
|
||||
|
||||
// ClearNotifiers clears all "notifiers" edges to the Notifier entity.
|
||||
func (gu *GroupUpdate) ClearNotifiers() *GroupUpdate {
|
||||
gu.mutation.ClearNotifiers()
|
||||
return gu
|
||||
}
|
||||
|
||||
// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs.
|
||||
func (gu *GroupUpdate) RemoveNotifierIDs(ids ...uuid.UUID) *GroupUpdate {
|
||||
gu.mutation.RemoveNotifierIDs(ids...)
|
||||
return gu
|
||||
}
|
||||
|
||||
// RemoveNotifiers removes "notifiers" edges to Notifier entities.
|
||||
func (gu *GroupUpdate) RemoveNotifiers(n ...*Notifier) *GroupUpdate {
|
||||
ids := make([]uuid.UUID, len(n))
|
||||
for i := range n {
|
||||
ids[i] = n[i].ID
|
||||
}
|
||||
return gu.RemoveNotifierIDs(ids...)
|
||||
}
|
||||
|
||||
// Save executes the query and returns the number of nodes affected by the update operation.
|
||||
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
|
||||
gu.defaults()
|
||||
@@ -362,10 +399,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -378,10 +412,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -397,10 +428,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -416,10 +444,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -432,10 +457,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -451,10 +473,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -470,10 +489,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -486,10 +502,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -505,10 +518,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -524,10 +534,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -540,10 +547,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -559,10 +563,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -578,10 +579,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -594,10 +592,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -613,10 +608,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -632,10 +624,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -648,10 +637,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -667,10 +653,52 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if gu.mutation.NotifiersCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := gu.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !gu.mutation.NotifiersCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := gu.mutation.NotifiersIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -814,6 +842,21 @@ func (guo *GroupUpdateOne) AddInvitationTokens(g ...*GroupInvitationToken) *Grou
|
||||
return guo.AddInvitationTokenIDs(ids...)
|
||||
}
|
||||
|
||||
// AddNotifierIDs adds the "notifiers" edge to the Notifier entity by IDs.
|
||||
func (guo *GroupUpdateOne) AddNotifierIDs(ids ...uuid.UUID) *GroupUpdateOne {
|
||||
guo.mutation.AddNotifierIDs(ids...)
|
||||
return guo
|
||||
}
|
||||
|
||||
// AddNotifiers adds the "notifiers" edges to the Notifier entity.
|
||||
func (guo *GroupUpdateOne) AddNotifiers(n ...*Notifier) *GroupUpdateOne {
|
||||
ids := make([]uuid.UUID, len(n))
|
||||
for i := range n {
|
||||
ids[i] = n[i].ID
|
||||
}
|
||||
return guo.AddNotifierIDs(ids...)
|
||||
}
|
||||
|
||||
// Mutation returns the GroupMutation object of the builder.
|
||||
func (guo *GroupUpdateOne) Mutation() *GroupMutation {
|
||||
return guo.mutation
|
||||
@@ -945,6 +988,27 @@ func (guo *GroupUpdateOne) RemoveInvitationTokens(g ...*GroupInvitationToken) *G
|
||||
return guo.RemoveInvitationTokenIDs(ids...)
|
||||
}
|
||||
|
||||
// ClearNotifiers clears all "notifiers" edges to the Notifier entity.
|
||||
func (guo *GroupUpdateOne) ClearNotifiers() *GroupUpdateOne {
|
||||
guo.mutation.ClearNotifiers()
|
||||
return guo
|
||||
}
|
||||
|
||||
// RemoveNotifierIDs removes the "notifiers" edge to Notifier entities by IDs.
|
||||
func (guo *GroupUpdateOne) RemoveNotifierIDs(ids ...uuid.UUID) *GroupUpdateOne {
|
||||
guo.mutation.RemoveNotifierIDs(ids...)
|
||||
return guo
|
||||
}
|
||||
|
||||
// RemoveNotifiers removes "notifiers" edges to Notifier entities.
|
||||
func (guo *GroupUpdateOne) RemoveNotifiers(n ...*Notifier) *GroupUpdateOne {
|
||||
ids := make([]uuid.UUID, len(n))
|
||||
for i := range n {
|
||||
ids[i] = n[i].ID
|
||||
}
|
||||
return guo.RemoveNotifierIDs(ids...)
|
||||
}
|
||||
|
||||
// Where appends a list predicates to the GroupUpdate builder.
|
||||
func (guo *GroupUpdateOne) Where(ps ...predicate.Group) *GroupUpdateOne {
|
||||
guo.mutation.Where(ps...)
|
||||
@@ -1055,10 +1119,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1071,10 +1132,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1090,10 +1148,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.UsersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: user.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1109,10 +1164,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1125,10 +1177,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1144,10 +1193,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.LocationsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1163,10 +1209,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1179,10 +1222,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1198,10 +1238,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1217,10 +1254,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1233,10 +1267,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1252,10 +1283,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.LabelsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1271,10 +1299,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1287,10 +1312,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1306,10 +1328,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.DocumentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: document.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1325,10 +1344,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1341,10 +1357,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1360,10 +1373,52 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
|
||||
Columns: []string{group.InvitationTokensColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: groupinvitationtoken.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if guo.mutation.NotifiersCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := guo.mutation.RemovedNotifiersIDs(); len(nodes) > 0 && !guo.mutation.NotifiersCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := guo.mutation.NotifiersIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
Inverse: false,
|
||||
Table: group.NotifiersTable,
|
||||
Columns: []string{group.NotifiersColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -254,10 +254,7 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq
|
||||
Columns: []string{groupinvitationtoken.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -175,10 +175,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err
|
||||
Columns: []string{groupinvitationtoken.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -191,10 +188,7 @@ func (gitu *GroupInvitationTokenUpdate) sqlSave(ctx context.Context) (n int, err
|
||||
Columns: []string{groupinvitationtoken.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -397,10 +391,7 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node
|
||||
Columns: []string{groupinvitationtoken.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -413,10 +404,7 @@ func (gituo *GroupInvitationTokenUpdateOne) sqlSave(ctx context.Context) (_node
|
||||
Columns: []string{groupinvitationtoken.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
@@ -48,6 +48,10 @@ func (me *MaintenanceEntry) GetID() uuid.UUID {
    return me.ID
}

func (n *Notifier) GetID() uuid.UUID {
    return n.ID
}

func (u *User) GetID() uuid.UUID {
    return u.ID
}

@@ -141,6 +141,18 @@ func (f MaintenanceEntryFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.V
    return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.MaintenanceEntryMutation", m)
}

// The NotifierFunc type is an adapter to allow the use of ordinary
// function as Notifier mutator.
type NotifierFunc func(context.Context, *ent.NotifierMutation) (ent.Value, error)

// Mutate calls f(ctx, m).
func (f NotifierFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) {
    if mv, ok := m.(*ent.NotifierMutation); ok {
        return f(ctx, mv)
    }
    return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NotifierMutation", m)
}

// The UserFunc type is an adapter to allow the use of ordinary
// function as User mutator.
type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error)

@@ -75,12 +75,12 @@ type Item struct {

// ItemEdges holds the relations/edges for other nodes in the graph.
type ItemEdges struct {
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
// Parent holds the value of the parent edge.
Parent *Item `json:"parent,omitempty"`
// Children holds the value of the children edge.
Children []*Item `json:"children,omitempty"`
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
// Label holds the value of the label edge.
Label []*Label `json:"label,omitempty"`
// Location holds the value of the location edge.
@@ -96,10 +96,23 @@ type ItemEdges struct {
loadedTypes [8]bool
}

// GroupOrErr returns the Group value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ItemEdges) GroupOrErr() (*Group, error) {
if e.loadedTypes[0] {
if e.Group == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: group.Label}
}
return e.Group, nil
}
return nil, &NotLoadedError{edge: "group"}
}

// ParentOrErr returns the Parent value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ItemEdges) ParentOrErr() (*Item, error) {
if e.loadedTypes[0] {
if e.loadedTypes[1] {
if e.Parent == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: item.Label}
@@ -112,25 +125,12 @@ func (e ItemEdges) ParentOrErr() (*Item, error) {
// ChildrenOrErr returns the Children value or an error if the edge
// was not loaded in eager-loading.
func (e ItemEdges) ChildrenOrErr() ([]*Item, error) {
if e.loadedTypes[1] {
if e.loadedTypes[2] {
return e.Children, nil
}
return nil, &NotLoadedError{edge: "children"}
}

// GroupOrErr returns the Group value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ItemEdges) GroupOrErr() (*Group, error) {
if e.loadedTypes[2] {
if e.Group == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: group.Label}
}
return e.Group, nil
}
return nil, &NotLoadedError{edge: "group"}
}

// LabelOrErr returns the Label value or an error if the edge
// was not loaded in eager-loading.
func (e ItemEdges) LabelOrErr() ([]*Label, error) {
@@ -388,6 +388,11 @@ func (i *Item) assignValues(columns []string, values []any) error {
return nil
}

// QueryGroup queries the "group" edge of the Item entity.
func (i *Item) QueryGroup() *GroupQuery {
return NewItemClient(i.config).QueryGroup(i)
}

// QueryParent queries the "parent" edge of the Item entity.
func (i *Item) QueryParent() *ItemQuery {
return NewItemClient(i.config).QueryParent(i)
@@ -398,11 +403,6 @@ func (i *Item) QueryChildren() *ItemQuery {
return NewItemClient(i.config).QueryChildren(i)
}

// QueryGroup queries the "group" edge of the Item entity.
func (i *Item) QueryGroup() *GroupQuery {
return NewItemClient(i.config).QueryGroup(i)
}

// QueryLabel queries the "label" edge of the Item entity.
func (i *Item) QueryLabel() *LabelQuery {
return NewItemClient(i.config).QueryLabel(i)

@@ -59,12 +59,12 @@ const (
FieldSoldPrice = "sold_price"
// FieldSoldNotes holds the string denoting the sold_notes field in the database.
FieldSoldNotes = "sold_notes"
// EdgeGroup holds the string denoting the group edge name in mutations.
EdgeGroup = "group"
// EdgeParent holds the string denoting the parent edge name in mutations.
EdgeParent = "parent"
// EdgeChildren holds the string denoting the children edge name in mutations.
EdgeChildren = "children"
// EdgeGroup holds the string denoting the group edge name in mutations.
EdgeGroup = "group"
// EdgeLabel holds the string denoting the label edge name in mutations.
EdgeLabel = "label"
// EdgeLocation holds the string denoting the location edge name in mutations.
@@ -77,6 +77,13 @@ const (
EdgeAttachments = "attachments"
// Table holds the table name of the item in the database.
Table = "items"
// GroupTable is the table that holds the group relation/edge.
GroupTable = "items"
// GroupInverseTable is the table name for the Group entity.
// It exists in this package in order to avoid circular dependency with the "group" package.
GroupInverseTable = "groups"
// GroupColumn is the table column denoting the group relation/edge.
GroupColumn = "group_items"
// ParentTable is the table that holds the parent relation/edge.
ParentTable = "items"
// ParentColumn is the table column denoting the parent relation/edge.
@@ -85,13 +92,6 @@ const (
ChildrenTable = "items"
// ChildrenColumn is the table column denoting the children relation/edge.
ChildrenColumn = "item_children"
// GroupTable is the table that holds the group relation/edge.
GroupTable = "items"
// GroupInverseTable is the table name for the Group entity.
// It exists in this package in order to avoid circular dependency with the "group" package.
GroupInverseTable = "groups"
// GroupColumn is the table column denoting the group relation/edge.
GroupColumn = "group_items"
// LabelTable is the table that holds the label relation/edge. The primary key declared below.
LabelTable = "label_items"
// LabelInverseTable is the table name for the Label entity.

@@ -1406,6 +1406,33 @@ func SoldNotesContainsFold(v string) predicate.Item {
return predicate.Item(sql.FieldContainsFold(FieldSoldNotes, v))
}

// HasGroup applies the HasEdge predicate on the "group" edge.
func HasGroup() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}

// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}

// HasParent applies the HasEdge predicate on the "parent" edge.
func HasParent() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
@@ -1460,33 +1487,6 @@ func HasChildrenWith(preds ...predicate.Item) predicate.Item {
})
}

// HasGroup applies the HasEdge predicate on the "group" edge.
func HasGroup() predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighbors(s, step)
})
}

// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.Item {
return predicate.Item(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
}
})
})
}

// HasLabel applies the HasEdge predicate on the "label" edge.
func HasLabel() predicate.Item {
return predicate.Item(func(s *sql.Selector) {

@@ -355,6 +355,17 @@ func (ic *ItemCreate) SetNillableID(u *uuid.UUID) *ItemCreate {
return ic
}

// SetGroupID sets the "group" edge to the Group entity by ID.
func (ic *ItemCreate) SetGroupID(id uuid.UUID) *ItemCreate {
ic.mutation.SetGroupID(id)
return ic
}

// SetGroup sets the "group" edge to the Group entity.
func (ic *ItemCreate) SetGroup(g *Group) *ItemCreate {
return ic.SetGroupID(g.ID)
}

// SetParentID sets the "parent" edge to the Item entity by ID.
func (ic *ItemCreate) SetParentID(id uuid.UUID) *ItemCreate {
ic.mutation.SetParentID(id)
@@ -389,17 +400,6 @@ func (ic *ItemCreate) AddChildren(i ...*Item) *ItemCreate {
return ic.AddChildIDs(ids...)
}

// SetGroupID sets the "group" edge to the Group entity by ID.
func (ic *ItemCreate) SetGroupID(id uuid.UUID) *ItemCreate {
ic.mutation.SetGroupID(id)
return ic
}

// SetGroup sets the "group" edge to the Group entity.
func (ic *ItemCreate) SetGroup(g *Group) *ItemCreate {
return ic.SetGroupID(g.ID)
}

// AddLabelIDs adds the "label" edge to the Label entity by IDs.
func (ic *ItemCreate) AddLabelIDs(ids ...uuid.UUID) *ItemCreate {
ic.mutation.AddLabelIDs(ids...)
@@ -763,6 +763,23 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
_spec.SetField(item.FieldSoldNotes, field.TypeString, value)
_node.SoldNotes = value
}
if nodes := ic.mutation.GroupIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: item.GroupTable,
Columns: []string{item.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_node.group_items = &nodes[0]
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := ic.mutation.ParentIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -771,10 +788,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.ParentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -791,10 +805,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.ChildrenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -802,26 +813,6 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
}
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := ic.mutation.GroupIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
Inverse: true,
Table: item.GroupTable,
Columns: []string{item.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
},
}
for _, k := range nodes {
edge.Target.Nodes = append(edge.Target.Nodes, k)
}
_node.group_items = &nodes[0]
_spec.Edges = append(_spec.Edges, edge)
}
if nodes := ic.mutation.LabelIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2M,
@@ -830,10 +821,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: item.LabelPrimaryKey,
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -849,10 +837,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.LocationColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -869,10 +854,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.FieldsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: itemfield.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -888,10 +870,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.MaintenanceEntriesColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: maintenanceentry.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -907,10 +886,7 @@ func (ic *ItemCreate) createSpec() (*Item, *sqlgraph.CreateSpec) {
Columns: []string{item.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {

@@ -29,9 +29,9 @@ type ItemQuery struct {
|
||||
order []OrderFunc
|
||||
inters []Interceptor
|
||||
predicates []predicate.Item
|
||||
withGroup *GroupQuery
|
||||
withParent *ItemQuery
|
||||
withChildren *ItemQuery
|
||||
withGroup *GroupQuery
|
||||
withLabel *LabelQuery
|
||||
withLocation *LocationQuery
|
||||
withFields *ItemFieldQuery
|
||||
@@ -74,6 +74,28 @@ func (iq *ItemQuery) Order(o ...OrderFunc) *ItemQuery {
|
||||
return iq
|
||||
}
|
||||
|
||||
// QueryGroup chains the current query on the "group" edge.
|
||||
func (iq *ItemQuery) QueryGroup() *GroupQuery {
|
||||
query := (&GroupClient{config: iq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := iq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := iq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(item.Table, item.FieldID, selector),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryParent chains the current query on the "parent" edge.
|
||||
func (iq *ItemQuery) QueryParent() *ItemQuery {
|
||||
query := (&ItemClient{config: iq.config}).Query()
|
||||
@@ -118,28 +140,6 @@ func (iq *ItemQuery) QueryChildren() *ItemQuery {
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryGroup chains the current query on the "group" edge.
|
||||
func (iq *ItemQuery) QueryGroup() *GroupQuery {
|
||||
query := (&GroupClient{config: iq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := iq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := iq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(item.Table, item.FieldID, selector),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, item.GroupTable, item.GroupColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(iq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryLabel chains the current query on the "label" edge.
|
||||
func (iq *ItemQuery) QueryLabel() *LabelQuery {
|
||||
query := (&LabelClient{config: iq.config}).Query()
|
||||
@@ -442,9 +442,9 @@ func (iq *ItemQuery) Clone() *ItemQuery {
|
||||
order: append([]OrderFunc{}, iq.order...),
|
||||
inters: append([]Interceptor{}, iq.inters...),
|
||||
predicates: append([]predicate.Item{}, iq.predicates...),
|
||||
withGroup: iq.withGroup.Clone(),
|
||||
withParent: iq.withParent.Clone(),
|
||||
withChildren: iq.withChildren.Clone(),
|
||||
withGroup: iq.withGroup.Clone(),
|
||||
withLabel: iq.withLabel.Clone(),
|
||||
withLocation: iq.withLocation.Clone(),
|
||||
withFields: iq.withFields.Clone(),
|
||||
@@ -456,6 +456,17 @@ func (iq *ItemQuery) Clone() *ItemQuery {
|
||||
}
|
||||
}
|
||||
|
||||
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (iq *ItemQuery) WithGroup(opts ...func(*GroupQuery)) *ItemQuery {
|
||||
query := (&GroupClient{config: iq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
iq.withGroup = query
|
||||
return iq
|
||||
}
|
||||
|
||||
// WithParent tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "parent" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (iq *ItemQuery) WithParent(opts ...func(*ItemQuery)) *ItemQuery {
|
||||
@@ -478,17 +489,6 @@ func (iq *ItemQuery) WithChildren(opts ...func(*ItemQuery)) *ItemQuery {
|
||||
return iq
|
||||
}
|
||||
|
||||
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (iq *ItemQuery) WithGroup(opts ...func(*GroupQuery)) *ItemQuery {
|
||||
query := (&GroupClient{config: iq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
iq.withGroup = query
|
||||
return iq
|
||||
}
|
||||
|
||||
// WithLabel tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "label" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (iq *ItemQuery) WithLabel(opts ...func(*LabelQuery)) *ItemQuery {
|
||||
@@ -624,9 +624,9 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
|
||||
withFKs = iq.withFKs
|
||||
_spec = iq.querySpec()
|
||||
loadedTypes = [8]bool{
|
||||
iq.withGroup != nil,
|
||||
iq.withParent != nil,
|
||||
iq.withChildren != nil,
|
||||
iq.withGroup != nil,
|
||||
iq.withLabel != nil,
|
||||
iq.withLocation != nil,
|
||||
iq.withFields != nil,
|
||||
@@ -634,7 +634,7 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
|
||||
iq.withAttachments != nil,
|
||||
}
|
||||
)
|
||||
if iq.withParent != nil || iq.withGroup != nil || iq.withLocation != nil {
|
||||
if iq.withGroup != nil || iq.withParent != nil || iq.withLocation != nil {
|
||||
withFKs = true
|
||||
}
|
||||
if withFKs {
|
||||
@@ -658,6 +658,12 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := iq.withGroup; query != nil {
|
||||
if err := iq.loadGroup(ctx, query, nodes, nil,
|
||||
func(n *Item, e *Group) { n.Edges.Group = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := iq.withParent; query != nil {
|
||||
if err := iq.loadParent(ctx, query, nodes, nil,
|
||||
func(n *Item, e *Item) { n.Edges.Parent = e }); err != nil {
|
||||
@@ -671,12 +677,6 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := iq.withGroup; query != nil {
|
||||
if err := iq.loadGroup(ctx, query, nodes, nil,
|
||||
func(n *Item, e *Group) { n.Edges.Group = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := iq.withLabel; query != nil {
|
||||
if err := iq.loadLabel(ctx, query, nodes,
|
||||
func(n *Item) { n.Edges.Label = []*Label{} },
|
||||
@@ -714,6 +714,38 @@ func (iq *ItemQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Item, e
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Item, init func(*Item), assign func(*Item, *Group)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Item)
|
||||
for i := range nodes {
|
||||
if nodes[i].group_items == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].group_items
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(group.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "group_items" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (iq *ItemQuery) loadParent(ctx context.Context, query *ItemQuery, nodes []*Item, init func(*Item), assign func(*Item, *Item)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Item)
|
||||
@@ -777,38 +809,6 @@ func (iq *ItemQuery) loadChildren(ctx context.Context, query *ItemQuery, nodes [
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (iq *ItemQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Item, init func(*Item), assign func(*Item, *Group)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Item)
|
||||
for i := range nodes {
|
||||
if nodes[i].group_items == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].group_items
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(group.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "group_items" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (iq *ItemQuery) loadLabel(ctx context.Context, query *LabelQuery, nodes []*Item, init func(*Item), assign func(*Item, *Label)) error {
|
||||
edgeIDs := make([]driver.Value, len(nodes))
|
||||
byID := make(map[uuid.UUID]*Item)
|
||||
|
||||
@@ -433,6 +433,17 @@ func (iu *ItemUpdate) ClearSoldNotes() *ItemUpdate {
|
||||
return iu
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (iu *ItemUpdate) SetGroupID(id uuid.UUID) *ItemUpdate {
|
||||
iu.mutation.SetGroupID(id)
|
||||
return iu
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (iu *ItemUpdate) SetGroup(g *Group) *ItemUpdate {
|
||||
return iu.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// SetParentID sets the "parent" edge to the Item entity by ID.
|
||||
func (iu *ItemUpdate) SetParentID(id uuid.UUID) *ItemUpdate {
|
||||
iu.mutation.SetParentID(id)
|
||||
@@ -467,17 +478,6 @@ func (iu *ItemUpdate) AddChildren(i ...*Item) *ItemUpdate {
|
||||
return iu.AddChildIDs(ids...)
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (iu *ItemUpdate) SetGroupID(id uuid.UUID) *ItemUpdate {
|
||||
iu.mutation.SetGroupID(id)
|
||||
return iu
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (iu *ItemUpdate) SetGroup(g *Group) *ItemUpdate {
|
||||
return iu.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// AddLabelIDs adds the "label" edge to the Label entity by IDs.
|
||||
func (iu *ItemUpdate) AddLabelIDs(ids ...uuid.UUID) *ItemUpdate {
|
||||
iu.mutation.AddLabelIDs(ids...)
|
||||
@@ -562,6 +562,12 @@ func (iu *ItemUpdate) Mutation() *ItemMutation {
|
||||
return iu.mutation
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (iu *ItemUpdate) ClearGroup() *ItemUpdate {
|
||||
iu.mutation.ClearGroup()
|
||||
return iu
|
||||
}
|
||||
|
||||
// ClearParent clears the "parent" edge to the Item entity.
|
||||
func (iu *ItemUpdate) ClearParent() *ItemUpdate {
|
||||
iu.mutation.ClearParent()
|
||||
@@ -589,12 +595,6 @@ func (iu *ItemUpdate) RemoveChildren(i ...*Item) *ItemUpdate {
|
||||
return iu.RemoveChildIDs(ids...)
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (iu *ItemUpdate) ClearGroup() *ItemUpdate {
|
||||
iu.mutation.ClearGroup()
|
||||
return iu
|
||||
}
|
||||
|
||||
// ClearLabel clears all "label" edges to the Label entity.
|
||||
func (iu *ItemUpdate) ClearLabel() *ItemUpdate {
|
||||
iu.mutation.ClearLabel()
|
||||
@@ -903,6 +903,35 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
if iu.mutation.SoldNotesCleared() {
|
||||
_spec.ClearField(item.FieldSoldNotes, field.TypeString)
|
||||
}
|
||||
if iu.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := iu.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if iu.mutation.ParentCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -911,10 +940,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -927,10 +953,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -946,10 +969,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -962,10 +982,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -981,45 +998,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if iu.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := iu.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1035,10 +1014,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: item.LabelPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1051,10 +1027,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: item.LabelPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1070,10 +1043,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: item.LabelPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1089,10 +1059,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.LocationColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1105,10 +1072,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.LocationColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1124,10 +1088,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.FieldsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: itemfield.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1140,10 +1101,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.FieldsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: itemfield.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1159,10 +1117,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.FieldsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: itemfield.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1178,10 +1133,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.MaintenanceEntriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: maintenanceentry.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1194,10 +1146,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.MaintenanceEntriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: maintenanceentry.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1213,10 +1162,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.MaintenanceEntriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: maintenanceentry.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1232,10 +1178,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -1248,10 +1191,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1267,10 +1207,7 @@ func (iu *ItemUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{item.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -1696,6 +1633,17 @@ func (iuo *ItemUpdateOne) ClearSoldNotes() *ItemUpdateOne {
|
||||
return iuo
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (iuo *ItemUpdateOne) SetGroupID(id uuid.UUID) *ItemUpdateOne {
|
||||
iuo.mutation.SetGroupID(id)
|
||||
return iuo
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (iuo *ItemUpdateOne) SetGroup(g *Group) *ItemUpdateOne {
|
||||
return iuo.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// SetParentID sets the "parent" edge to the Item entity by ID.
|
||||
func (iuo *ItemUpdateOne) SetParentID(id uuid.UUID) *ItemUpdateOne {
|
||||
iuo.mutation.SetParentID(id)
|
||||
@@ -1730,17 +1678,6 @@ func (iuo *ItemUpdateOne) AddChildren(i ...*Item) *ItemUpdateOne {
|
||||
return iuo.AddChildIDs(ids...)
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (iuo *ItemUpdateOne) SetGroupID(id uuid.UUID) *ItemUpdateOne {
|
||||
iuo.mutation.SetGroupID(id)
|
||||
return iuo
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (iuo *ItemUpdateOne) SetGroup(g *Group) *ItemUpdateOne {
|
||||
return iuo.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// AddLabelIDs adds the "label" edge to the Label entity by IDs.
|
||||
func (iuo *ItemUpdateOne) AddLabelIDs(ids ...uuid.UUID) *ItemUpdateOne {
|
||||
iuo.mutation.AddLabelIDs(ids...)
|
||||
@@ -1825,6 +1762,12 @@ func (iuo *ItemUpdateOne) Mutation() *ItemMutation {
|
||||
return iuo.mutation
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (iuo *ItemUpdateOne) ClearGroup() *ItemUpdateOne {
|
||||
iuo.mutation.ClearGroup()
|
||||
return iuo
|
||||
}
|
||||
|
||||
// ClearParent clears the "parent" edge to the Item entity.
|
||||
func (iuo *ItemUpdateOne) ClearParent() *ItemUpdateOne {
|
||||
iuo.mutation.ClearParent()
|
||||
@@ -1852,12 +1795,6 @@ func (iuo *ItemUpdateOne) RemoveChildren(i ...*Item) *ItemUpdateOne {
|
||||
return iuo.RemoveChildIDs(ids...)
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (iuo *ItemUpdateOne) ClearGroup() *ItemUpdateOne {
|
||||
iuo.mutation.ClearGroup()
|
||||
return iuo
|
||||
}
|
||||
|
||||
// ClearLabel clears all "label" edges to the Label entity.
|
||||
func (iuo *ItemUpdateOne) ClearLabel() *ItemUpdateOne {
|
||||
iuo.mutation.ClearLabel()
|
||||
@@ -2196,6 +2133,35 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
if iuo.mutation.SoldNotesCleared() {
|
||||
_spec.ClearField(item.FieldSoldNotes, field.TypeString)
|
||||
}
|
||||
if iuo.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := iuo.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if iuo.mutation.ParentCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -2204,10 +2170,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2220,10 +2183,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2239,10 +2199,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2255,10 +2212,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2274,45 +2228,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if iuo.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := iuo.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: item.GroupTable,
|
||||
Columns: []string{item.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2328,10 +2244,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: item.LabelPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2344,10 +2257,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: item.LabelPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2363,10 +2273,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: item.LabelPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: label.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2382,10 +2289,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.LocationColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2398,10 +2302,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.LocationColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2417,10 +2318,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.FieldsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: itemfield.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2433,10 +2331,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.FieldsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: itemfield.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2452,10 +2347,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.FieldsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: itemfield.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(itemfield.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2471,10 +2363,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.MaintenanceEntriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: maintenanceentry.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2487,10 +2376,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.MaintenanceEntriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: maintenanceentry.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2506,10 +2392,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.MaintenanceEntriesColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: maintenanceentry.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(maintenanceentry.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2525,10 +2408,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -2541,10 +2421,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -2560,10 +2437,7 @@ func (iuo *ItemUpdateOne) sqlSave(ctx context.Context) (_node *Item, err error)
|
||||
Columns: []string{item.AttachmentsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: attachment.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {

@@ -341,10 +341,7 @@ func (ifc *ItemFieldCreate) createSpec() (*ItemField, *sqlgraph.CreateSpec) {
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {

@@ -290,10 +290,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -306,10 +303,7 @@ func (ifu *ItemFieldUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -627,10 +621,7 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField,
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -643,10 +634,7 @@ func (ifuo *ItemFieldUpdateOne) sqlSave(ctx context.Context) (_node *ItemField,
Columns: []string{itemfield.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {

@@ -266,10 +266,7 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{label.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -286,10 +283,7 @@ func (lc *LabelCreate) createSpec() (*Label, *sqlgraph.CreateSpec) {
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -238,10 +238,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{label.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -254,10 +251,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{label.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -273,10 +267,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -289,10 +280,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -308,10 +296,7 @@ func (lu *LabelUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -576,10 +561,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
|
||||
Columns: []string{label.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -592,10 +574,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
|
||||
Columns: []string{label.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -611,10 +590,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -627,10 +603,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -646,10 +619,7 @@ func (luo *LabelUpdateOne) sqlSave(ctx context.Context) (_node *Label, err error
|
||||
Columns: label.ItemsPrimaryKey,
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -35,12 +35,12 @@ type Location struct {

// LocationEdges holds the relations/edges for other nodes in the graph.
type LocationEdges struct {
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
// Parent holds the value of the parent edge.
Parent *Location `json:"parent,omitempty"`
// Children holds the value of the children edge.
Children []*Location `json:"children,omitempty"`
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
// Items holds the value of the items edge.
Items []*Item `json:"items,omitempty"`
// loadedTypes holds the information for reporting if a
@@ -48,10 +48,23 @@ type LocationEdges struct {
loadedTypes [4]bool
}

// GroupOrErr returns the Group value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e LocationEdges) GroupOrErr() (*Group, error) {
if e.loadedTypes[0] {
if e.Group == nil {
// Edge was loaded but was not found.
return nil, &NotFoundError{label: group.Label}
}
return e.Group, nil
}
return nil, &NotLoadedError{edge: "group"}
}

// ParentOrErr returns the Parent value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e LocationEdges) ParentOrErr() (*Location, error) {
|
||||
if e.loadedTypes[0] {
|
||||
if e.loadedTypes[1] {
|
||||
if e.Parent == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: location.Label}
|
||||
@@ -64,25 +77,12 @@ func (e LocationEdges) ParentOrErr() (*Location, error) {
|
||||
// ChildrenOrErr returns the Children value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e LocationEdges) ChildrenOrErr() ([]*Location, error) {
|
||||
if e.loadedTypes[1] {
|
||||
if e.loadedTypes[2] {
|
||||
return e.Children, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "children"}
|
||||
}
|
||||
|
||||
// GroupOrErr returns the Group value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e LocationEdges) GroupOrErr() (*Group, error) {
|
||||
if e.loadedTypes[2] {
|
||||
if e.Group == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: group.Label}
|
||||
}
|
||||
return e.Group, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "group"}
|
||||
}
|
||||
|
||||
// ItemsOrErr returns the Items value or an error if the edge
|
||||
// was not loaded in eager-loading.
|
||||
func (e LocationEdges) ItemsOrErr() ([]*Item, error) {
|
||||
@@ -171,6 +171,11 @@ func (l *Location) assignValues(columns []string, values []any) error {
return nil
}

// QueryGroup queries the "group" edge of the Location entity.
func (l *Location) QueryGroup() *GroupQuery {
return NewLocationClient(l.config).QueryGroup(l)
}

// QueryParent queries the "parent" edge of the Location entity.
func (l *Location) QueryParent() *LocationQuery {
return NewLocationClient(l.config).QueryParent(l)
@@ -181,11 +186,6 @@ func (l *Location) QueryChildren() *LocationQuery {
return NewLocationClient(l.config).QueryChildren(l)
}

// QueryGroup queries the "group" edge of the Location entity.
func (l *Location) QueryGroup() *GroupQuery {
return NewLocationClient(l.config).QueryGroup(l)
}

// QueryItems queries the "items" edge of the Location entity.
func (l *Location) QueryItems() *ItemQuery {
return NewLocationClient(l.config).QueryItems(l)

@@ -21,16 +21,23 @@ const (
|
||||
FieldName = "name"
|
||||
// FieldDescription holds the string denoting the description field in the database.
|
||||
FieldDescription = "description"
|
||||
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||
EdgeGroup = "group"
|
||||
// EdgeParent holds the string denoting the parent edge name in mutations.
|
||||
EdgeParent = "parent"
|
||||
// EdgeChildren holds the string denoting the children edge name in mutations.
|
||||
EdgeChildren = "children"
|
||||
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||
EdgeGroup = "group"
|
||||
// EdgeItems holds the string denoting the items edge name in mutations.
|
||||
EdgeItems = "items"
|
||||
// Table holds the table name of the location in the database.
|
||||
Table = "locations"
|
||||
// GroupTable is the table that holds the group relation/edge.
|
||||
GroupTable = "locations"
|
||||
// GroupInverseTable is the table name for the Group entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||
GroupInverseTable = "groups"
|
||||
// GroupColumn is the table column denoting the group relation/edge.
|
||||
GroupColumn = "group_locations"
|
||||
// ParentTable is the table that holds the parent relation/edge.
|
||||
ParentTable = "locations"
|
||||
// ParentColumn is the table column denoting the parent relation/edge.
|
||||
@@ -39,13 +46,6 @@ const (
|
||||
ChildrenTable = "locations"
|
||||
// ChildrenColumn is the table column denoting the children relation/edge.
|
||||
ChildrenColumn = "location_children"
|
||||
// GroupTable is the table that holds the group relation/edge.
|
||||
GroupTable = "locations"
|
||||
// GroupInverseTable is the table name for the Group entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||
GroupInverseTable = "groups"
|
||||
// GroupColumn is the table column denoting the group relation/edge.
|
||||
GroupColumn = "group_locations"
|
||||
// ItemsTable is the table that holds the items relation/edge.
|
||||
ItemsTable = "items"
|
||||
// ItemsInverseTable is the table name for the Item entity.
|
||||
|
||||
@@ -296,6 +296,33 @@ func DescriptionContainsFold(v string) predicate.Location {
|
||||
return predicate.Location(sql.FieldContainsFold(FieldDescription, v))
|
||||
}
|
||||
|
||||
// HasGroup applies the HasEdge predicate on the "group" edge.
|
||||
func HasGroup() predicate.Location {
|
||||
return predicate.Location(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||
func HasGroupWith(preds ...predicate.Group) predicate.Location {
|
||||
return predicate.Location(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// HasParent applies the HasEdge predicate on the "parent" edge.
|
||||
func HasParent() predicate.Location {
|
||||
return predicate.Location(func(s *sql.Selector) {
|
||||
@@ -350,33 +377,6 @@ func HasChildrenWith(preds ...predicate.Location) predicate.Location {
|
||||
})
|
||||
}
|
||||
|
||||
// HasGroup applies the HasEdge predicate on the "group" edge.
|
||||
func HasGroup() predicate.Location {
|
||||
return predicate.Location(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||
func HasGroupWith(preds ...predicate.Group) predicate.Location {
|
||||
return predicate.Location(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// HasItems applies the HasEdge predicate on the "items" edge.
|
||||
func HasItems() predicate.Location {
|
||||
return predicate.Location(func(s *sql.Selector) {
|
||||
|
||||
@@ -85,6 +85,17 @@ func (lc *LocationCreate) SetNillableID(u *uuid.UUID) *LocationCreate {
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (lc *LocationCreate) SetGroupID(id uuid.UUID) *LocationCreate {
|
||||
lc.mutation.SetGroupID(id)
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (lc *LocationCreate) SetGroup(g *Group) *LocationCreate {
|
||||
return lc.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// SetParentID sets the "parent" edge to the Location entity by ID.
|
||||
func (lc *LocationCreate) SetParentID(id uuid.UUID) *LocationCreate {
|
||||
lc.mutation.SetParentID(id)
|
||||
@@ -119,17 +130,6 @@ func (lc *LocationCreate) AddChildren(l ...*Location) *LocationCreate {
|
||||
return lc.AddChildIDs(ids...)
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (lc *LocationCreate) SetGroupID(id uuid.UUID) *LocationCreate {
|
||||
lc.mutation.SetGroupID(id)
|
||||
return lc
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (lc *LocationCreate) SetGroup(g *Group) *LocationCreate {
|
||||
return lc.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// AddItemIDs adds the "items" edge to the Item entity by IDs.
|
||||
func (lc *LocationCreate) AddItemIDs(ids ...uuid.UUID) *LocationCreate {
|
||||
lc.mutation.AddItemIDs(ids...)
|
||||
@@ -269,6 +269,23 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
|
||||
_spec.SetField(location.FieldDescription, field.TypeString, value)
|
||||
_node.Description = value
|
||||
}
|
||||
if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.group_locations = &nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
if nodes := lc.mutation.ParentIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -277,10 +294,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{location.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -297,10 +311,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -308,26 +319,6 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
|
||||
}
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
if nodes := lc.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.group_locations = &nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
if nodes := lc.mutation.ItemsIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.O2M,
|
||||
@@ -336,10 +327,7 @@ func (lc *LocationCreate) createSpec() (*Location, *sqlgraph.CreateSpec) {
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -25,9 +25,9 @@ type LocationQuery struct {
|
||||
order []OrderFunc
|
||||
inters []Interceptor
|
||||
predicates []predicate.Location
|
||||
withGroup *GroupQuery
|
||||
withParent *LocationQuery
|
||||
withChildren *LocationQuery
|
||||
withGroup *GroupQuery
|
||||
withItems *ItemQuery
|
||||
withFKs bool
|
||||
// intermediate query (i.e. traversal path).
|
||||
@@ -66,6 +66,28 @@ func (lq *LocationQuery) Order(o ...OrderFunc) *LocationQuery {
|
||||
return lq
|
||||
}
|
||||
|
||||
// QueryGroup chains the current query on the "group" edge.
|
||||
func (lq *LocationQuery) QueryGroup() *GroupQuery {
|
||||
query := (&GroupClient{config: lq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := lq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := lq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(location.Table, location.FieldID, selector),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryParent chains the current query on the "parent" edge.
|
||||
func (lq *LocationQuery) QueryParent() *LocationQuery {
|
||||
query := (&LocationClient{config: lq.config}).Query()
|
||||
@@ -110,28 +132,6 @@ func (lq *LocationQuery) QueryChildren() *LocationQuery {
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryGroup chains the current query on the "group" edge.
|
||||
func (lq *LocationQuery) QueryGroup() *GroupQuery {
|
||||
query := (&GroupClient{config: lq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := lq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := lq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(location.Table, location.FieldID, selector),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, location.GroupTable, location.GroupColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(lq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryItems chains the current query on the "items" edge.
|
||||
func (lq *LocationQuery) QueryItems() *ItemQuery {
|
||||
query := (&ItemClient{config: lq.config}).Query()
|
||||
@@ -346,9 +346,9 @@ func (lq *LocationQuery) Clone() *LocationQuery {
|
||||
order: append([]OrderFunc{}, lq.order...),
|
||||
inters: append([]Interceptor{}, lq.inters...),
|
||||
predicates: append([]predicate.Location{}, lq.predicates...),
|
||||
withGroup: lq.withGroup.Clone(),
|
||||
withParent: lq.withParent.Clone(),
|
||||
withChildren: lq.withChildren.Clone(),
|
||||
withGroup: lq.withGroup.Clone(),
|
||||
withItems: lq.withItems.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: lq.sql.Clone(),
|
||||
@@ -356,6 +356,17 @@ func (lq *LocationQuery) Clone() *LocationQuery {
|
||||
}
|
||||
}
|
||||
|
||||
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (lq *LocationQuery) WithGroup(opts ...func(*GroupQuery)) *LocationQuery {
|
||||
query := (&GroupClient{config: lq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
lq.withGroup = query
|
||||
return lq
|
||||
}
|
||||
|
||||
// WithParent tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "parent" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (lq *LocationQuery) WithParent(opts ...func(*LocationQuery)) *LocationQuery {
|
||||
@@ -378,17 +389,6 @@ func (lq *LocationQuery) WithChildren(opts ...func(*LocationQuery)) *LocationQue
|
||||
return lq
|
||||
}
|
||||
|
||||
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (lq *LocationQuery) WithGroup(opts ...func(*GroupQuery)) *LocationQuery {
|
||||
query := (&GroupClient{config: lq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
lq.withGroup = query
|
||||
return lq
|
||||
}
|
||||
|
||||
// WithItems tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "items" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (lq *LocationQuery) WithItems(opts ...func(*ItemQuery)) *LocationQuery {
|
||||
@@ -480,13 +480,13 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
withFKs = lq.withFKs
_spec = lq.querySpec()
loadedTypes = [4]bool{
lq.withGroup != nil,
lq.withParent != nil,
lq.withChildren != nil,
lq.withGroup != nil,
lq.withItems != nil,
}
)
if lq.withParent != nil || lq.withGroup != nil {
if lq.withGroup != nil || lq.withParent != nil {
withFKs = true
}
if withFKs {
@@ -510,6 +510,12 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := lq.withGroup; query != nil {
|
||||
if err := lq.loadGroup(ctx, query, nodes, nil,
|
||||
func(n *Location, e *Group) { n.Edges.Group = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := lq.withParent; query != nil {
|
||||
if err := lq.loadParent(ctx, query, nodes, nil,
|
||||
func(n *Location, e *Location) { n.Edges.Parent = e }); err != nil {
|
||||
@@ -523,12 +529,6 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := lq.withGroup; query != nil {
|
||||
if err := lq.loadGroup(ctx, query, nodes, nil,
|
||||
func(n *Location, e *Group) { n.Edges.Group = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := lq.withItems; query != nil {
|
||||
if err := lq.loadItems(ctx, query, nodes,
|
||||
func(n *Location) { n.Edges.Items = []*Item{} },
|
||||
@@ -539,6 +539,38 @@ func (lq *LocationQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Loc
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Location, init func(*Location), assign func(*Location, *Group)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Location)
|
||||
for i := range nodes {
|
||||
if nodes[i].group_locations == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].group_locations
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(group.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (lq *LocationQuery) loadParent(ctx context.Context, query *LocationQuery, nodes []*Location, init func(*Location), assign func(*Location, *Location)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Location)
|
||||
@@ -602,38 +634,6 @@ func (lq *LocationQuery) loadChildren(ctx context.Context, query *LocationQuery,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (lq *LocationQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Location, init func(*Location), assign func(*Location, *Group)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Location)
|
||||
for i := range nodes {
|
||||
if nodes[i].group_locations == nil {
|
||||
continue
|
||||
}
|
||||
fk := *nodes[i].group_locations
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(group.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (lq *LocationQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*Location, init func(*Location), assign func(*Location, *Item)) error {
|
||||
fks := make([]driver.Value, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID]*Location)
|
||||
|
||||
@@ -63,6 +63,17 @@ func (lu *LocationUpdate) ClearDescription() *LocationUpdate {
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (lu *LocationUpdate) SetGroupID(id uuid.UUID) *LocationUpdate {
|
||||
lu.mutation.SetGroupID(id)
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (lu *LocationUpdate) SetGroup(g *Group) *LocationUpdate {
|
||||
return lu.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// SetParentID sets the "parent" edge to the Location entity by ID.
|
||||
func (lu *LocationUpdate) SetParentID(id uuid.UUID) *LocationUpdate {
|
||||
lu.mutation.SetParentID(id)
|
||||
@@ -97,17 +108,6 @@ func (lu *LocationUpdate) AddChildren(l ...*Location) *LocationUpdate {
|
||||
return lu.AddChildIDs(ids...)
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (lu *LocationUpdate) SetGroupID(id uuid.UUID) *LocationUpdate {
|
||||
lu.mutation.SetGroupID(id)
|
||||
return lu
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (lu *LocationUpdate) SetGroup(g *Group) *LocationUpdate {
|
||||
return lu.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// AddItemIDs adds the "items" edge to the Item entity by IDs.
|
||||
func (lu *LocationUpdate) AddItemIDs(ids ...uuid.UUID) *LocationUpdate {
|
||||
lu.mutation.AddItemIDs(ids...)
|
||||
@@ -128,6 +128,12 @@ func (lu *LocationUpdate) Mutation() *LocationMutation {
|
||||
return lu.mutation
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (lu *LocationUpdate) ClearGroup() *LocationUpdate {
|
||||
lu.mutation.ClearGroup()
|
||||
return lu
|
||||
}
|
||||
|
||||
// ClearParent clears the "parent" edge to the Location entity.
|
||||
func (lu *LocationUpdate) ClearParent() *LocationUpdate {
|
||||
lu.mutation.ClearParent()
|
||||
@@ -155,12 +161,6 @@ func (lu *LocationUpdate) RemoveChildren(l ...*Location) *LocationUpdate {
|
||||
return lu.RemoveChildIDs(ids...)
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (lu *LocationUpdate) ClearGroup() *LocationUpdate {
|
||||
lu.mutation.ClearGroup()
|
||||
return lu
|
||||
}
|
||||
|
||||
// ClearItems clears all "items" edges to the Item entity.
|
||||
func (lu *LocationUpdate) ClearItems() *LocationUpdate {
|
||||
lu.mutation.ClearItems()
|
||||
@@ -260,6 +260,35 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
if lu.mutation.DescriptionCleared() {
|
||||
_spec.ClearField(location.FieldDescription, field.TypeString)
|
||||
}
|
||||
if lu.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := lu.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if lu.mutation.ParentCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -268,10 +297,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -284,10 +310,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -303,10 +326,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -319,10 +339,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -338,45 +355,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if lu.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := lu.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -392,10 +371,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -408,10 +384,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -427,10 +400,7 @@ func (lu *LocationUpdate) sqlSave(ctx context.Context) (n int, err error) {
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -490,6 +460,17 @@ func (luo *LocationUpdateOne) ClearDescription() *LocationUpdateOne {
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (luo *LocationUpdateOne) SetGroupID(id uuid.UUID) *LocationUpdateOne {
|
||||
luo.mutation.SetGroupID(id)
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (luo *LocationUpdateOne) SetGroup(g *Group) *LocationUpdateOne {
|
||||
return luo.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// SetParentID sets the "parent" edge to the Location entity by ID.
|
||||
func (luo *LocationUpdateOne) SetParentID(id uuid.UUID) *LocationUpdateOne {
|
||||
luo.mutation.SetParentID(id)
|
||||
@@ -524,17 +505,6 @@ func (luo *LocationUpdateOne) AddChildren(l ...*Location) *LocationUpdateOne {
|
||||
return luo.AddChildIDs(ids...)
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group" edge to the Group entity by ID.
|
||||
func (luo *LocationUpdateOne) SetGroupID(id uuid.UUID) *LocationUpdateOne {
|
||||
luo.mutation.SetGroupID(id)
|
||||
return luo
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (luo *LocationUpdateOne) SetGroup(g *Group) *LocationUpdateOne {
|
||||
return luo.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// AddItemIDs adds the "items" edge to the Item entity by IDs.
|
||||
func (luo *LocationUpdateOne) AddItemIDs(ids ...uuid.UUID) *LocationUpdateOne {
|
||||
luo.mutation.AddItemIDs(ids...)
|
||||
@@ -555,6 +525,12 @@ func (luo *LocationUpdateOne) Mutation() *LocationMutation {
|
||||
return luo.mutation
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (luo *LocationUpdateOne) ClearGroup() *LocationUpdateOne {
|
||||
luo.mutation.ClearGroup()
|
||||
return luo
|
||||
}
|
||||
|
||||
// ClearParent clears the "parent" edge to the Location entity.
|
||||
func (luo *LocationUpdateOne) ClearParent() *LocationUpdateOne {
|
||||
luo.mutation.ClearParent()
|
||||
@@ -582,12 +558,6 @@ func (luo *LocationUpdateOne) RemoveChildren(l ...*Location) *LocationUpdateOne
|
||||
return luo.RemoveChildIDs(ids...)
|
||||
}
|
||||
|
||||
// ClearGroup clears the "group" edge to the Group entity.
|
||||
func (luo *LocationUpdateOne) ClearGroup() *LocationUpdateOne {
|
||||
luo.mutation.ClearGroup()
|
||||
return luo
|
||||
}
|
||||
|
||||
// ClearItems clears all "items" edges to the Item entity.
|
||||
func (luo *LocationUpdateOne) ClearItems() *LocationUpdateOne {
|
||||
luo.mutation.ClearItems()
|
||||
@@ -717,6 +687,35 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
if luo.mutation.DescriptionCleared() {
|
||||
_spec.ClearField(location.FieldDescription, field.TypeString)
|
||||
}
|
||||
if luo.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := luo.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if luo.mutation.ParentCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
@@ -725,10 +724,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -741,10 +737,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ParentColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -760,10 +753,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -776,10 +766,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -795,45 +782,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ChildrenColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: location.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_spec.Edges.Add = append(_spec.Edges.Add, edge)
|
||||
}
|
||||
if luo.mutation.GroupCleared() {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
}
|
||||
if nodes := luo.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: location.GroupTable,
|
||||
Columns: []string{location.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: group.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -849,10 +798,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -865,10 +811,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -884,10 +827,7 @@ func (luo *LocationUpdateOne) sqlSave(ctx context.Context) (_node *Location, err
|
||||
Columns: []string{location.ItemsColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -291,10 +291,7 @@ func (mec *MaintenanceEntryCreate) createSpec() (*MaintenanceEntry, *sqlgraph.Cr
|
||||
Columns: []string{maintenanceentry.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -249,10 +249,7 @@ func (meu *MaintenanceEntryUpdate) sqlSave(ctx context.Context) (n int, err erro
|
||||
Columns: []string{maintenanceentry.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -265,10 +262,7 @@ func (meu *MaintenanceEntryUpdate) sqlSave(ctx context.Context) (n int, err erro
|
||||
Columns: []string{maintenanceentry.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
@@ -545,10 +539,7 @@ func (meuo *MaintenanceEntryUpdateOne) sqlSave(ctx context.Context) (_node *Main
|
||||
Columns: []string{maintenanceentry.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
|
||||
@@ -561,10 +552,7 @@ func (meuo *MaintenanceEntryUpdateOne) sqlSave(ctx context.Context) (_node *Main
|
||||
Columns: []string{maintenanceentry.ItemColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: &sqlgraph.FieldSpec{
|
||||
Type: field.TypeUUID,
|
||||
Column: item.FieldID,
|
||||
},
|
||||
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
|
||||
@@ -116,7 +116,7 @@ var (
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Size: 255},
{Name: "currency", Type: field.TypeEnum, Enums: []string{"usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "sek", "dkk", "inr", "rmb", "bgn", "chf", "pln", "try", "ron"}, Default: "usd"},
{Name: "currency", Type: field.TypeEnum, Enums: []string{"usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "nzd", "sek", "dkk", "inr", "rmb", "bgn", "chf", "pln", "try", "ron", "czk"}, Default: "usd"},
}
// GroupsTable holds the schema information for the "groups" table.
GroupsTable = &schema.Table{
@@ -344,6 +344,59 @@ var (
},
},
}
// NotifiersColumns holds the columns for the "notifiers" table.
NotifiersColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
{Name: "created_at", Type: field.TypeTime},
{Name: "updated_at", Type: field.TypeTime},
{Name: "name", Type: field.TypeString, Size: 255},
{Name: "url", Type: field.TypeString, Size: 2083},
{Name: "is_active", Type: field.TypeBool, Default: true},
{Name: "group_id", Type: field.TypeUUID},
{Name: "user_id", Type: field.TypeUUID},
}
// NotifiersTable holds the schema information for the "notifiers" table.
NotifiersTable = &schema.Table{
Name: "notifiers",
Columns: NotifiersColumns,
PrimaryKey: []*schema.Column{NotifiersColumns[0]},
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "notifiers_groups_notifiers",
Columns: []*schema.Column{NotifiersColumns[6]},
RefColumns: []*schema.Column{GroupsColumns[0]},
OnDelete: schema.Cascade,
},
{
Symbol: "notifiers_users_notifiers",
Columns: []*schema.Column{NotifiersColumns[7]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.Cascade,
},
},
Indexes: []*schema.Index{
{
Name: "notifier_user_id",
Unique: false,
Columns: []*schema.Column{NotifiersColumns[7]},
},
{
Name: "notifier_user_id_is_active",
Unique: false,
Columns: []*schema.Column{NotifiersColumns[7], NotifiersColumns[5]},
},
{
Name: "notifier_group_id",
Unique: false,
Columns: []*schema.Column{NotifiersColumns[6]},
},
{
Name: "notifier_group_id_is_active",
Unique: false,
Columns: []*schema.Column{NotifiersColumns[6], NotifiersColumns[5]},
},
},
}
// UsersColumns holds the columns for the "users" table.
UsersColumns = []*schema.Column{
{Name: "id", Type: field.TypeUUID},
@@ -353,8 +406,8 @@ var (
{Name: "email", Type: field.TypeString, Unique: true, Size: 255},
{Name: "password", Type: field.TypeString, Size: 255},
{Name: "is_superuser", Type: field.TypeBool, Default: false},
{Name: "role", Type: field.TypeEnum, Enums: []string{"user", "owner"}, Default: "user"},
{Name: "superuser", Type: field.TypeBool, Default: false},
{Name: "role", Type: field.TypeEnum, Enums: []string{"user", "owner"}, Default: "user"},
{Name: "activated_on", Type: field.TypeTime, Nullable: true},
{Name: "group_users", Type: field.TypeUUID},
}
@@ -410,6 +463,7 @@ var (
LabelsTable,
LocationsTable,
MaintenanceEntriesTable,
NotifiersTable,
UsersTable,
LabelItemsTable,
}
@@ -430,6 +484,8 @@ func init() {
LocationsTable.ForeignKeys[0].RefTable = GroupsTable
LocationsTable.ForeignKeys[1].RefTable = LocationsTable
MaintenanceEntriesTable.ForeignKeys[0].RefTable = ItemsTable
NotifiersTable.ForeignKeys[0].RefTable = GroupsTable
NotifiersTable.ForeignKeys[1].RefTable = UsersTable
UsersTable.ForeignKeys[0].RefTable = GroupsTable
LabelItemsTable.ForeignKeys[0].RefTable = LabelsTable
LabelItemsTable.ForeignKeys[1].RefTable = ItemsTable

File diff suppressed because it is too large
216
backend/internal/data/ent/notifier.go
Normal file
@@ -0,0 +1,216 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
)
|
||||
|
||||
// Notifier is the model entity for the Notifier schema.
type Notifier struct {
config `json:"-"`
// ID of the ent.
ID uuid.UUID `json:"id,omitempty"`
// CreatedAt holds the value of the "created_at" field.
CreatedAt time.Time `json:"created_at,omitempty"`
// UpdatedAt holds the value of the "updated_at" field.
UpdatedAt time.Time `json:"updated_at,omitempty"`
// GroupID holds the value of the "group_id" field.
GroupID uuid.UUID `json:"group_id,omitempty"`
// UserID holds the value of the "user_id" field.
UserID uuid.UUID `json:"user_id,omitempty"`
// Name holds the value of the "name" field.
Name string `json:"name,omitempty"`
// URL holds the value of the "url" field.
URL string `json:"-"`
// IsActive holds the value of the "is_active" field.
IsActive bool `json:"is_active,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the NotifierQuery when eager-loading is set.
Edges NotifierEdges `json:"edges"`
}

// NotifierEdges holds the relations/edges for other nodes in the graph.
type NotifierEdges struct {
// Group holds the value of the group edge.
Group *Group `json:"group,omitempty"`
// User holds the value of the user edge.
User *User `json:"user,omitempty"`
// loadedTypes holds the information for reporting if a
// type was loaded (or requested) in eager-loading or not.
loadedTypes [2]bool
}

// GroupOrErr returns the Group value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e NotifierEdges) GroupOrErr() (*Group, error) {
|
||||
if e.loadedTypes[0] {
|
||||
if e.Group == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: group.Label}
|
||||
}
|
||||
return e.Group, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "group"}
|
||||
}
|
||||
|
||||
// UserOrErr returns the User value or an error if the edge
|
||||
// was not loaded in eager-loading, or loaded but was not found.
|
||||
func (e NotifierEdges) UserOrErr() (*User, error) {
|
||||
if e.loadedTypes[1] {
|
||||
if e.User == nil {
|
||||
// Edge was loaded but was not found.
|
||||
return nil, &NotFoundError{label: user.Label}
|
||||
}
|
||||
return e.User, nil
|
||||
}
|
||||
return nil, &NotLoadedError{edge: "user"}
|
||||
}
|
||||
|
||||
// scanValues returns the types for scanning values from sql.Rows.
|
||||
func (*Notifier) scanValues(columns []string) ([]any, error) {
|
||||
values := make([]any, len(columns))
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case notifier.FieldIsActive:
|
||||
values[i] = new(sql.NullBool)
|
||||
case notifier.FieldName, notifier.FieldURL:
|
||||
values[i] = new(sql.NullString)
|
||||
case notifier.FieldCreatedAt, notifier.FieldUpdatedAt:
|
||||
values[i] = new(sql.NullTime)
|
||||
case notifier.FieldID, notifier.FieldGroupID, notifier.FieldUserID:
|
||||
values[i] = new(uuid.UUID)
|
||||
default:
|
||||
return nil, fmt.Errorf("unexpected column %q for type Notifier", columns[i])
|
||||
}
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// assignValues assigns the values that were returned from sql.Rows (after scanning)
|
||||
// to the Notifier fields.
|
||||
func (n *Notifier) assignValues(columns []string, values []any) error {
|
||||
if m, n := len(values), len(columns); m < n {
|
||||
return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
|
||||
}
|
||||
for i := range columns {
|
||||
switch columns[i] {
|
||||
case notifier.FieldID:
|
||||
if value, ok := values[i].(*uuid.UUID); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field id", values[i])
|
||||
} else if value != nil {
|
||||
n.ID = *value
|
||||
}
|
||||
case notifier.FieldCreatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field created_at", values[i])
|
||||
} else if value.Valid {
|
||||
n.CreatedAt = value.Time
|
||||
}
|
||||
case notifier.FieldUpdatedAt:
|
||||
if value, ok := values[i].(*sql.NullTime); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field updated_at", values[i])
|
||||
} else if value.Valid {
|
||||
n.UpdatedAt = value.Time
|
||||
}
|
||||
case notifier.FieldGroupID:
|
||||
if value, ok := values[i].(*uuid.UUID); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field group_id", values[i])
|
||||
} else if value != nil {
|
||||
n.GroupID = *value
|
||||
}
|
||||
case notifier.FieldUserID:
|
||||
if value, ok := values[i].(*uuid.UUID); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field user_id", values[i])
|
||||
} else if value != nil {
|
||||
n.UserID = *value
|
||||
}
|
||||
case notifier.FieldName:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field name", values[i])
|
||||
} else if value.Valid {
|
||||
n.Name = value.String
|
||||
}
|
||||
case notifier.FieldURL:
|
||||
if value, ok := values[i].(*sql.NullString); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field url", values[i])
|
||||
} else if value.Valid {
|
||||
n.URL = value.String
|
||||
}
|
||||
case notifier.FieldIsActive:
|
||||
if value, ok := values[i].(*sql.NullBool); !ok {
|
||||
return fmt.Errorf("unexpected type %T for field is_active", values[i])
|
||||
} else if value.Valid {
|
||||
n.IsActive = value.Bool
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// QueryGroup queries the "group" edge of the Notifier entity.
|
||||
func (n *Notifier) QueryGroup() *GroupQuery {
|
||||
return NewNotifierClient(n.config).QueryGroup(n)
|
||||
}
|
||||
|
||||
// QueryUser queries the "user" edge of the Notifier entity.
|
||||
func (n *Notifier) QueryUser() *UserQuery {
|
||||
return NewNotifierClient(n.config).QueryUser(n)
|
||||
}
|
||||
|
||||
// Update returns a builder for updating this Notifier.
|
||||
// Note that you need to call Notifier.Unwrap() before calling this method if this Notifier
|
||||
// was returned from a transaction, and the transaction was committed or rolled back.
|
||||
func (n *Notifier) Update() *NotifierUpdateOne {
|
||||
return NewNotifierClient(n.config).UpdateOne(n)
|
||||
}
|
||||
|
||||
// Unwrap unwraps the Notifier entity that was returned from a transaction after it was closed,
|
||||
// so that all future queries will be executed through the driver which created the transaction.
|
||||
func (n *Notifier) Unwrap() *Notifier {
|
||||
_tx, ok := n.config.driver.(*txDriver)
|
||||
if !ok {
|
||||
panic("ent: Notifier is not a transactional entity")
|
||||
}
|
||||
n.config.driver = _tx.drv
|
||||
return n
|
||||
}
|
||||
|
||||
// String implements the fmt.Stringer.
|
||||
func (n *Notifier) String() string {
|
||||
var builder strings.Builder
|
||||
builder.WriteString("Notifier(")
|
||||
builder.WriteString(fmt.Sprintf("id=%v, ", n.ID))
|
||||
builder.WriteString("created_at=")
|
||||
builder.WriteString(n.CreatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("updated_at=")
|
||||
builder.WriteString(n.UpdatedAt.Format(time.ANSIC))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("group_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", n.GroupID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("user_id=")
|
||||
builder.WriteString(fmt.Sprintf("%v", n.UserID))
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("name=")
|
||||
builder.WriteString(n.Name)
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("url=<sensitive>")
|
||||
builder.WriteString(", ")
|
||||
builder.WriteString("is_active=")
|
||||
builder.WriteString(fmt.Sprintf("%v", n.IsActive))
|
||||
builder.WriteByte(')')
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
// Notifiers is a parsable slice of Notifier.
|
||||
type Notifiers []*Notifier
|
||||
89
backend/internal/data/ent/notifier/notifier.go
Normal file
@@ -0,0 +1,89 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package notifier
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
// Label holds the string label denoting the notifier type in the database.
|
||||
Label = "notifier"
|
||||
// FieldID holds the string denoting the id field in the database.
|
||||
FieldID = "id"
|
||||
// FieldCreatedAt holds the string denoting the created_at field in the database.
|
||||
FieldCreatedAt = "created_at"
|
||||
// FieldUpdatedAt holds the string denoting the updated_at field in the database.
|
||||
FieldUpdatedAt = "updated_at"
|
||||
// FieldGroupID holds the string denoting the group_id field in the database.
|
||||
FieldGroupID = "group_id"
|
||||
// FieldUserID holds the string denoting the user_id field in the database.
|
||||
FieldUserID = "user_id"
|
||||
// FieldName holds the string denoting the name field in the database.
|
||||
FieldName = "name"
|
||||
// FieldURL holds the string denoting the url field in the database.
|
||||
FieldURL = "url"
|
||||
// FieldIsActive holds the string denoting the is_active field in the database.
|
||||
FieldIsActive = "is_active"
|
||||
// EdgeGroup holds the string denoting the group edge name in mutations.
|
||||
EdgeGroup = "group"
|
||||
// EdgeUser holds the string denoting the user edge name in mutations.
|
||||
EdgeUser = "user"
|
||||
// Table holds the table name of the notifier in the database.
|
||||
Table = "notifiers"
|
||||
// GroupTable is the table that holds the group relation/edge.
|
||||
GroupTable = "notifiers"
|
||||
// GroupInverseTable is the table name for the Group entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "group" package.
|
||||
GroupInverseTable = "groups"
|
||||
// GroupColumn is the table column denoting the group relation/edge.
|
||||
GroupColumn = "group_id"
|
||||
// UserTable is the table that holds the user relation/edge.
|
||||
UserTable = "notifiers"
|
||||
// UserInverseTable is the table name for the User entity.
|
||||
// It exists in this package in order to avoid circular dependency with the "user" package.
|
||||
UserInverseTable = "users"
|
||||
// UserColumn is the table column denoting the user relation/edge.
|
||||
UserColumn = "user_id"
|
||||
)
|
||||
|
||||
// Columns holds all SQL columns for notifier fields.
var Columns = []string{
FieldID,
FieldCreatedAt,
FieldUpdatedAt,
FieldGroupID,
FieldUserID,
FieldName,
FieldURL,
FieldIsActive,
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
for i := range Columns {
if column == Columns[i] {
return true
}
}
return false
}

var (
|
||||
// DefaultCreatedAt holds the default value on creation for the "created_at" field.
|
||||
DefaultCreatedAt func() time.Time
|
||||
// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
|
||||
DefaultUpdatedAt func() time.Time
|
||||
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
|
||||
UpdateDefaultUpdatedAt func() time.Time
|
||||
// NameValidator is a validator for the "name" field. It is called by the builders before save.
|
||||
NameValidator func(string) error
|
||||
// URLValidator is a validator for the "url" field. It is called by the builders before save.
|
||||
URLValidator func(string) error
|
||||
// DefaultIsActive holds the default value on creation for the "is_active" field.
|
||||
DefaultIsActive bool
|
||||
// DefaultID holds the default value on creation for the "id" field.
|
||||
DefaultID func() uuid.UUID
|
||||
)
|
||||
backend/internal/data/ent/notifier/where.go (new file, 438 lines)
@@ -0,0 +1,438 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package notifier
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
|
||||
)
|
||||
|
||||
// ID filters vertices based on their ID field.
|
||||
func ID(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDEQ applies the EQ predicate on the ID field.
|
||||
func IDEQ(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDNEQ applies the NEQ predicate on the ID field.
|
||||
func IDNEQ(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldID, id))
|
||||
}
|
||||
|
||||
// IDIn applies the In predicate on the ID field.
|
||||
func IDIn(ids ...uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDNotIn applies the NotIn predicate on the ID field.
|
||||
func IDNotIn(ids ...uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldID, ids...))
|
||||
}
|
||||
|
||||
// IDGT applies the GT predicate on the ID field.
|
||||
func IDGT(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDGTE applies the GTE predicate on the ID field.
|
||||
func IDGTE(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGTE(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLT applies the LT predicate on the ID field.
|
||||
func IDLT(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLT(FieldID, id))
|
||||
}
|
||||
|
||||
// IDLTE applies the LTE predicate on the ID field.
|
||||
func IDLTE(id uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLTE(FieldID, id))
|
||||
}
|
||||
|
||||
// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
|
||||
func CreatedAt(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
|
||||
func UpdatedAt(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// GroupID applies equality check predicate on the "group_id" field. It's identical to GroupIDEQ.
|
||||
func GroupID(v uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldGroupID, v))
|
||||
}
|
||||
|
||||
// UserID applies equality check predicate on the "user_id" field. It's identical to UserIDEQ.
|
||||
func UserID(v uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldUserID, v))
|
||||
}
|
||||
|
||||
// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
|
||||
func Name(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// URL applies equality check predicate on the "url" field. It's identical to URLEQ.
|
||||
func URL(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldURL, v))
|
||||
}
|
||||
|
||||
// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ.
|
||||
func IsActive(v bool) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldIsActive, v))
|
||||
}
|
||||
|
||||
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
|
||||
func CreatedAtEQ(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtNEQ applies the NEQ predicate on the "created_at" field.
|
||||
func CreatedAtNEQ(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtIn applies the In predicate on the "created_at" field.
|
||||
func CreatedAtIn(vs ...time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtNotIn applies the NotIn predicate on the "created_at" field.
|
||||
func CreatedAtNotIn(vs ...time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldCreatedAt, vs...))
|
||||
}
|
||||
|
||||
// CreatedAtGT applies the GT predicate on the "created_at" field.
|
||||
func CreatedAtGT(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtGTE applies the GTE predicate on the "created_at" field.
|
||||
func CreatedAtGTE(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLT applies the LT predicate on the "created_at" field.
|
||||
func CreatedAtLT(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLT(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// CreatedAtLTE applies the LTE predicate on the "created_at" field.
|
||||
func CreatedAtLTE(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLTE(FieldCreatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtEQ applies the EQ predicate on the "updated_at" field.
|
||||
func UpdatedAtEQ(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field.
|
||||
func UpdatedAtNEQ(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtIn applies the In predicate on the "updated_at" field.
|
||||
func UpdatedAtIn(vs ...time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field.
|
||||
func UpdatedAtNotIn(vs ...time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldUpdatedAt, vs...))
|
||||
}
|
||||
|
||||
// UpdatedAtGT applies the GT predicate on the "updated_at" field.
|
||||
func UpdatedAtGT(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtGTE applies the GTE predicate on the "updated_at" field.
|
||||
func UpdatedAtGTE(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLT applies the LT predicate on the "updated_at" field.
|
||||
func UpdatedAtLT(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLT(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// UpdatedAtLTE applies the LTE predicate on the "updated_at" field.
|
||||
func UpdatedAtLTE(v time.Time) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLTE(FieldUpdatedAt, v))
|
||||
}
|
||||
|
||||
// GroupIDEQ applies the EQ predicate on the "group_id" field.
|
||||
func GroupIDEQ(v uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldGroupID, v))
|
||||
}
|
||||
|
||||
// GroupIDNEQ applies the NEQ predicate on the "group_id" field.
|
||||
func GroupIDNEQ(v uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldGroupID, v))
|
||||
}
|
||||
|
||||
// GroupIDIn applies the In predicate on the "group_id" field.
|
||||
func GroupIDIn(vs ...uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldGroupID, vs...))
|
||||
}
|
||||
|
||||
// GroupIDNotIn applies the NotIn predicate on the "group_id" field.
|
||||
func GroupIDNotIn(vs ...uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldGroupID, vs...))
|
||||
}
|
||||
|
||||
// UserIDEQ applies the EQ predicate on the "user_id" field.
|
||||
func UserIDEQ(v uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldUserID, v))
|
||||
}
|
||||
|
||||
// UserIDNEQ applies the NEQ predicate on the "user_id" field.
|
||||
func UserIDNEQ(v uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldUserID, v))
|
||||
}
|
||||
|
||||
// UserIDIn applies the In predicate on the "user_id" field.
|
||||
func UserIDIn(vs ...uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldUserID, vs...))
|
||||
}
|
||||
|
||||
// UserIDNotIn applies the NotIn predicate on the "user_id" field.
|
||||
func UserIDNotIn(vs ...uuid.UUID) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldUserID, vs...))
|
||||
}
|
||||
|
||||
// NameEQ applies the EQ predicate on the "name" field.
|
||||
func NameEQ(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameNEQ applies the NEQ predicate on the "name" field.
|
||||
func NameNEQ(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldName, v))
|
||||
}
|
||||
|
||||
// NameIn applies the In predicate on the "name" field.
|
||||
func NameIn(vs ...string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameNotIn applies the NotIn predicate on the "name" field.
|
||||
func NameNotIn(vs ...string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldName, vs...))
|
||||
}
|
||||
|
||||
// NameGT applies the GT predicate on the "name" field.
|
||||
func NameGT(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameGTE applies the GTE predicate on the "name" field.
|
||||
func NameGTE(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLT applies the LT predicate on the "name" field.
|
||||
func NameLT(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLT(FieldName, v))
|
||||
}
|
||||
|
||||
// NameLTE applies the LTE predicate on the "name" field.
|
||||
func NameLTE(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLTE(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContains applies the Contains predicate on the "name" field.
|
||||
func NameContains(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldContains(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasPrefix applies the HasPrefix predicate on the "name" field.
|
||||
func NameHasPrefix(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldHasPrefix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameHasSuffix applies the HasSuffix predicate on the "name" field.
|
||||
func NameHasSuffix(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldHasSuffix(FieldName, v))
|
||||
}
|
||||
|
||||
// NameEqualFold applies the EqualFold predicate on the "name" field.
|
||||
func NameEqualFold(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEqualFold(FieldName, v))
|
||||
}
|
||||
|
||||
// NameContainsFold applies the ContainsFold predicate on the "name" field.
|
||||
func NameContainsFold(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldContainsFold(FieldName, v))
|
||||
}
|
||||
|
||||
// URLEQ applies the EQ predicate on the "url" field.
|
||||
func URLEQ(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLNEQ applies the NEQ predicate on the "url" field.
|
||||
func URLNEQ(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLIn applies the In predicate on the "url" field.
|
||||
func URLIn(vs ...string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldIn(FieldURL, vs...))
|
||||
}
|
||||
|
||||
// URLNotIn applies the NotIn predicate on the "url" field.
|
||||
func URLNotIn(vs ...string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNotIn(FieldURL, vs...))
|
||||
}
|
||||
|
||||
// URLGT applies the GT predicate on the "url" field.
|
||||
func URLGT(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGT(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLGTE applies the GTE predicate on the "url" field.
|
||||
func URLGTE(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldGTE(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLLT applies the LT predicate on the "url" field.
|
||||
func URLLT(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLT(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLLTE applies the LTE predicate on the "url" field.
|
||||
func URLLTE(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldLTE(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLContains applies the Contains predicate on the "url" field.
|
||||
func URLContains(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldContains(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLHasPrefix applies the HasPrefix predicate on the "url" field.
|
||||
func URLHasPrefix(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldHasPrefix(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLHasSuffix applies the HasSuffix predicate on the "url" field.
|
||||
func URLHasSuffix(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldHasSuffix(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLEqualFold applies the EqualFold predicate on the "url" field.
|
||||
func URLEqualFold(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEqualFold(FieldURL, v))
|
||||
}
|
||||
|
||||
// URLContainsFold applies the ContainsFold predicate on the "url" field.
|
||||
func URLContainsFold(v string) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldContainsFold(FieldURL, v))
|
||||
}
|
||||
|
||||
// IsActiveEQ applies the EQ predicate on the "is_active" field.
|
||||
func IsActiveEQ(v bool) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldEQ(FieldIsActive, v))
|
||||
}
|
||||
|
||||
// IsActiveNEQ applies the NEQ predicate on the "is_active" field.
|
||||
func IsActiveNEQ(v bool) predicate.Notifier {
|
||||
return predicate.Notifier(sql.FieldNEQ(FieldIsActive, v))
|
||||
}
|
||||
|
||||
// HasGroup applies the HasEdge predicate on the "group" edge.
|
||||
func HasGroup() predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
|
||||
func HasGroupWith(preds ...predicate.Group) predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(GroupInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
|
||||
)
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// HasUser applies the HasEdge predicate on the "user" edge.
|
||||
func HasUser() predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||
)
|
||||
sqlgraph.HasNeighbors(s, step)
|
||||
})
|
||||
}
|
||||
|
||||
// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates).
|
||||
func HasUserWith(preds ...predicate.User) predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(Table, FieldID),
|
||||
sqlgraph.To(UserInverseTable, FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
|
||||
)
|
||||
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
|
||||
for _, p := range preds {
|
||||
p(s)
|
||||
}
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// And groups predicates with the AND operator between them.
|
||||
func And(predicates ...predicate.Notifier) predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for _, p := range predicates {
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
}
|
||||
|
||||
// Or groups predicates with the OR operator between them.
|
||||
func Or(predicates ...predicate.Notifier) predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
s1 := s.Clone().SetP(nil)
|
||||
for i, p := range predicates {
|
||||
if i > 0 {
|
||||
s1.Or()
|
||||
}
|
||||
p(s1)
|
||||
}
|
||||
s.Where(s1.P())
|
||||
})
|
||||
}
|
||||
|
||||
// Not applies the not operator on the given predicate.
|
||||
func Not(p predicate.Notifier) predicate.Notifier {
|
||||
return predicate.Notifier(func(s *sql.Selector) {
|
||||
p(s.Not())
|
||||
})
|
||||
}
|
||||
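The predicate helpers in where.go above are what the query, update, and delete builders in the following files accept through their Where calls. As a rough usage sketch (not part of this diff, written as if it lived next to the generated code in package ent, and assuming the generated *Client exposes a Notifier client as ent normally does):

// Sketch only: list the active notifiers that belong to one group.
func activeNotifiers(ctx context.Context, client *Client, groupID uuid.UUID) ([]*Notifier, error) {
	return client.Notifier.Query().
		Where(
			notifier.GroupIDEQ(groupID), // predicate generated in where.go
			notifier.IsActive(true),     // shorthand for IsActiveEQ(true)
		).
		All(ctx)
}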
backend/internal/data/ent/notifier_create.go (new file, 378 lines)
@@ -0,0 +1,378 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
)
|
||||
|
||||
// NotifierCreate is the builder for creating a Notifier entity.
|
||||
type NotifierCreate struct {
|
||||
config
|
||||
mutation *NotifierMutation
|
||||
hooks []Hook
|
||||
}
|
||||
|
||||
// SetCreatedAt sets the "created_at" field.
|
||||
func (nc *NotifierCreate) SetCreatedAt(t time.Time) *NotifierCreate {
|
||||
nc.mutation.SetCreatedAt(t)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetNillableCreatedAt sets the "created_at" field if the given value is not nil.
|
||||
func (nc *NotifierCreate) SetNillableCreatedAt(t *time.Time) *NotifierCreate {
|
||||
if t != nil {
|
||||
nc.SetCreatedAt(*t)
|
||||
}
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetUpdatedAt sets the "updated_at" field.
|
||||
func (nc *NotifierCreate) SetUpdatedAt(t time.Time) *NotifierCreate {
|
||||
nc.mutation.SetUpdatedAt(t)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil.
|
||||
func (nc *NotifierCreate) SetNillableUpdatedAt(t *time.Time) *NotifierCreate {
|
||||
if t != nil {
|
||||
nc.SetUpdatedAt(*t)
|
||||
}
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetGroupID sets the "group_id" field.
|
||||
func (nc *NotifierCreate) SetGroupID(u uuid.UUID) *NotifierCreate {
|
||||
nc.mutation.SetGroupID(u)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetUserID sets the "user_id" field.
|
||||
func (nc *NotifierCreate) SetUserID(u uuid.UUID) *NotifierCreate {
|
||||
nc.mutation.SetUserID(u)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetName sets the "name" field.
|
||||
func (nc *NotifierCreate) SetName(s string) *NotifierCreate {
|
||||
nc.mutation.SetName(s)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetURL sets the "url" field.
|
||||
func (nc *NotifierCreate) SetURL(s string) *NotifierCreate {
|
||||
nc.mutation.SetURL(s)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetIsActive sets the "is_active" field.
|
||||
func (nc *NotifierCreate) SetIsActive(b bool) *NotifierCreate {
|
||||
nc.mutation.SetIsActive(b)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetNillableIsActive sets the "is_active" field if the given value is not nil.
|
||||
func (nc *NotifierCreate) SetNillableIsActive(b *bool) *NotifierCreate {
|
||||
if b != nil {
|
||||
nc.SetIsActive(*b)
|
||||
}
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetID sets the "id" field.
|
||||
func (nc *NotifierCreate) SetID(u uuid.UUID) *NotifierCreate {
|
||||
nc.mutation.SetID(u)
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetNillableID sets the "id" field if the given value is not nil.
|
||||
func (nc *NotifierCreate) SetNillableID(u *uuid.UUID) *NotifierCreate {
|
||||
if u != nil {
|
||||
nc.SetID(*u)
|
||||
}
|
||||
return nc
|
||||
}
|
||||
|
||||
// SetGroup sets the "group" edge to the Group entity.
|
||||
func (nc *NotifierCreate) SetGroup(g *Group) *NotifierCreate {
|
||||
return nc.SetGroupID(g.ID)
|
||||
}
|
||||
|
||||
// SetUser sets the "user" edge to the User entity.
|
||||
func (nc *NotifierCreate) SetUser(u *User) *NotifierCreate {
|
||||
return nc.SetUserID(u.ID)
|
||||
}
|
||||
|
||||
// Mutation returns the NotifierMutation object of the builder.
|
||||
func (nc *NotifierCreate) Mutation() *NotifierMutation {
|
||||
return nc.mutation
|
||||
}
|
||||
|
||||
// Save creates the Notifier in the database.
|
||||
func (nc *NotifierCreate) Save(ctx context.Context) (*Notifier, error) {
|
||||
nc.defaults()
|
||||
return withHooks[*Notifier, NotifierMutation](ctx, nc.sqlSave, nc.mutation, nc.hooks)
|
||||
}
|
||||
|
||||
// SaveX calls Save and panics if Save returns an error.
|
||||
func (nc *NotifierCreate) SaveX(ctx context.Context) *Notifier {
|
||||
v, err := nc.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (nc *NotifierCreate) Exec(ctx context.Context) error {
|
||||
_, err := nc.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (nc *NotifierCreate) ExecX(ctx context.Context) {
|
||||
if err := nc.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaults sets the default values of the builder before save.
|
||||
func (nc *NotifierCreate) defaults() {
|
||||
if _, ok := nc.mutation.CreatedAt(); !ok {
|
||||
v := notifier.DefaultCreatedAt()
|
||||
nc.mutation.SetCreatedAt(v)
|
||||
}
|
||||
if _, ok := nc.mutation.UpdatedAt(); !ok {
|
||||
v := notifier.DefaultUpdatedAt()
|
||||
nc.mutation.SetUpdatedAt(v)
|
||||
}
|
||||
if _, ok := nc.mutation.IsActive(); !ok {
|
||||
v := notifier.DefaultIsActive
|
||||
nc.mutation.SetIsActive(v)
|
||||
}
|
||||
if _, ok := nc.mutation.ID(); !ok {
|
||||
v := notifier.DefaultID()
|
||||
nc.mutation.SetID(v)
|
||||
}
|
||||
}
|
||||
|
||||
// check runs all checks and user-defined validators on the builder.
|
||||
func (nc *NotifierCreate) check() error {
|
||||
if _, ok := nc.mutation.CreatedAt(); !ok {
|
||||
return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Notifier.created_at"`)}
|
||||
}
|
||||
if _, ok := nc.mutation.UpdatedAt(); !ok {
|
||||
return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Notifier.updated_at"`)}
|
||||
}
|
||||
if _, ok := nc.mutation.GroupID(); !ok {
|
||||
return &ValidationError{Name: "group_id", err: errors.New(`ent: missing required field "Notifier.group_id"`)}
|
||||
}
|
||||
if _, ok := nc.mutation.UserID(); !ok {
|
||||
return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "Notifier.user_id"`)}
|
||||
}
|
||||
if _, ok := nc.mutation.Name(); !ok {
|
||||
return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "Notifier.name"`)}
|
||||
}
|
||||
if v, ok := nc.mutation.Name(); ok {
|
||||
if err := notifier.NameValidator(v); err != nil {
|
||||
return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := nc.mutation.URL(); !ok {
|
||||
return &ValidationError{Name: "url", err: errors.New(`ent: missing required field "Notifier.url"`)}
|
||||
}
|
||||
if v, ok := nc.mutation.URL(); ok {
|
||||
if err := notifier.URLValidator(v); err != nil {
|
||||
return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)}
|
||||
}
|
||||
}
|
||||
if _, ok := nc.mutation.IsActive(); !ok {
|
||||
return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "Notifier.is_active"`)}
|
||||
}
|
||||
if _, ok := nc.mutation.GroupID(); !ok {
|
||||
return &ValidationError{Name: "group", err: errors.New(`ent: missing required edge "Notifier.group"`)}
|
||||
}
|
||||
if _, ok := nc.mutation.UserID(); !ok {
|
||||
return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "Notifier.user"`)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nc *NotifierCreate) sqlSave(ctx context.Context) (*Notifier, error) {
|
||||
if err := nc.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_node, _spec := nc.createSpec()
|
||||
if err := sqlgraph.CreateNode(ctx, nc.driver, _spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if _spec.ID.Value != nil {
|
||||
if id, ok := _spec.ID.Value.(*uuid.UUID); ok {
|
||||
_node.ID = *id
|
||||
} else if err := _node.ID.Scan(_spec.ID.Value); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
nc.mutation.id = &_node.ID
|
||||
nc.mutation.done = true
|
||||
return _node, nil
|
||||
}
|
||||
|
||||
func (nc *NotifierCreate) createSpec() (*Notifier, *sqlgraph.CreateSpec) {
|
||||
var (
|
||||
_node = &Notifier{config: nc.config}
|
||||
_spec = sqlgraph.NewCreateSpec(notifier.Table, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
|
||||
)
|
||||
if id, ok := nc.mutation.ID(); ok {
|
||||
_node.ID = id
|
||||
_spec.ID.Value = &id
|
||||
}
|
||||
if value, ok := nc.mutation.CreatedAt(); ok {
|
||||
_spec.SetField(notifier.FieldCreatedAt, field.TypeTime, value)
|
||||
_node.CreatedAt = value
|
||||
}
|
||||
if value, ok := nc.mutation.UpdatedAt(); ok {
|
||||
_spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value)
|
||||
_node.UpdatedAt = value
|
||||
}
|
||||
if value, ok := nc.mutation.Name(); ok {
|
||||
_spec.SetField(notifier.FieldName, field.TypeString, value)
|
||||
_node.Name = value
|
||||
}
|
||||
if value, ok := nc.mutation.URL(); ok {
|
||||
_spec.SetField(notifier.FieldURL, field.TypeString, value)
|
||||
_node.URL = value
|
||||
}
|
||||
if value, ok := nc.mutation.IsActive(); ok {
|
||||
_spec.SetField(notifier.FieldIsActive, field.TypeBool, value)
|
||||
_node.IsActive = value
|
||||
}
|
||||
if nodes := nc.mutation.GroupIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: notifier.GroupTable,
|
||||
Columns: []string{notifier.GroupColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.GroupID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
if nodes := nc.mutation.UserIDs(); len(nodes) > 0 {
|
||||
edge := &sqlgraph.EdgeSpec{
|
||||
Rel: sqlgraph.M2O,
|
||||
Inverse: true,
|
||||
Table: notifier.UserTable,
|
||||
Columns: []string{notifier.UserColumn},
|
||||
Bidi: false,
|
||||
Target: &sqlgraph.EdgeTarget{
|
||||
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
|
||||
},
|
||||
}
|
||||
for _, k := range nodes {
|
||||
edge.Target.Nodes = append(edge.Target.Nodes, k)
|
||||
}
|
||||
_node.UserID = nodes[0]
|
||||
_spec.Edges = append(_spec.Edges, edge)
|
||||
}
|
||||
return _node, _spec
|
||||
}
|
||||
|
||||
// NotifierCreateBulk is the builder for creating many Notifier entities in bulk.
|
||||
type NotifierCreateBulk struct {
|
||||
config
|
||||
builders []*NotifierCreate
|
||||
}
|
||||
|
||||
// Save creates the Notifier entities in the database.
|
||||
func (ncb *NotifierCreateBulk) Save(ctx context.Context) ([]*Notifier, error) {
|
||||
specs := make([]*sqlgraph.CreateSpec, len(ncb.builders))
|
||||
nodes := make([]*Notifier, len(ncb.builders))
|
||||
mutators := make([]Mutator, len(ncb.builders))
|
||||
for i := range ncb.builders {
|
||||
func(i int, root context.Context) {
|
||||
builder := ncb.builders[i]
|
||||
builder.defaults()
|
||||
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
|
||||
mutation, ok := m.(*NotifierMutation)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unexpected mutation type %T", m)
|
||||
}
|
||||
if err := builder.check(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
builder.mutation = mutation
|
||||
nodes[i], specs[i] = builder.createSpec()
|
||||
var err error
|
||||
if i < len(mutators)-1 {
|
||||
_, err = mutators[i+1].Mutate(root, ncb.builders[i+1].mutation)
|
||||
} else {
|
||||
spec := &sqlgraph.BatchCreateSpec{Nodes: specs}
|
||||
// Invoke the actual operation on the latest mutation in the chain.
|
||||
if err = sqlgraph.BatchCreate(ctx, ncb.driver, spec); err != nil {
|
||||
if sqlgraph.IsConstraintError(err) {
|
||||
err = &ConstraintError{msg: err.Error(), wrap: err}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mutation.id = &nodes[i].ID
|
||||
mutation.done = true
|
||||
return nodes[i], nil
|
||||
})
|
||||
for i := len(builder.hooks) - 1; i >= 0; i-- {
|
||||
mut = builder.hooks[i](mut)
|
||||
}
|
||||
mutators[i] = mut
|
||||
}(i, ctx)
|
||||
}
|
||||
if len(mutators) > 0 {
|
||||
if _, err := mutators[0].Mutate(ctx, ncb.builders[0].mutation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
// SaveX is like Save, but panics if an error occurs.
|
||||
func (ncb *NotifierCreateBulk) SaveX(ctx context.Context) []*Notifier {
|
||||
v, err := ncb.Save(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Exec executes the query.
|
||||
func (ncb *NotifierCreateBulk) Exec(ctx context.Context) error {
|
||||
_, err := ncb.Save(ctx)
|
||||
return err
|
||||
}
|
||||
|
||||
// ExecX is like Exec, but panics if an error occurs.
|
||||
func (ncb *NotifierCreateBulk) ExecX(ctx context.Context) {
|
||||
if err := ncb.Exec(ctx); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
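For reference, a hedged sketch of how the NotifierCreate builder above would typically be driven; the *Client entry point, helper name, and concrete values are assumptions, not part of this diff:

// Sketch only: create a notifier; the group and user edges are required by check().
func createNotifier(ctx context.Context, client *Client, groupID, userID uuid.UUID) (*Notifier, error) {
	return client.Notifier.Create().
		SetName("my-webhook").                     // must pass notifier.NameValidator
		SetURL("https://example.com/placeholder"). // placeholder URL; must pass notifier.URLValidator
		SetGroupID(groupID).
		SetUserID(userID).
		Save(ctx)
}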
backend/internal/data/ent/notifier_delete.go (new file, 88 lines)
@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
	"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)

// NotifierDelete is the builder for deleting a Notifier entity.
type NotifierDelete struct {
	config
	hooks    []Hook
	mutation *NotifierMutation
}

// Where appends a list predicates to the NotifierDelete builder.
func (nd *NotifierDelete) Where(ps ...predicate.Notifier) *NotifierDelete {
	nd.mutation.Where(ps...)
	return nd
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (nd *NotifierDelete) Exec(ctx context.Context) (int, error) {
	return withHooks[int, NotifierMutation](ctx, nd.sqlExec, nd.mutation, nd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (nd *NotifierDelete) ExecX(ctx context.Context) int {
	n, err := nd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

func (nd *NotifierDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(notifier.Table, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
	if ps := nd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, nd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	nd.mutation.done = true
	return affected, err
}

// NotifierDeleteOne is the builder for deleting a single Notifier entity.
type NotifierDeleteOne struct {
	nd *NotifierDelete
}

// Where appends a list predicates to the NotifierDelete builder.
func (ndo *NotifierDeleteOne) Where(ps ...predicate.Notifier) *NotifierDeleteOne {
	ndo.nd.mutation.Where(ps...)
	return ndo
}

// Exec executes the deletion query.
func (ndo *NotifierDeleteOne) Exec(ctx context.Context) error {
	n, err := ndo.nd.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		return &NotFoundError{notifier.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (ndo *NotifierDeleteOne) ExecX(ctx context.Context) {
	if err := ndo.Exec(ctx); err != nil {
		panic(err)
	}
}
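A similar hedged sketch for the delete builder above; again, the *Client entry point and the helper name are assumptions rather than part of this diff:

// Sketch only: remove every inactive notifier owned by a user, returning the count.
func pruneInactiveNotifiers(ctx context.Context, client *Client, userID uuid.UUID) (int, error) {
	return client.Notifier.Delete().
		Where(
			notifier.UserIDEQ(userID),
			notifier.IsActive(false),
		).
		Exec(ctx)
}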
backend/internal/data/ent/notifier_query.go (new file, 675 lines)
@@ -0,0 +1,675 @@
|
||||
// Code generated by ent, DO NOT EDIT.
|
||||
|
||||
package ent
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"entgo.io/ent/dialect/sql"
|
||||
"entgo.io/ent/dialect/sql/sqlgraph"
|
||||
"entgo.io/ent/schema/field"
|
||||
"github.com/google/uuid"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
|
||||
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
|
||||
)
|
||||
|
||||
// NotifierQuery is the builder for querying Notifier entities.
|
||||
type NotifierQuery struct {
|
||||
config
|
||||
ctx *QueryContext
|
||||
order []OrderFunc
|
||||
inters []Interceptor
|
||||
predicates []predicate.Notifier
|
||||
withGroup *GroupQuery
|
||||
withUser *UserQuery
|
||||
// intermediate query (i.e. traversal path).
|
||||
sql *sql.Selector
|
||||
path func(context.Context) (*sql.Selector, error)
|
||||
}
|
||||
|
||||
// Where adds a new predicate for the NotifierQuery builder.
|
||||
func (nq *NotifierQuery) Where(ps ...predicate.Notifier) *NotifierQuery {
|
||||
nq.predicates = append(nq.predicates, ps...)
|
||||
return nq
|
||||
}
|
||||
|
||||
// Limit the number of records to be returned by this query.
|
||||
func (nq *NotifierQuery) Limit(limit int) *NotifierQuery {
|
||||
nq.ctx.Limit = &limit
|
||||
return nq
|
||||
}
|
||||
|
||||
// Offset to start from.
|
||||
func (nq *NotifierQuery) Offset(offset int) *NotifierQuery {
|
||||
nq.ctx.Offset = &offset
|
||||
return nq
|
||||
}
|
||||
|
||||
// Unique configures the query builder to filter duplicate records on query.
|
||||
// By default, unique is set to true, and can be disabled using this method.
|
||||
func (nq *NotifierQuery) Unique(unique bool) *NotifierQuery {
|
||||
nq.ctx.Unique = &unique
|
||||
return nq
|
||||
}
|
||||
|
||||
// Order specifies how the records should be ordered.
|
||||
func (nq *NotifierQuery) Order(o ...OrderFunc) *NotifierQuery {
|
||||
nq.order = append(nq.order, o...)
|
||||
return nq
|
||||
}
|
||||
|
||||
// QueryGroup chains the current query on the "group" edge.
|
||||
func (nq *NotifierQuery) QueryGroup() *GroupQuery {
|
||||
query := (&GroupClient{config: nq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := nq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := nq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(notifier.Table, notifier.FieldID, selector),
|
||||
sqlgraph.To(group.Table, group.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, notifier.GroupTable, notifier.GroupColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// QueryUser chains the current query on the "user" edge.
|
||||
func (nq *NotifierQuery) QueryUser() *UserQuery {
|
||||
query := (&UserClient{config: nq.config}).Query()
|
||||
query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
|
||||
if err := nq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
selector := nq.sqlQuery(ctx)
|
||||
if err := selector.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
step := sqlgraph.NewStep(
|
||||
sqlgraph.From(notifier.Table, notifier.FieldID, selector),
|
||||
sqlgraph.To(user.Table, user.FieldID),
|
||||
sqlgraph.Edge(sqlgraph.M2O, true, notifier.UserTable, notifier.UserColumn),
|
||||
)
|
||||
fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step)
|
||||
return fromU, nil
|
||||
}
|
||||
return query
|
||||
}
|
||||
|
||||
// First returns the first Notifier entity from the query.
|
||||
// Returns a *NotFoundError when no Notifier was found.
|
||||
func (nq *NotifierQuery) First(ctx context.Context) (*Notifier, error) {
|
||||
nodes, err := nq.Limit(1).All(setContextOp(ctx, nq.ctx, "First"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nil, &NotFoundError{notifier.Label}
|
||||
}
|
||||
return nodes[0], nil
|
||||
}
|
||||
|
||||
// FirstX is like First, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) FirstX(ctx context.Context) *Notifier {
|
||||
node, err := nq.First(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// FirstID returns the first Notifier ID from the query.
|
||||
// Returns a *NotFoundError when no Notifier ID was found.
|
||||
func (nq *NotifierQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = nq.Limit(1).IDs(setContextOp(ctx, nq.ctx, "FirstID")); err != nil {
|
||||
return
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
err = &NotFoundError{notifier.Label}
|
||||
return
|
||||
}
|
||||
return ids[0], nil
|
||||
}
|
||||
|
||||
// FirstIDX is like FirstID, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) FirstIDX(ctx context.Context) uuid.UUID {
|
||||
id, err := nq.FirstID(ctx)
|
||||
if err != nil && !IsNotFound(err) {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// Only returns a single Notifier entity found by the query, ensuring it only returns one.
|
||||
// Returns a *NotSingularError when more than one Notifier entity is found.
|
||||
// Returns a *NotFoundError when no Notifier entities are found.
|
||||
func (nq *NotifierQuery) Only(ctx context.Context) (*Notifier, error) {
|
||||
nodes, err := nq.Limit(2).All(setContextOp(ctx, nq.ctx, "Only"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch len(nodes) {
|
||||
case 1:
|
||||
return nodes[0], nil
|
||||
case 0:
|
||||
return nil, &NotFoundError{notifier.Label}
|
||||
default:
|
||||
return nil, &NotSingularError{notifier.Label}
|
||||
}
|
||||
}
|
||||
|
||||
// OnlyX is like Only, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) OnlyX(ctx context.Context) *Notifier {
|
||||
node, err := nq.Only(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// OnlyID is like Only, but returns the only Notifier ID in the query.
|
||||
// Returns a *NotSingularError when more than one Notifier ID is found.
|
||||
// Returns a *NotFoundError when no entities are found.
|
||||
func (nq *NotifierQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) {
|
||||
var ids []uuid.UUID
|
||||
if ids, err = nq.Limit(2).IDs(setContextOp(ctx, nq.ctx, "OnlyID")); err != nil {
|
||||
return
|
||||
}
|
||||
switch len(ids) {
|
||||
case 1:
|
||||
id = ids[0]
|
||||
case 0:
|
||||
err = &NotFoundError{notifier.Label}
|
||||
default:
|
||||
err = &NotSingularError{notifier.Label}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// OnlyIDX is like OnlyID, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) OnlyIDX(ctx context.Context) uuid.UUID {
|
||||
id, err := nq.OnlyID(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// All executes the query and returns a list of Notifiers.
|
||||
func (nq *NotifierQuery) All(ctx context.Context) ([]*Notifier, error) {
|
||||
ctx = setContextOp(ctx, nq.ctx, "All")
|
||||
if err := nq.prepareQuery(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qr := querierAll[[]*Notifier, *NotifierQuery]()
|
||||
return withInterceptors[[]*Notifier](ctx, nq, qr, nq.inters)
|
||||
}
|
||||
|
||||
// AllX is like All, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) AllX(ctx context.Context) []*Notifier {
|
||||
nodes, err := nq.All(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
// IDs executes the query and returns a list of Notifier IDs.
|
||||
func (nq *NotifierQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) {
|
||||
if nq.ctx.Unique == nil && nq.path != nil {
|
||||
nq.Unique(true)
|
||||
}
|
||||
ctx = setContextOp(ctx, nq.ctx, "IDs")
|
||||
if err = nq.Select(notifier.FieldID).Scan(ctx, &ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ids, nil
|
||||
}
|
||||
|
||||
// IDsX is like IDs, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) IDsX(ctx context.Context) []uuid.UUID {
|
||||
ids, err := nq.IDs(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ids
|
||||
}
|
||||
|
||||
// Count returns the count of the given query.
|
||||
func (nq *NotifierQuery) Count(ctx context.Context) (int, error) {
|
||||
ctx = setContextOp(ctx, nq.ctx, "Count")
|
||||
if err := nq.prepareQuery(ctx); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return withInterceptors[int](ctx, nq, querierCount[*NotifierQuery](), nq.inters)
|
||||
}
|
||||
|
||||
// CountX is like Count, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) CountX(ctx context.Context) int {
|
||||
count, err := nq.Count(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Exist returns true if the query has elements in the graph.
|
||||
func (nq *NotifierQuery) Exist(ctx context.Context) (bool, error) {
|
||||
ctx = setContextOp(ctx, nq.ctx, "Exist")
|
||||
switch _, err := nq.FirstID(ctx); {
|
||||
case IsNotFound(err):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("ent: check existence: %w", err)
|
||||
default:
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ExistX is like Exist, but panics if an error occurs.
|
||||
func (nq *NotifierQuery) ExistX(ctx context.Context) bool {
|
||||
exist, err := nq.Exist(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exist
|
||||
}
|
||||
|
||||
// Clone returns a duplicate of the NotifierQuery builder, including all associated steps. It can be
|
||||
// used to prepare common query builders and use them differently after the clone is made.
|
||||
func (nq *NotifierQuery) Clone() *NotifierQuery {
|
||||
if nq == nil {
|
||||
return nil
|
||||
}
|
||||
return &NotifierQuery{
|
||||
config: nq.config,
|
||||
ctx: nq.ctx.Clone(),
|
||||
order: append([]OrderFunc{}, nq.order...),
|
||||
inters: append([]Interceptor{}, nq.inters...),
|
||||
predicates: append([]predicate.Notifier{}, nq.predicates...),
|
||||
withGroup: nq.withGroup.Clone(),
|
||||
withUser: nq.withUser.Clone(),
|
||||
// clone intermediate query.
|
||||
sql: nq.sql.Clone(),
|
||||
path: nq.path,
|
||||
}
|
||||
}
|
||||
|
||||
// WithGroup tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "group" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (nq *NotifierQuery) WithGroup(opts ...func(*GroupQuery)) *NotifierQuery {
|
||||
query := (&GroupClient{config: nq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
nq.withGroup = query
|
||||
return nq
|
||||
}
|
||||
|
||||
// WithUser tells the query-builder to eager-load the nodes that are connected to
|
||||
// the "user" edge. The optional arguments are used to configure the query builder of the edge.
|
||||
func (nq *NotifierQuery) WithUser(opts ...func(*UserQuery)) *NotifierQuery {
|
||||
query := (&UserClient{config: nq.config}).Query()
|
||||
for _, opt := range opts {
|
||||
opt(query)
|
||||
}
|
||||
nq.withUser = query
|
||||
return nq
|
||||
}
|
||||
|
||||
// GroupBy is used to group vertices by one or more fields/columns.
|
||||
// It is often used with aggregate functions, like: count, max, mean, min, sum.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// Count int `json:"count,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.Notifier.Query().
|
||||
// GroupBy(notifier.FieldCreatedAt).
|
||||
// Aggregate(ent.Count()).
|
||||
// Scan(ctx, &v)
|
||||
func (nq *NotifierQuery) GroupBy(field string, fields ...string) *NotifierGroupBy {
|
||||
nq.ctx.Fields = append([]string{field}, fields...)
|
||||
grbuild := &NotifierGroupBy{build: nq}
|
||||
grbuild.flds = &nq.ctx.Fields
|
||||
grbuild.label = notifier.Label
|
||||
grbuild.scan = grbuild.Scan
|
||||
return grbuild
|
||||
}
|
||||
|
||||
// Select allows the selection one or more fields/columns for the given query,
|
||||
// instead of selecting all fields in the entity.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// var v []struct {
|
||||
// CreatedAt time.Time `json:"created_at,omitempty"`
|
||||
// }
|
||||
//
|
||||
// client.Notifier.Query().
|
||||
// Select(notifier.FieldCreatedAt).
|
||||
// Scan(ctx, &v)
|
||||
func (nq *NotifierQuery) Select(fields ...string) *NotifierSelect {
|
||||
nq.ctx.Fields = append(nq.ctx.Fields, fields...)
|
||||
sbuild := &NotifierSelect{NotifierQuery: nq}
|
||||
sbuild.label = notifier.Label
|
||||
sbuild.flds, sbuild.scan = &nq.ctx.Fields, sbuild.Scan
|
||||
return sbuild
|
||||
}
|
||||
|
||||
// Aggregate returns a NotifierSelect configured with the given aggregations.
|
||||
func (nq *NotifierQuery) Aggregate(fns ...AggregateFunc) *NotifierSelect {
|
||||
return nq.Select().Aggregate(fns...)
|
||||
}
|
||||
|
||||
func (nq *NotifierQuery) prepareQuery(ctx context.Context) error {
|
||||
for _, inter := range nq.inters {
|
||||
if inter == nil {
|
||||
return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
|
||||
}
|
||||
if trv, ok := inter.(Traverser); ok {
|
||||
if err := trv.Traverse(ctx, nq); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, f := range nq.ctx.Fields {
|
||||
if !notifier.ValidColumn(f) {
|
||||
return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
|
||||
}
|
||||
}
|
||||
if nq.path != nil {
|
||||
prev, err := nq.path(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
nq.sql = prev
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nq *NotifierQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Notifier, error) {
|
||||
var (
|
||||
nodes = []*Notifier{}
|
||||
_spec = nq.querySpec()
|
||||
loadedTypes = [2]bool{
|
||||
nq.withGroup != nil,
|
||||
nq.withUser != nil,
|
||||
}
|
||||
)
|
||||
_spec.ScanValues = func(columns []string) ([]any, error) {
|
||||
return (*Notifier).scanValues(nil, columns)
|
||||
}
|
||||
_spec.Assign = func(columns []string, values []any) error {
|
||||
node := &Notifier{config: nq.config}
|
||||
nodes = append(nodes, node)
|
||||
node.Edges.loadedTypes = loadedTypes
|
||||
return node.assignValues(columns, values)
|
||||
}
|
||||
for i := range hooks {
|
||||
hooks[i](ctx, _spec)
|
||||
}
|
||||
if err := sqlgraph.QueryNodes(ctx, nq.driver, _spec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(nodes) == 0 {
|
||||
return nodes, nil
|
||||
}
|
||||
if query := nq.withGroup; query != nil {
|
||||
if err := nq.loadGroup(ctx, query, nodes, nil,
|
||||
func(n *Notifier, e *Group) { n.Edges.Group = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if query := nq.withUser; query != nil {
|
||||
if err := nq.loadUser(ctx, query, nodes, nil,
|
||||
func(n *Notifier, e *User) { n.Edges.User = e }); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nodes, nil
|
||||
}
|
||||
|
||||
func (nq *NotifierQuery) loadGroup(ctx context.Context, query *GroupQuery, nodes []*Notifier, init func(*Notifier), assign func(*Notifier, *Group)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Notifier)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].GroupID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(group.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (nq *NotifierQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*Notifier, init func(*Notifier), assign func(*Notifier, *User)) error {
|
||||
ids := make([]uuid.UUID, 0, len(nodes))
|
||||
nodeids := make(map[uuid.UUID][]*Notifier)
|
||||
for i := range nodes {
|
||||
fk := nodes[i].UserID
|
||||
if _, ok := nodeids[fk]; !ok {
|
||||
ids = append(ids, fk)
|
||||
}
|
||||
nodeids[fk] = append(nodeids[fk], nodes[i])
|
||||
}
|
||||
if len(ids) == 0 {
|
||||
return nil
|
||||
}
|
||||
query.Where(user.IDIn(ids...))
|
||||
neighbors, err := query.All(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, n := range neighbors {
|
||||
nodes, ok := nodeids[n.ID]
|
||||
if !ok {
|
||||
return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID)
|
||||
}
|
||||
for i := range nodes {
|
||||
assign(nodes[i], n)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nq *NotifierQuery) sqlCount(ctx context.Context) (int, error) {
|
||||
_spec := nq.querySpec()
|
||||
_spec.Node.Columns = nq.ctx.Fields
|
||||
if len(nq.ctx.Fields) > 0 {
|
||||
_spec.Unique = nq.ctx.Unique != nil && *nq.ctx.Unique
|
||||
}
|
||||
return sqlgraph.CountNodes(ctx, nq.driver, _spec)
|
||||
}
|
||||
|
||||
func (nq *NotifierQuery) querySpec() *sqlgraph.QuerySpec {
|
||||
_spec := sqlgraph.NewQuerySpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
|
||||
_spec.From = nq.sql
|
||||
if unique := nq.ctx.Unique; unique != nil {
|
||||
_spec.Unique = *unique
|
||||
} else if nq.path != nil {
|
||||
_spec.Unique = true
|
||||
}
|
||||
if fields := nq.ctx.Fields; len(fields) > 0 {
|
||||
_spec.Node.Columns = make([]string, 0, len(fields))
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, notifier.FieldID)
|
||||
for i := range fields {
|
||||
if fields[i] != notifier.FieldID {
|
||||
_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
if ps := nq.predicates; len(ps) > 0 {
|
||||
_spec.Predicate = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
if limit := nq.ctx.Limit; limit != nil {
|
||||
_spec.Limit = *limit
|
||||
}
|
||||
if offset := nq.ctx.Offset; offset != nil {
|
||||
_spec.Offset = *offset
|
||||
}
|
||||
if ps := nq.order; len(ps) > 0 {
|
||||
_spec.Order = func(selector *sql.Selector) {
|
||||
for i := range ps {
|
||||
ps[i](selector)
|
||||
}
|
||||
}
|
||||
}
|
||||
return _spec
|
||||
}
|
||||
|
||||
func (nq *NotifierQuery) sqlQuery(ctx context.Context) *sql.Selector {
|
||||
    builder := sql.Dialect(nq.driver.Dialect())
    t1 := builder.Table(notifier.Table)
    columns := nq.ctx.Fields
    if len(columns) == 0 {
        columns = notifier.Columns
    }
    selector := builder.Select(t1.Columns(columns...)...).From(t1)
    if nq.sql != nil {
        selector = nq.sql
        selector.Select(selector.Columns(columns...)...)
    }
    if nq.ctx.Unique != nil && *nq.ctx.Unique {
        selector.Distinct()
    }
    for _, p := range nq.predicates {
        p(selector)
    }
    for _, p := range nq.order {
        p(selector)
    }
    if offset := nq.ctx.Offset; offset != nil {
        // limit is mandatory for offset clause. We start
        // with default value, and override it below if needed.
        selector.Offset(*offset).Limit(math.MaxInt32)
    }
    if limit := nq.ctx.Limit; limit != nil {
        selector.Limit(*limit)
    }
    return selector
}

// NotifierGroupBy is the group-by builder for Notifier entities.
type NotifierGroupBy struct {
    selector
    build *NotifierQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (ngb *NotifierGroupBy) Aggregate(fns ...AggregateFunc) *NotifierGroupBy {
    ngb.fns = append(ngb.fns, fns...)
    return ngb
}

// Scan applies the selector query and scans the result into the given value.
func (ngb *NotifierGroupBy) Scan(ctx context.Context, v any) error {
    ctx = setContextOp(ctx, ngb.build.ctx, "GroupBy")
    if err := ngb.build.prepareQuery(ctx); err != nil {
        return err
    }
    return scanWithInterceptors[*NotifierQuery, *NotifierGroupBy](ctx, ngb.build, ngb, ngb.build.inters, v)
}

func (ngb *NotifierGroupBy) sqlScan(ctx context.Context, root *NotifierQuery, v any) error {
    selector := root.sqlQuery(ctx).Select()
    aggregation := make([]string, 0, len(ngb.fns))
    for _, fn := range ngb.fns {
        aggregation = append(aggregation, fn(selector))
    }
    if len(selector.SelectedColumns()) == 0 {
        columns := make([]string, 0, len(*ngb.flds)+len(ngb.fns))
        for _, f := range *ngb.flds {
            columns = append(columns, selector.C(f))
        }
        columns = append(columns, aggregation...)
        selector.Select(columns...)
    }
    selector.GroupBy(selector.Columns(*ngb.flds...)...)
    if err := selector.Err(); err != nil {
        return err
    }
    rows := &sql.Rows{}
    query, args := selector.Query()
    if err := ngb.build.driver.Query(ctx, query, args, rows); err != nil {
        return err
    }
    defer rows.Close()
    return sql.ScanSlice(rows, v)
}

// NotifierSelect is the builder for selecting fields of Notifier entities.
type NotifierSelect struct {
    *NotifierQuery
    selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (ns *NotifierSelect) Aggregate(fns ...AggregateFunc) *NotifierSelect {
    ns.fns = append(ns.fns, fns...)
    return ns
}

// Scan applies the selector query and scans the result into the given value.
func (ns *NotifierSelect) Scan(ctx context.Context, v any) error {
    ctx = setContextOp(ctx, ns.ctx, "Select")
    if err := ns.prepareQuery(ctx); err != nil {
        return err
    }
    return scanWithInterceptors[*NotifierQuery, *NotifierSelect](ctx, ns.NotifierQuery, ns, ns.inters, v)
}

func (ns *NotifierSelect) sqlScan(ctx context.Context, root *NotifierQuery, v any) error {
    selector := root.sqlQuery(ctx)
    aggregation := make([]string, 0, len(ns.fns))
    for _, fn := range ns.fns {
        aggregation = append(aggregation, fn(selector))
    }
    switch n := len(*ns.selector.flds); {
    case n == 0 && len(aggregation) > 0:
        selector.Select(aggregation...)
    case n != 0 && len(aggregation) > 0:
        selector.AppendSelect(aggregation...)
    }
    rows := &sql.Rows{}
    query, args := selector.Query()
    if err := ns.driver.Query(ctx, query, args, rows); err != nil {
        return err
    }
    defer rows.Close()
    return sql.ScanSlice(rows, v)
}
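For context, the generated group-by and select builders above are normally driven through the generated ent client rather than constructed by hand. A minimal sketch of that usage, assuming the standard generated client.Notifier accessor and the ent.Count() aggregate helper (neither of which appears in this diff):

    import (
        "context"

        "github.com/hay-kot/homebox/backend/internal/data/ent"
        "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
    )

    // groupNotifiersByActive counts notifiers per is_active value using the
    // generated NotifierGroupBy builder. client.Notifier and ent.Count() are
    // assumed from ent's standard generated API; they are not part of this diff.
    func groupNotifiersByActive(ctx context.Context, client *ent.Client) (map[bool]int, error) {
        var rows []struct {
            IsActive bool `json:"is_active"`
            Count    int  `json:"count"`
        }
        err := client.Notifier.Query().
            GroupBy(notifier.FieldIsActive).
            Aggregate(ent.Count()).
            Scan(ctx, &rows)
        if err != nil {
            return nil, err
        }
        out := make(map[bool]int, len(rows))
        for _, r := range rows {
            out[r.IsActive] = r.Count
        }
        return out, nil
    }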
517
backend/internal/data/ent/notifier_update.go
Normal file
@@ -0,0 +1,517 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
    "context"
    "errors"
    "fmt"
    "time"

    "entgo.io/ent/dialect/sql"
    "entgo.io/ent/dialect/sql/sqlgraph"
    "entgo.io/ent/schema/field"
    "github.com/google/uuid"
    "github.com/hay-kot/homebox/backend/internal/data/ent/group"
    "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
    "github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
    "github.com/hay-kot/homebox/backend/internal/data/ent/user"
)

// NotifierUpdate is the builder for updating Notifier entities.
type NotifierUpdate struct {
    config
    hooks    []Hook
    mutation *NotifierMutation
}

// Where appends a list predicates to the NotifierUpdate builder.
func (nu *NotifierUpdate) Where(ps ...predicate.Notifier) *NotifierUpdate {
    nu.mutation.Where(ps...)
    return nu
}

// SetUpdatedAt sets the "updated_at" field.
func (nu *NotifierUpdate) SetUpdatedAt(t time.Time) *NotifierUpdate {
    nu.mutation.SetUpdatedAt(t)
    return nu
}

// SetGroupID sets the "group_id" field.
func (nu *NotifierUpdate) SetGroupID(u uuid.UUID) *NotifierUpdate {
    nu.mutation.SetGroupID(u)
    return nu
}

// SetUserID sets the "user_id" field.
func (nu *NotifierUpdate) SetUserID(u uuid.UUID) *NotifierUpdate {
    nu.mutation.SetUserID(u)
    return nu
}

// SetName sets the "name" field.
func (nu *NotifierUpdate) SetName(s string) *NotifierUpdate {
    nu.mutation.SetName(s)
    return nu
}

// SetURL sets the "url" field.
func (nu *NotifierUpdate) SetURL(s string) *NotifierUpdate {
    nu.mutation.SetURL(s)
    return nu
}

// SetIsActive sets the "is_active" field.
func (nu *NotifierUpdate) SetIsActive(b bool) *NotifierUpdate {
    nu.mutation.SetIsActive(b)
    return nu
}

// SetNillableIsActive sets the "is_active" field if the given value is not nil.
func (nu *NotifierUpdate) SetNillableIsActive(b *bool) *NotifierUpdate {
    if b != nil {
        nu.SetIsActive(*b)
    }
    return nu
}

// SetGroup sets the "group" edge to the Group entity.
func (nu *NotifierUpdate) SetGroup(g *Group) *NotifierUpdate {
    return nu.SetGroupID(g.ID)
}

// SetUser sets the "user" edge to the User entity.
func (nu *NotifierUpdate) SetUser(u *User) *NotifierUpdate {
    return nu.SetUserID(u.ID)
}

// Mutation returns the NotifierMutation object of the builder.
func (nu *NotifierUpdate) Mutation() *NotifierMutation {
    return nu.mutation
}

// ClearGroup clears the "group" edge to the Group entity.
func (nu *NotifierUpdate) ClearGroup() *NotifierUpdate {
    nu.mutation.ClearGroup()
    return nu
}

// ClearUser clears the "user" edge to the User entity.
func (nu *NotifierUpdate) ClearUser() *NotifierUpdate {
    nu.mutation.ClearUser()
    return nu
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (nu *NotifierUpdate) Save(ctx context.Context) (int, error) {
    nu.defaults()
    return withHooks[int, NotifierMutation](ctx, nu.sqlSave, nu.mutation, nu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (nu *NotifierUpdate) SaveX(ctx context.Context) int {
    affected, err := nu.Save(ctx)
    if err != nil {
        panic(err)
    }
    return affected
}

// Exec executes the query.
func (nu *NotifierUpdate) Exec(ctx context.Context) error {
    _, err := nu.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (nu *NotifierUpdate) ExecX(ctx context.Context) {
    if err := nu.Exec(ctx); err != nil {
        panic(err)
    }
}

// defaults sets the default values of the builder before save.
func (nu *NotifierUpdate) defaults() {
    if _, ok := nu.mutation.UpdatedAt(); !ok {
        v := notifier.UpdateDefaultUpdatedAt()
        nu.mutation.SetUpdatedAt(v)
    }
}

// check runs all checks and user-defined validators on the builder.
func (nu *NotifierUpdate) check() error {
    if v, ok := nu.mutation.Name(); ok {
        if err := notifier.NameValidator(v); err != nil {
            return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)}
        }
    }
    if v, ok := nu.mutation.URL(); ok {
        if err := notifier.URLValidator(v); err != nil {
            return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)}
        }
    }
    if _, ok := nu.mutation.GroupID(); nu.mutation.GroupCleared() && !ok {
        return errors.New(`ent: clearing a required unique edge "Notifier.group"`)
    }
    if _, ok := nu.mutation.UserID(); nu.mutation.UserCleared() && !ok {
        return errors.New(`ent: clearing a required unique edge "Notifier.user"`)
    }
    return nil
}

func (nu *NotifierUpdate) sqlSave(ctx context.Context) (n int, err error) {
    if err := nu.check(); err != nil {
        return n, err
    }
    _spec := sqlgraph.NewUpdateSpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
    if ps := nu.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    if value, ok := nu.mutation.UpdatedAt(); ok {
        _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value)
    }
    if value, ok := nu.mutation.Name(); ok {
        _spec.SetField(notifier.FieldName, field.TypeString, value)
    }
    if value, ok := nu.mutation.URL(); ok {
        _spec.SetField(notifier.FieldURL, field.TypeString, value)
    }
    if value, ok := nu.mutation.IsActive(); ok {
        _spec.SetField(notifier.FieldIsActive, field.TypeBool, value)
    }
    if nu.mutation.GroupCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.GroupTable,
            Columns: []string{notifier.GroupColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := nu.mutation.GroupIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.GroupTable,
            Columns: []string{notifier.GroupColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    if nu.mutation.UserCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.UserTable,
            Columns: []string{notifier.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := nu.mutation.UserIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.UserTable,
            Columns: []string{notifier.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    if n, err = sqlgraph.UpdateNodes(ctx, nu.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{notifier.Label}
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{msg: err.Error(), wrap: err}
        }
        return 0, err
    }
    nu.mutation.done = true
    return n, nil
}

// NotifierUpdateOne is the builder for updating a single Notifier entity.
type NotifierUpdateOne struct {
    config
    fields   []string
    hooks    []Hook
    mutation *NotifierMutation
}

// SetUpdatedAt sets the "updated_at" field.
func (nuo *NotifierUpdateOne) SetUpdatedAt(t time.Time) *NotifierUpdateOne {
    nuo.mutation.SetUpdatedAt(t)
    return nuo
}

// SetGroupID sets the "group_id" field.
func (nuo *NotifierUpdateOne) SetGroupID(u uuid.UUID) *NotifierUpdateOne {
    nuo.mutation.SetGroupID(u)
    return nuo
}

// SetUserID sets the "user_id" field.
func (nuo *NotifierUpdateOne) SetUserID(u uuid.UUID) *NotifierUpdateOne {
    nuo.mutation.SetUserID(u)
    return nuo
}

// SetName sets the "name" field.
func (nuo *NotifierUpdateOne) SetName(s string) *NotifierUpdateOne {
    nuo.mutation.SetName(s)
    return nuo
}

// SetURL sets the "url" field.
func (nuo *NotifierUpdateOne) SetURL(s string) *NotifierUpdateOne {
    nuo.mutation.SetURL(s)
    return nuo
}

// SetIsActive sets the "is_active" field.
func (nuo *NotifierUpdateOne) SetIsActive(b bool) *NotifierUpdateOne {
    nuo.mutation.SetIsActive(b)
    return nuo
}

// SetNillableIsActive sets the "is_active" field if the given value is not nil.
func (nuo *NotifierUpdateOne) SetNillableIsActive(b *bool) *NotifierUpdateOne {
    if b != nil {
        nuo.SetIsActive(*b)
    }
    return nuo
}

// SetGroup sets the "group" edge to the Group entity.
func (nuo *NotifierUpdateOne) SetGroup(g *Group) *NotifierUpdateOne {
    return nuo.SetGroupID(g.ID)
}

// SetUser sets the "user" edge to the User entity.
func (nuo *NotifierUpdateOne) SetUser(u *User) *NotifierUpdateOne {
    return nuo.SetUserID(u.ID)
}

// Mutation returns the NotifierMutation object of the builder.
func (nuo *NotifierUpdateOne) Mutation() *NotifierMutation {
    return nuo.mutation
}

// ClearGroup clears the "group" edge to the Group entity.
func (nuo *NotifierUpdateOne) ClearGroup() *NotifierUpdateOne {
    nuo.mutation.ClearGroup()
    return nuo
}

// ClearUser clears the "user" edge to the User entity.
func (nuo *NotifierUpdateOne) ClearUser() *NotifierUpdateOne {
    nuo.mutation.ClearUser()
    return nuo
}

// Where appends a list predicates to the NotifierUpdate builder.
func (nuo *NotifierUpdateOne) Where(ps ...predicate.Notifier) *NotifierUpdateOne {
    nuo.mutation.Where(ps...)
    return nuo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (nuo *NotifierUpdateOne) Select(field string, fields ...string) *NotifierUpdateOne {
    nuo.fields = append([]string{field}, fields...)
    return nuo
}

// Save executes the query and returns the updated Notifier entity.
func (nuo *NotifierUpdateOne) Save(ctx context.Context) (*Notifier, error) {
    nuo.defaults()
    return withHooks[*Notifier, NotifierMutation](ctx, nuo.sqlSave, nuo.mutation, nuo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (nuo *NotifierUpdateOne) SaveX(ctx context.Context) *Notifier {
    node, err := nuo.Save(ctx)
    if err != nil {
        panic(err)
    }
    return node
}

// Exec executes the query on the entity.
func (nuo *NotifierUpdateOne) Exec(ctx context.Context) error {
    _, err := nuo.Save(ctx)
    return err
}

// ExecX is like Exec, but panics if an error occurs.
func (nuo *NotifierUpdateOne) ExecX(ctx context.Context) {
    if err := nuo.Exec(ctx); err != nil {
        panic(err)
    }
}

// defaults sets the default values of the builder before save.
func (nuo *NotifierUpdateOne) defaults() {
    if _, ok := nuo.mutation.UpdatedAt(); !ok {
        v := notifier.UpdateDefaultUpdatedAt()
        nuo.mutation.SetUpdatedAt(v)
    }
}

// check runs all checks and user-defined validators on the builder.
func (nuo *NotifierUpdateOne) check() error {
    if v, ok := nuo.mutation.Name(); ok {
        if err := notifier.NameValidator(v); err != nil {
            return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "Notifier.name": %w`, err)}
        }
    }
    if v, ok := nuo.mutation.URL(); ok {
        if err := notifier.URLValidator(v); err != nil {
            return &ValidationError{Name: "url", err: fmt.Errorf(`ent: validator failed for field "Notifier.url": %w`, err)}
        }
    }
    if _, ok := nuo.mutation.GroupID(); nuo.mutation.GroupCleared() && !ok {
        return errors.New(`ent: clearing a required unique edge "Notifier.group"`)
    }
    if _, ok := nuo.mutation.UserID(); nuo.mutation.UserCleared() && !ok {
        return errors.New(`ent: clearing a required unique edge "Notifier.user"`)
    }
    return nil
}

func (nuo *NotifierUpdateOne) sqlSave(ctx context.Context) (_node *Notifier, err error) {
    if err := nuo.check(); err != nil {
        return _node, err
    }
    _spec := sqlgraph.NewUpdateSpec(notifier.Table, notifier.Columns, sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID))
    id, ok := nuo.mutation.ID()
    if !ok {
        return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Notifier.id" for update`)}
    }
    _spec.Node.ID.Value = id
    if fields := nuo.fields; len(fields) > 0 {
        _spec.Node.Columns = make([]string, 0, len(fields))
        _spec.Node.Columns = append(_spec.Node.Columns, notifier.FieldID)
        for _, f := range fields {
            if !notifier.ValidColumn(f) {
                return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
            }
            if f != notifier.FieldID {
                _spec.Node.Columns = append(_spec.Node.Columns, f)
            }
        }
    }
    if ps := nuo.mutation.predicates; len(ps) > 0 {
        _spec.Predicate = func(selector *sql.Selector) {
            for i := range ps {
                ps[i](selector)
            }
        }
    }
    if value, ok := nuo.mutation.UpdatedAt(); ok {
        _spec.SetField(notifier.FieldUpdatedAt, field.TypeTime, value)
    }
    if value, ok := nuo.mutation.Name(); ok {
        _spec.SetField(notifier.FieldName, field.TypeString, value)
    }
    if value, ok := nuo.mutation.URL(); ok {
        _spec.SetField(notifier.FieldURL, field.TypeString, value)
    }
    if value, ok := nuo.mutation.IsActive(); ok {
        _spec.SetField(notifier.FieldIsActive, field.TypeBool, value)
    }
    if nuo.mutation.GroupCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.GroupTable,
            Columns: []string{notifier.GroupColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := nuo.mutation.GroupIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.GroupTable,
            Columns: []string{notifier.GroupColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    if nuo.mutation.UserCleared() {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.UserTable,
            Columns: []string{notifier.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
            },
        }
        _spec.Edges.Clear = append(_spec.Edges.Clear, edge)
    }
    if nodes := nuo.mutation.UserIDs(); len(nodes) > 0 {
        edge := &sqlgraph.EdgeSpec{
            Rel:     sqlgraph.M2O,
            Inverse: true,
            Table:   notifier.UserTable,
            Columns: []string{notifier.UserColumn},
            Bidi:    false,
            Target: &sqlgraph.EdgeTarget{
                IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
            },
        }
        for _, k := range nodes {
            edge.Target.Nodes = append(edge.Target.Nodes, k)
        }
        _spec.Edges.Add = append(_spec.Edges.Add, edge)
    }
    _node = &Notifier{config: nuo.config}
    _spec.Assign = _node.assignValues
    _spec.ScanValues = _node.scanValues
    if err = sqlgraph.UpdateNode(ctx, nuo.driver, _spec); err != nil {
        if _, ok := err.(*sqlgraph.NotFoundError); ok {
            err = &NotFoundError{notifier.Label}
        } else if sqlgraph.IsConstraintError(err) {
            err = &ConstraintError{msg: err.Error(), wrap: err}
        }
        return nil, err
    }
    nuo.mutation.done = true
    return _node, nil
}
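The NotifierUpdate and NotifierUpdateOne builders in this new file follow the same shape as the other generated entities: defaults() stamps updated_at, check() runs the name/url validators, and sqlSave() assembles the update spec. A rough usage sketch, assuming the generated client.Notifier.UpdateOneID helper that this diff does not show:

    import (
        "context"

        "github.com/google/uuid"
        "github.com/hay-kot/homebox/backend/internal/data/ent"
    )

    // disableNotifier turns a notifier off; Save runs check() and then sqlSave(),
    // and defaults() fills updated_at automatically when it is not set explicitly.
    func disableNotifier(ctx context.Context, client *ent.Client, id uuid.UUID) (*ent.Notifier, error) {
        return client.Notifier.
            UpdateOneID(id). // assumed generated helper on the Notifier client
            SetIsActive(false).
            Save(ctx)
    }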
@@ -39,5 +39,8 @@ type Location func(*sql.Selector)
// MaintenanceEntry is the predicate function for maintenanceentry builders.
type MaintenanceEntry func(*sql.Selector)

// Notifier is the predicate function for notifier builders.
type Notifier func(*sql.Selector)

// User is the predicate function for user builders.
type User func(*sql.Selector)

@@ -16,6 +16,7 @@ import (
    "github.com/hay-kot/homebox/backend/internal/data/ent/label"
    "github.com/hay-kot/homebox/backend/internal/data/ent/location"
    "github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
    "github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
    "github.com/hay-kot/homebox/backend/internal/data/ent/schema"
    "github.com/hay-kot/homebox/backend/internal/data/ent/user"
)
@@ -476,6 +477,65 @@ func init() {
    maintenanceentryDescID := maintenanceentryMixinFields0[0].Descriptor()
    // maintenanceentry.DefaultID holds the default value on creation for the id field.
    maintenanceentry.DefaultID = maintenanceentryDescID.Default.(func() uuid.UUID)
    notifierMixin := schema.Notifier{}.Mixin()
    notifierMixinFields0 := notifierMixin[0].Fields()
    _ = notifierMixinFields0
    notifierFields := schema.Notifier{}.Fields()
    _ = notifierFields
    // notifierDescCreatedAt is the schema descriptor for created_at field.
    notifierDescCreatedAt := notifierMixinFields0[1].Descriptor()
    // notifier.DefaultCreatedAt holds the default value on creation for the created_at field.
    notifier.DefaultCreatedAt = notifierDescCreatedAt.Default.(func() time.Time)
    // notifierDescUpdatedAt is the schema descriptor for updated_at field.
    notifierDescUpdatedAt := notifierMixinFields0[2].Descriptor()
    // notifier.DefaultUpdatedAt holds the default value on creation for the updated_at field.
    notifier.DefaultUpdatedAt = notifierDescUpdatedAt.Default.(func() time.Time)
    // notifier.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field.
    notifier.UpdateDefaultUpdatedAt = notifierDescUpdatedAt.UpdateDefault.(func() time.Time)
    // notifierDescName is the schema descriptor for name field.
    notifierDescName := notifierFields[0].Descriptor()
    // notifier.NameValidator is a validator for the "name" field. It is called by the builders before save.
    notifier.NameValidator = func() func(string) error {
        validators := notifierDescName.Validators
        fns := [...]func(string) error{
            validators[0].(func(string) error),
            validators[1].(func(string) error),
        }
        return func(name string) error {
            for _, fn := range fns {
                if err := fn(name); err != nil {
                    return err
                }
            }
            return nil
        }
    }()
    // notifierDescURL is the schema descriptor for url field.
    notifierDescURL := notifierFields[1].Descriptor()
    // notifier.URLValidator is a validator for the "url" field. It is called by the builders before save.
    notifier.URLValidator = func() func(string) error {
        validators := notifierDescURL.Validators
        fns := [...]func(string) error{
            validators[0].(func(string) error),
            validators[1].(func(string) error),
        }
        return func(url string) error {
            for _, fn := range fns {
                if err := fn(url); err != nil {
                    return err
                }
            }
            return nil
        }
    }()
    // notifierDescIsActive is the schema descriptor for is_active field.
    notifierDescIsActive := notifierFields[2].Descriptor()
    // notifier.DefaultIsActive holds the default value on creation for the is_active field.
    notifier.DefaultIsActive = notifierDescIsActive.Default.(bool)
    // notifierDescID is the schema descriptor for id field.
    notifierDescID := notifierMixinFields0[0].Descriptor()
    // notifier.DefaultID holds the default value on creation for the id field.
    notifier.DefaultID = notifierDescID.Default.(func() uuid.UUID)
    userMixin := schema.User{}.Mixin()
    userMixinFields0 := userMixin[0].Fields()
    _ = userMixinFields0
@@ -550,7 +610,7 @@ func init() {
    // user.DefaultIsSuperuser holds the default value on creation for the is_superuser field.
    user.DefaultIsSuperuser = userDescIsSuperuser.Default.(bool)
    // userDescSuperuser is the schema descriptor for superuser field.
    userDescSuperuser := userFields[5].Descriptor()
    userDescSuperuser := userFields[4].Descriptor()
    // user.DefaultSuperuser holds the default value on creation for the superuser field.
    user.DefaultSuperuser = userDescSuperuser.Default.(bool)
    // userDescID is the schema descriptor for id field.

@@ -5,6 +5,6 @@ package runtime
// The schema-stitching logic is generated in github.com/hay-kot/homebox/backend/internal/data/ent/runtime.go

const (
    Version = "v0.11.8" // Version of ent codegen.
    Sum     = "h1:M/M0QL1CYCUSdqGRXUrXhFYSDRJPsOOrr+RLEej/gyQ=" // Sum of ent codegen.
    Version = "v0.11.10" // Version of ent codegen.
    Sum     = "h1:iqn32ybY5HRW3xSAyMNdNKpZhKgMf1Zunsej9yPKUI8=" // Sum of ent codegen.
)

@@ -16,6 +16,7 @@ type Document struct {
func (Document) Mixin() []ent.Mixin {
    return []ent.Mixin{
        mixins.BaseMixin{},
        GroupMixin{ref: "documents"},
    }
}

@@ -34,10 +35,6 @@ func (Document) Fields() []ent.Field {
// Edges of the Document.
func (Document) Edges() []ent.Edge {
    return []ent.Edge{
        edge.From("group", Group.Type).
            Ref("documents").
            Required().
            Unique(),
        edge.To("attachments", Attachment.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,

@@ -5,6 +5,8 @@ import (
    "entgo.io/ent/dialect/entsql"
    "entgo.io/ent/schema/edge"
    "entgo.io/ent/schema/field"
    "entgo.io/ent/schema/mixin"
    "github.com/google/uuid"
    "github.com/hay-kot/homebox/backend/internal/data/ent/schema/mixins"
)

@@ -27,36 +29,59 @@ func (Group) Fields() []ent.Field {
            NotEmpty(),
        field.Enum("currency").
            Default("usd").
            Values("usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "sek", "dkk", "inr", "rmb", "bgn", "chf", "pln", "try", "ron"),
            Values("usd", "eur", "gbp", "jpy", "zar", "aud", "nok", "nzd", "sek", "dkk", "inr", "rmb", "bgn", "chf", "pln", "try", "ron", "czk"),
    }
}

// Edges of the Home.
func (Group) Edges() []ent.Edge {
    owned := func(name string, t any) ent.Edge {
        return edge.To(name, t).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            })
    }

    return []ent.Edge{
        edge.To("users", User.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            }),
        edge.To("locations", Location.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            }),
        edge.To("items", Item.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            }),
        edge.To("labels", Label.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            }),
        edge.To("documents", Document.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            }),
        edge.To("invitation_tokens", GroupInvitationToken.Type).
            Annotations(entsql.Annotation{
                OnDelete: entsql.Cascade,
            }),
        owned("users", User.Type),
        owned("locations", Location.Type),
        owned("items", Item.Type),
        owned("labels", Label.Type),
        owned("documents", Document.Type),
        owned("invitation_tokens", GroupInvitationToken.Type),
        owned("notifiers", Notifier.Type),
        // $scaffold_edge
    }
}

// GroupMixin when embedded in an ent.Schema, adds a reference to
// the Group entity.
type GroupMixin struct {
    ref   string
    field string
    mixin.Schema
}

func (g GroupMixin) Fields() []ent.Field {
    if g.field != "" {
        return []ent.Field{
            field.UUID(g.field, uuid.UUID{}),
        }
    }

    return nil

}

func (g GroupMixin) Edges() []ent.Edge {
    edge := edge.From("group", Group.Type).
        Ref(g.ref).
        Unique().
        Required()

    if g.field != "" {
        edge = edge.Field(g.field)
    }

    return []ent.Edge{edge}
}

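The GroupMixin refactor above is what lets a schema declare its group edge in one line, as document.go now does with GroupMixin{ref: "documents"}; when a field name is also given, the mixin exposes the foreign key as a regular column. A hedged sketch of how another schema might wire it up; the Notifier schema itself is not part of this excerpt, so the exact values below are assumptions:

    // Hypothetical mixin wiring for a schema that wants both the edge and a
    // selectable group_id column (values assumed, not taken from this diff).
    func (Notifier) Mixin() []ent.Mixin {
        return []ent.Mixin{
            mixins.BaseMixin{},
            GroupMixin{
                ref:   "notifiers", // would match owned("notifiers", Notifier.Type) on Group above
                field: "group_id",  // surfaces the edge as a group_id field
            },
        }
    }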
Some files were not shown because too many files have changed in this diff.