Compare commits


157 Commits

Author SHA1 Message Date
Hayden
8cc0f30291 fix: allow zeroing out asset ids (#624) 2023-11-15 18:46:47 -09:00
Hayden
afbc6a49ac fix: images in child items (#623)
* support parentID search

* fetch images for item children
2023-11-15 18:41:24 -09:00
renovate[bot]
2594d4cdb4 chore(deps): update dependency @types/markdown-it to v13 (#577)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 18:18:19 -09:00
renovate[bot]
2eafa8e72f chore(deps): update dependency @vite-pwa/nuxt to ^0.2.0 (#616)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 18:18:07 -09:00
renovate[bot]
e65d44fa9e fix(deps): update dependency @pinia/nuxt to ^0.5.0 (#620)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 18:17:57 -09:00
Hayden
eeae790fe4 feat: expose timeout variables (#622)
* expose timeout variables

* formatting
2023-11-15 18:17:43 -09:00
renovate[bot]
a70ee33759 chore(deps): update actions/setup-node action to v4 (#621)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 18:15:31 -09:00
Hayden
80e2071300 fix: use theme aware background color (#619) 2023-11-15 18:01:03 -09:00
renovate[bot]
db27d34b4f chore(deps): update pnpm/action-setup action to v2.4.0 (#511)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 17:58:30 -09:00
Owen Valentine
da22074ed3 feat : Validate bark, ntfy, generic+ shortcut (#591) 2023-11-15 17:53:06 -09:00
Hayden
51ba15f84c fix: ensure urls isn't encoded (#618) 2023-11-15 17:51:19 -09:00
Hayden
b408318acb fix: filepath sep on windows (#615) 2023-11-15 17:43:44 -09:00
Hayden
c0e8e34065 chore: bump all go dependencies (#614)
* bump all

* code-generation
2023-11-15 17:30:49 -09:00
Hayden
4738a9b131 chore: rewrite generator to resolve strange name generation (#612)
* rewrite generator to resolve strange name generation

* fix asset id types

* ignore errored types

* remove depreciated calls

* use more random words

* random user
2023-11-15 17:19:51 -09:00
renovate[bot]
b08e52104c chore(deps): update actions/checkout action to v4 (#575)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 08:08:46 -09:00
renovate[bot]
3e2ab29054 chore(deps): update docker/setup-buildx-action action to v3 (#579)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 08:08:37 -09:00
renovate[bot]
e5f66d99bc chore(deps): update dependency mkdocs-material to v9.4.8 (#592)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-11-15 08:08:26 -09:00
Hayden
0995478cc0 fix: restore location section (#587) 2023-10-15 06:37:47 -08:00
Hayden
ae4b95301f fix: infinite redirect issue (#583) 2023-10-10 05:43:44 -08:00
Hayden
d8482f3a13 Revert "chore(deps): update dependency nuxt to v3.7.4 (#554)" (#580)
This reverts commit 2cd3c15215.
2023-10-06 20:29:27 -08:00
Hayden
1365bdfd46 refactor: rewrite to cookie based auth (#578)
* rewrite to cookie based auth

* remove interceptor
2023-10-06 19:44:43 -08:00
renovate[bot]
2cd3c15215 chore(deps): update dependency nuxt to v3.7.4 (#554)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 18:51:54 -08:00
renovate[bot]
0dc4fa5d98 fix(deps): update module modernc.org/sqlite to v1.26.0 (#574)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 18:51:44 -08:00
Hayden
318b8be192 feat: primary images (#576)
* add support for primary images

* fix locked loading state issue

* add action to auto-set images
2023-10-06 18:51:08 -08:00
Hayden
63a966c526 fix: field values request fails (#573) 2023-10-06 14:10:44 -08:00
Hayden
db16d3fb23 feat: make selectables clearable (#572) 2023-10-06 13:32:49 -08:00
renovate[bot]
1952b9f1cb fix(deps): update module golang.org/x/crypto to v0.14.0 (#570)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 13:24:12 -08:00
renovate[bot]
2b31d46ab3 fix(deps): update module github.com/rs/zerolog to v1.31.0 (#569)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 13:24:00 -08:00
Hayden
f3f96723b2 fix: ensure loading in toggled (#571) 2023-10-06 13:22:16 -08:00
renovate[bot]
b28bb2c4a8 fix(deps): update module github.com/yeqown/go-qrcode/writer/standard to v1.2.2 (#567)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 13:06:00 -08:00
renovate[bot]
a33cf54a33 fix(deps): update module github.com/containrrr/shoutrrr to v0.8.0 (#555)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 13:05:53 -08:00
Hayden
f13bf2958d pr: fixed incorrect sum of the total items price (#568)
* Fixed incorrect sum of the total items price

https://github.com/hay-kot/homebox/issues/458

* fix eslint errors

---------

Co-authored-by: Adamko <33964772+cRaZy92@users.noreply.github.com>
2023-10-06 12:50:55 -08:00
renovate[bot]
a9712c48af chore(deps): update dependency mkdocs-material to v9.4.4 (#553)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 12:31:51 -08:00
renovate[bot]
e68b7cf500 fix(deps): update module github.com/go-playground/validator/v10 to v10.15.5 (#556)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-10-06 12:30:31 -08:00
renovate[bot]
744a5bbb47 fix(deps): update module github.com/swaggo/swag to v1.16.2 (#552)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-14 06:19:57 -08:00
renovate[bot]
fc5698410b fix(deps): update module github.com/google/uuid to v1.3.1 (#551)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-14 06:19:37 -08:00
renovate[bot]
455163d637 fix(deps): update module golang.org/x/crypto to v0.13.0 (#532)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-14 06:19:28 -08:00
Aaron von Awesome
fbc7e6e33a fix: minor typo (#546) 2023-09-14 06:13:42 -08:00
renovate[bot]
5739b2005a chore(deps): update dependency eslint-config-prettier to v9 (#533)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-14 06:11:08 -08:00
renovate[bot]
5f41960c0a fix(deps): update module modernc.org/sqlite to v1.25.0 (#531)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-14 06:10:46 -08:00
renovate[bot]
c89aa738cf fix(deps): update module github.com/rs/zerolog to v1.30.0 (#517)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-09-14 06:10:37 -08:00
Jonathan Gawrych
94fd9c314d fix: mobile "Create and Add Another" goes off screen (#540) 2023-08-31 09:07:45 -08:00
Hayden
0876deb1e9 fix websocket over secure connection (#542)
* fix https connection

* explicity dependency
2023-08-24 06:28:56 -08:00
tctlrd
5438898b49 feat: add currencies XAG and XAU (#535)
* Added currencies XAG and XAU to currency.ts

I added XAG and XAU for myself and others who prefer to measure value with something of substance.

Review the ISO 4217 standard to view a full list of official currency codes including the ones I have added.

https://www.iso.org/iso-4217-currency-codes.html
https://en.wikipedia.org/wiki/ISO_4217

Example:
https://www.xe.com/currencyconverter/convert/?Amount=100&From=XAG&To=USD

API for exchange rates:
https://openexchangerates.org/

* Added field values xag and xau to group.go

* Update group.go
2023-08-23 09:29:22 -08:00
Hayden
9fa17bec90 update lock file 2023-08-09 21:49:32 -05:00
Cheng Gu
b5987f2e8d feat: set cookies' expires attribute and fix remember me (#530) 2023-08-09 18:48:39 -08:00
Hayden
2cbcc8bb1d feat: WebSocket based implementation of server sent events for cache busting (#527)
* rough implementation of WS based event system for server side notifications of mutation

* fix test construction

* fix deadlock on event bus

* disable linter error

* add item mutation events

* remove old event bus code

* refactor event system to use composables

* refresh items table when new item is added

* fix create form errors

* cleanup unnecessary calls

* fix importer erorrs + limit fn calls on import
2023-08-02 13:00:57 -08:00
Hayden
cceec06148 specify h3 dependency 2023-08-02 09:05:07 -05:00
Hayden
2e2eed143d try node 18 2023-08-02 09:01:47 -05:00
renovate[bot]
272cc5a370 chore(deps): update dependency vitest to ^0.34.0 (#529)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-08-02 05:59:31 -08:00
Hayden
275e106d72 build nightly rootless 2023-08-02 08:47:53 -05:00
Hayden
3f0e65a2ad include rootless dockerfile 2023-08-02 08:45:22 -05:00
Hayden
22bbaae08f feat: add support for create + add more for all create modals and support k… (#526)
* add support for create + add more for all create modals and support keyboard bindings

* listen for esc to close modals
2023-07-31 09:53:26 -08:00
Hayden
8c7d91ea52 fix: prevent resetting dialog state on error (#524) 2023-07-31 08:22:08 -08:00
Hayden
5a219f6a9c feat: support cmd+s / ctrl+s and rework button display on edit (#523) 2023-07-31 06:57:42 -08:00
Hayden
895017b28e fix: label prop not being passed to password input (#522) 2023-07-31 06:08:35 -08:00
Hayden
02ce52dbe3 fix: assert/asserts (#521) 2023-07-31 06:05:37 -08:00
Hayden
c5ae6b17f9 feat: more currency support (#520)
* add multiple new currencies

* add multiple new currencies

* remove duplicate yen
2023-07-31 05:59:36 -08:00
renovate[bot]
371fc0a6af chore(deps): update dependency mkdocs-material to v9.1.21 (#512)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-29 09:54:55 -08:00
Hayden
016780920d ui: rework location/labels pages (#475)
* formatting

* slimdown locations page

* update location/labels

* fix dependency issues

* fix type generator

* cleanup unused variables
2023-07-27 13:21:28 -08:00
db8200
06eb6c1f91 fix 3 places where API URLs were not constructed by function route (#451)
* Fixed 3 places where API URLs were not constructed by function route(path, params).

* autofix

---------

Co-authored-by: Hayden <64056131+hay-kot@users.noreply.github.com>
2023-07-22 20:11:29 -08:00
renovate[bot]
27dad0e118 fix(deps): update module github.com/swaggo/http-swagger to v2 (#508)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-22 20:10:52 -08:00
renovate[bot]
dc9446516a fix(deps): update module github.com/swaggo/http-swagger to v2 (#506)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-22 20:09:43 -08:00
Hayden
a042496c71 chore: bump all go deps (#507)
* bump all deps

* run code-gen
2023-07-22 19:57:51 -08:00
renovate[bot]
feab9f4c46 chore(deps): update dependency @vite-pwa/nuxt to ^0.1.0 (#474)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-22 19:46:08 -08:00
renovate[bot]
fe5622d62a fix(deps): update module golang.org/x/crypto to v0.11.0 (#495)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-22 19:45:56 -08:00
renovate[bot]
e759f2817e chore(deps): update dependency nuxt to v3.6.5 (#503)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-22 19:45:35 -08:00
renovate[bot]
60cc5c2710 chore(deps): update dependency mkdocs-material to v9.1.19 (#497)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-22 19:44:19 -08:00
renovate[bot]
25ccd678c9 chore(deps): update typescript-eslint monorepo to v6 (#500)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-20 11:13:42 -08:00
Pranshu Agrawal
a77b4cbe71 update "h3" verison to 1.7.1 (#502) 2023-07-20 11:13:18 -08:00
renovate[bot]
aae32b0d74 fix(deps): update module github.com/go-chi/chi/v5 to v5.0.10 (#493)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-18 12:08:23 -08:00
renovate[bot]
a94b43a19e fix(deps): update module github.com/ardanlabs/conf/v3 to v3.1.6 (#492)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-18 12:08:05 -08:00
renovate[bot]
9a4c2df552 chore(deps): update dependency vitest to ^0.33.0 (#494)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-18 12:07:55 -08:00
renovate[bot]
d5b89a755e chore(deps): update actions/setup-go action to v4 (#496)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-18 12:07:45 -08:00
renovate[bot]
a9acf62d93 chore(deps): update dependency mkdocs-material to v9.1.18 (#491)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:51:13 -08:00
renovate[bot]
c896e198dd fix(deps): update module github.com/swaggo/http-swagger to v2 (#490)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:39:42 -08:00
Cappy-Bara
c538518b4b fix: add parseDependency to swag (#486) 2023-07-16 17:37:56 -08:00
renovate[bot]
f66d14eeea fix(deps): update module github.com/stretchr/testify to v1.8.4 (#468)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:34:51 -08:00
renovate[bot]
bc8feac83c chore(deps): update dependency nuxt to v3.6.3 (#350)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:34:23 -08:00
renovate[bot]
40a98bcf30 fix(deps): update module modernc.org/sqlite to v1.24.0 (#437)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:34:00 -08:00
renovate[bot]
045e91d9ac fix(deps): update module github.com/mattn/go-sqlite3 to v1.14.17 (#470)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:33:35 -08:00
renovate[bot]
a80ab0f3e9 fix(deps): update module github.com/go-playground/validator/v10 to v10.14.1 (#478)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-07-16 17:33:22 -08:00
renovate[bot]
e5d209d407 chore(deps): update dependency mkdocs-material to v9.1.15 (#467)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-06-02 13:57:09 -08:00
Hayden
ef1531e561 feat: easily increment quantity (#473)
* fix vue version issue

* new patch API endpoint

* doc-gen

* new API class method for patch operations

* add quantity patch UI support

* fix typegen errors

* fix ts errors
2023-06-02 13:56:40 -08:00
renovate[bot]
4dd036abb2 chore(deps): update dependency @vite-pwa/nuxt to ^0.0.9 (#453)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-28 10:59:57 -08:00
Hayden
81e909ccfb chore: tidy (#466) 2023-05-28 10:59:44 -08:00
Hayden
0cb9d2a8e4 bump nuxt + fix CookieRef (#465) 2023-05-28 09:51:23 -08:00
renovate[bot]
cb16c0e829 fix(deps): update module github.com/go-playground/validator/v10 to v10.14.0 (#462)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-23 10:43:42 -08:00
renovate[bot]
9e067ee230 fix(deps): update module github.com/stretchr/testify to v1.8.3 (#460)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-23 10:43:34 -08:00
renovate[bot]
8b53d40a2a chore(deps): update dependency mkdocs-material to v9.1.14 (#461)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-23 10:42:49 -08:00
renovate[bot]
4c0ad7a5d8 chore(deps): update dependency mkdocs-material to v9.1.13 (#457)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-16 18:24:19 -08:00
D M
66e25ba068 feat: Low-Privileged and Distroless Docker Image (#372)
* feat: use distroless image and non-root user

* fix: remove conflicts after merge

* chore: Commen the Dockerfile

* chore: Update documentation to reflect image changes

* Split docker build in latest and latest-rootless

One more job added to the publish Github Action, to build and push TAG-rootless
images.

* fix: add missing workflow

* feat: update documentation about double tags

* feat: update readme with double tags

---------

Co-authored-by: daniele <daniele@coolbyte.eu>
2023-05-13 10:38:57 -08:00
renovate[bot]
56c98e6e3a chore(deps): update dependency @faker-js/faker to v8 (#449)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-13 10:34:57 -08:00
renovate[bot]
01f305a98e fix(deps): update github.com/gocarina/gocsv digest to 7f30c79 (#448)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-13 10:34:45 -08:00
renovate[bot]
e14cdaccdd fix(deps): update module golang.org/x/crypto to v0.9.0 (#447)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-13 10:34:33 -08:00
renovate[bot]
4ece25b58d chore(deps): update dependency mkdocs-material to v9.1.12 (#444)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-13 10:34:23 -08:00
renovate[bot]
c1957bb927 chore(deps): update dependency vitest to ^0.31.0 (#442)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-06 10:00:40 -08:00
renovate[bot]
636ca155e5 fix(deps): update module github.com/go-playground/validator/v10 to v10.13.0 (#438)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-06 10:00:33 -08:00
renovate[bot]
17a5b43609 chore(deps): update dependency mkdocs-material to v9.1.9 (#440)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-05-02 17:09:15 -08:00
renovate[bot]
b2b3ccf923 chore(deps): update dependency mkdocs-material to v9.1.8 (#435)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-24 23:08:32 -08:00
renovate[bot]
2272c7eb6b fix(deps): update module modernc.org/sqlite to v1.22.0 (#427)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-24 18:30:53 -08:00
renovate[bot]
181c324dd4 chore(deps): update dependency mkdocs-material to v9.1.7 (#432)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-24 18:30:41 -08:00
renovate[bot]
89912b18d7 fix(deps): update dependency @vueuse/router to v10 (#418)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-21 22:54:09 -08:00
renovate[bot]
85f2af4bc3 fix(deps): update module github.com/swaggo/swag to v1.16.1 (#426)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-21 22:53:29 -08:00
Hayden
21ad5a32c1 fix: add generic shoutrrr prefix to validators(#422) 2023-04-16 11:21:33 -08:00
renovate[bot]
4b51a4ad9a fix(deps): update module ariga.io/atlas to v0.10.1 (#395)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:37:57 -08:00
renovate[bot]
dd7e634b69 fix(deps): update dependency @vueuse/nuxt to v10 (#415)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:37:00 -08:00
renovate[bot]
351ec64bbc fix(deps): update module github.com/rs/zerolog to v1.29.1 (#414)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:36:14 -08:00
renovate[bot]
3a758e012f chore(deps): update dependency vitest to ^0.30.0 (#403)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:35:55 -08:00
renovate[bot]
c16084d99f chore(deps): update dependency mkdocs-material to v9.1.6 (#401)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:35:42 -08:00
renovate[bot]
5591267124 fix(deps): update module golang.org/x/crypto to v0.8.0 (#400)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:35:27 -08:00
renovate[bot]
d46c16f01f fix(deps): update github.com/gocarina/gocsv digest to 6445c2b (#398)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-14 20:35:18 -08:00
zodac
18c22e8a68 Fixing minor typos (#368)
* Adding fullstops for consistency

* Fixing typos

* Fixing eslints
2023-04-14 20:34:40 -08:00
Hayden
64b3ac3e94 change safeserve -> httpkit (#405) 2023-04-09 10:39:43 -08:00
verybadsoldier
c36b9dcf5d update go version in devcontainer to 1.20 to match version in go.mod (#402) 2023-04-09 10:38:17 -08:00
Hayden
d3b6c93b63 use ref_name 2023-04-03 14:21:54 -08:00
Hayden
3b862e36c8 fix CI 2023-04-03 14:18:08 -08:00
Hayden
f3bb86d905 change publish workflow (#390) 2023-04-03 14:10:45 -08:00
Hayden
4dd925caf0 fix: other minor fixes (#388)
* remove overflow-hidden on when no collapsed

* fix recently added on homescreen

* fix delete account formatting

* add manufacturer to search

* move nav button to left
2023-04-01 22:01:21 -08:00
Hayden
ced5aef6d1 fix: export child relationships (#385)
* tidy

* ensure export contains locations path

* update pnpm lock file

* fix swagger stuff

* code gen

* fix linter issue

* fix reverse order bug in test
2023-04-01 15:10:27 -08:00
Hayden
6a853c07a0 fix: various minor bugs (#384)
* fix insufficiently large max height for cards

* fix listener for resetItemDateTimes

* support YYYY/MM/DD format for imports

* fix columns in docs

* use comma deliminator
2023-04-01 14:07:44 -08:00
renovate[bot]
f0b9a0fce4 chore(deps): update dependency mkdocs-material to v9.1.5 (#382)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-04-01 12:22:34 -08:00
renovate[bot]
00f09fec2f fix(deps): update module modernc.org/sqlite to v1.21.1 (#381)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-28 19:49:16 -08:00
renovate[bot]
6e1863b515 fix(deps): update module github.com/hay-kot/safeserve to v0.0.2 (#378)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-28 19:49:07 -08:00
renovate[bot]
dfe2084c74 fix(deps): update github.com/gocarina/gocsv digest to 9a18a84 (#375)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-28 19:48:59 -08:00
renovate[bot]
0825f055a7 fix(deps): update module github.com/swaggo/swag to v1.8.12 (#380)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-28 19:48:49 -08:00
Hayden
e1e04d49aa update git ignore 2023-03-25 11:20:38 -08:00
Hayden
9020587c9e set working directory 2023-03-25 11:17:07 -08:00
Hayden
bd0db1ea37 remove go tidy 2023-03-25 11:13:23 -08:00
Hayden
d2985ff72c go tidy 2023-03-25 11:12:48 -08:00
Hayden
8c57ff841e fix: redirect issues for authorized users (#374) 2023-03-25 11:07:22 -08:00
renovate[bot]
0264bfb8c1 chore(deps): update dependency mkdocs-material to v9.1.4 (#371)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-24 08:03:56 -08:00
Hayden
5dd6844536 feat: change shit to things (#369) 2023-03-23 19:10:19 -08:00
Hayden
0f8db862b4 feat: pwa support (#366)
* add PWA support

* fix broken URLs for query

* remove unused variable

* restore authURL
2023-03-23 10:27:12 -08:00
Hayden
be6b5c9c56 run frontend build before publish via goreleaser 2023-03-23 09:41:15 -08:00
Hayden
faed343eda fix: cookie-auth-issues (#365)
* fix session clearing on error

* use singleton context to manage user state

* implement remember-me functionality

* fix errors

* fix more errors
2023-03-22 21:52:25 -08:00
Hayden
ed1230e17d feat: goreleaser + remove cgo dependency (#363)
* wip: goreleaser

* update image path

* spelling

* set working dir

* change main.go

* remove unused field

* drop cgo requirement

* remove unused workflow step

* generate code

* drop cgo from docker file

* update publish workflow

* annotate as unfinished
2023-03-22 20:49:49 -08:00
zodac
2d768e2b9c feat Adding NZD currency (#360)
* Adding NZD as currency option

* Updating frontend

* Sorting alphabetically

* Fixing typo
2023-03-22 20:26:51 -08:00
Hayden
40e76bac0c feat: filter details for zero values (#364)
* filter details for zero values

* ensure exhaustive checks

* update event listener to only bind when collapsable
2023-03-22 19:05:13 -08:00
Hayden
840d220d4f feat: use notifiers on schedule (#362)
* fix potential memory leak with time.After

* add new background service to manage scheduled notifications

* update docs

* remove old js reference

* closes #278

* tidy
2023-03-21 11:32:48 -08:00
renovate[bot]
975e636fb6 fix(deps): update module github.com/swaggo/swag to v1.8.11 (#361)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-21 11:27:40 -08:00
renovate[bot]
d1076baf84 fix(deps): update module github.com/swaggo/http-swagger to v2 (#358)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-21 11:26:46 -08:00
renovate[bot]
40fcef4e9b chore(deps): update dependency typescript to v5 (#355)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-21 11:23:30 -08:00
Hayden
97fb94d231 fix: refactoring errors (#359)
* #352 - require one date to be set to save

* fix many type regressions
2023-03-20 21:48:22 -08:00
renovate[bot]
4a8ba6231d fix(deps): update module entgo.io/ent to v0.11.10 (#328)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 20:32:48 -08:00
Hayden
db80f8a159 chore: refactor api endpoints (#339)
* move typegen code

* update taskfile to fix code-gen caches and use 'dir' attribute

* enable dumping stack traces for errors

* log request start and stop

* set zerolog stack handler

* fix routes function

* refactor context adapters to use requests directly

* change some method signatures to support GID

* start requiring validation tags

* first pass on updating handlers to use adapters

* add errs package

* code gen

* tidy

* rework API to use external server package
2023-03-20 20:32:10 -08:00
renovate[bot]
184b494fc3 fix(deps): update module github.com/go-playground/validator/v10 to v10.12.0 (#357)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:43:56 -08:00
renovate[bot]
5a3fa23332 fix(deps): update module github.com/swaggo/http-swagger to v1.3.4 (#356)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:43:49 -08:00
renovate[bot]
ef0690d511 chore(deps): update actions/setup-go action to v4 (#351)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:43:33 -08:00
renovate[bot]
9e55c880f6 chore(deps): update dependency @types/dompurify to v3 (#346)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:43:04 -08:00
renovate[bot]
cb9b20e2d2 fix(deps): update module github.com/ardanlabs/conf/v3 to v3.1.5 (#341)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:42:49 -08:00
renovate[bot]
a79e780b4e chore(deps): update dependency mkdocs-material to v9.1.3 (#340)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:42:38 -08:00
renovate[bot]
90cbb9bfd1 fix(deps): update module ariga.io/atlas to v0.10.0 (#327)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-20 18:42:25 -08:00
Vitalij Dovhanyc
dc08dbbd7a feat: add czech currency (#323) 2023-03-20 18:42:11 -08:00
renovate[bot]
1f47d96e4c chore(deps): update dependency nuxt to v3.2.3 (#326)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-07 10:12:32 -09:00
Hayden
23b5892aef feat: Notifiers CRUD (#337)
* introduce scaffold for new models

* wip: shoutrrr wrapper (may remove)

* update schema files

* gen: ent code

* gen: migrations

* go mod tidy

* add group_id to notifier

* db migration

* new mapper helpers

* notifier repo

* introduce experimental adapter pattern for hdlrs

* refactor adapters to fit more common use cases

* new routes for notifiers

* update errors to fix validation panic

* go tidy

* reverse checkbox label display

* wip: notifiers UI

* use badges instead of text

* improve documentation

* add scaffold schema reference

* remove notifier service

* refactor schema folder

* support group edges via scaffold

* delete test file

* include link to API docs

* audit and update documentation + improve format

* refactor schema edges

* refactor

* add custom validator

* set validate + order fields by name

* fix failing tests
2023-03-06 21:18:58 -09:00
renovate[bot]
2665b666f1 fix(deps): update github.com/gocarina/gocsv digest to 70c27cb (#308)
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2023-03-06 10:09:17 -09:00
273 changed files with 11278 additions and 8420 deletions


@@ -35,6 +35,6 @@
// Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
"remoteUser": "node",
"features": {
"golang": "1.19"
"golang": "1.20"
}
}


@@ -22,3 +22,4 @@
**/secrets.dev.yaml
**/values.dev.yaml
README.md
!Dockerfile.rootless


@@ -7,12 +7,12 @@ jobs:
Go:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.19
go-version: "1.20"
- name: Install Task
uses: arduino/setup-task@v1


@@ -9,11 +9,11 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: pnpm/action-setup@v2.2.4
- uses: pnpm/action-setup@v2.4.0
with:
version: 6.0.2
@@ -34,7 +34,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
with:
fetch-depth: 0
@@ -44,15 +44,15 @@ jobs:
repo-token: ${{ secrets.GITHUB_TOKEN }}
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.19
go-version: "1.20"
- uses: actions/setup-node@v3
- uses: actions/setup-node@v4
with:
node-version: 18
- uses: pnpm/action-setup@v2.2.4
- uses: pnpm/action-setup@v2.4.0
with:
version: 6.0.2


@@ -20,11 +20,11 @@ jobs:
name: "Publish Homebox"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v4
with:
go-version: 1.19
go-version: "1.20"
- name: Set up QEMU
id: qemu
@@ -35,7 +35,7 @@ jobs:
- name: install buildx
id: buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
with:
install: true
@@ -44,7 +44,7 @@ jobs:
env:
CR_PAT: ${{ secrets.GH_TOKEN }}
- name: build nightly the image
- name: build nightly image
if: ${{ inputs.release == false }}
run: |
docker build --push --no-cache \
@@ -53,6 +53,16 @@ jobs:
--build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--platform=linux/amd64,linux/arm64,linux/arm/v7 .
- name: build nightly-rootless image
if: ${{ inputs.release == false }}
run: |
docker build --push --no-cache \
--tag=ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \
--build-arg=COMMIT=$(git rev-parse HEAD) \
--build-arg=BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--file Dockerfile.rootless \
--platform=linux/amd64,linux/arm64,linux/arm/v7 .
- name: build release tagged the image
if: ${{ inputs.release == true }}
run: |
@@ -64,3 +74,16 @@ jobs:
--build-arg COMMIT=$(git rev-parse HEAD) \
--build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--platform linux/amd64,linux/arm64,linux/arm/v7 .
- name: build release tagged the rootless image
if: ${{ inputs.release == true }}
run: |
docker build --push --no-cache \
--tag ghcr.io/hay-kot/homebox:nightly-rootless \
--tag ghcr.io/hay-kot/homebox:latest-rootless \
--tag ghcr.io/hay-kot/homebox:${{ inputs.tag }}-rootless \
--build-arg VERSION=${{ inputs.tag }} \
--build-arg COMMIT=$(git rev-parse HEAD) \
--build-arg BUILD_TIME=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \
--platform linux/amd64,linux/arm64,linux/arm/v7 \
--file Dockerfile.rootless .


@@ -1,73 +1,29 @@
name: Build Nightly
name: Publish Dockers
on:
push:
branches:
- main
release:
types:
- published
env:
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
jobs:
backend-tests:
name: "Backend Server Tests"
uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main
frontend-tests:
name: "Frontend and End-to-End Tests"
uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main
deploy:
name: "Deploy Nightly to Fly.io"
runs-on: ubuntu-latest
needs:
- backend-tests
- frontend-tests
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: superfly/flyctl-actions/setup-flyctl@master
- run: flyctl deploy --remote-only
publish-nightly:
name: "Publish Nightly"
if: github.event_name != 'release'
needs:
- backend-tests
- frontend-tests
uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
with:
tag: nightly
secrets:
GH_TOKEN: ${{ secrets.CR_PAT }}
publish-tag:
name: "Publish Tag"
if: github.event_name == 'release'
needs:
- backend-tests
- frontend-tests
uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
with:
release: true
tag: ${{ github.event.release.tag_name }}
secrets:
GH_TOKEN: ${{ secrets.CR_PAT }}
deploy-docs:
name: Deploy docs
needs:
- publish-tag
runs-on: ubuntu-latest
steps:
- name: Checkout main
uses: actions/checkout@v3
- name: Deploy docs
uses: mhausenblas/mkdocs-deploy-gh-pages@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CONFIG_FILE: docs/mkdocs.yml
EXTRA_PACKAGES: build-base


@@ -12,4 +12,4 @@ jobs:
frontend-tests:
name: "Frontend and End-to-End Tests"
uses: ./.github/workflows/partial-frontend.yaml
uses: ./.github/workflows/partial-frontend.yaml

.github/workflows/tag.yaml (new file)

@@ -0,0 +1,77 @@
name: Publish Release
on:
push:
tags:
- v*
env:
FLY_API_TOKEN: ${{ secrets.FLY_API_TOKEN }}
jobs:
backend-tests:
name: "Backend Server Tests"
uses: hay-kot/homebox/.github/workflows/partial-backend.yaml@main
frontend-tests:
name: "Frontend and End-to-End Tests"
uses: hay-kot/homebox/.github/workflows/partial-frontend.yaml@main
goreleaser:
name: goreleaser
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v4
- uses: pnpm/action-setup@v2
with:
version: 7.30.1
- name: Build Frontend and Copy to Backend
working-directory: frontend
run: |
pnpm install --shamefully-hoist
pnpm run build
cp -r ./.output/public ../backend/app/api/static/
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v4
with:
workdir: "backend"
distribution: goreleaser
version: latest
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
publish-tag:
name: "Publish Tag"
uses: hay-kot/homebox/.github/workflows/partial-publish.yaml@main
with:
release: true
tag: ${{ github.ref_name }}
secrets:
GH_TOKEN: ${{ secrets.CR_PAT }}
deploy-docs:
name: Deploy docs
needs:
- publish-tag
- goreleaser
runs-on: ubuntu-latest
steps:
- name: Checkout main
uses: actions/checkout@v4
- name: Deploy docs
uses: mhausenblas/mkdocs-deploy-gh-pages@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
CONFIG_FILE: docs/mkdocs.yml
EXTRA_PACKAGES: build-base

.gitignore

@@ -48,4 +48,9 @@ dist
.pnpm-store
backend/app/api/app
backend/app/api/__debug_bin
backend/app/api/__debug_bin
dist/
# Nuxt Publish Dir
backend/app/api/static/public/*
!backend/app/api/static/public/.gitkeep


@@ -48,4 +48,10 @@ start command `task: ui:dev`
1. The frontend is a Vue 3 app with Nuxt.js that uses Tailwind and DaisyUI for styling.
2. We're using Vitest for our automated testing. you can run these with `task ui:watch`.
3. Tests require the API server to be running and in some cases the first run will fail due to a race condition. If this happens just run the tests again and they should pass.
3. Tests require the API server to be running and in some cases the first run will fail due to a race condition. If this happens just run the tests again and they should pass.
## Publishing Release
Create a new tag in github with the version number vX.X.X. This will trigger a new release to be created.
Test -> Goreleaser -> Publish Release -> Trigger Docker Builds -> Deploy Docs + Fly.io Demo


@@ -1,6 +1,6 @@
# Build Nuxt
FROM node:17-alpine as frontend-builder
FROM node:18-alpine as frontend-builder
WORKDIR /app
RUN npm install -g pnpm
COPY frontend/package.json frontend/pnpm-lock.yaml ./
@@ -22,7 +22,7 @@ COPY ./backend .
RUN go get -d -v ./...
RUN rm -rf ./app/api/public
COPY --from=frontend-builder /app/.output/public ./app/api/static/public
RUN CGO_ENABLED=1 GOOS=linux go build \
RUN CGO_ENABLED=0 GOOS=linux go build \
-ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
-o /go/bin/api \
-v ./app/api/*.go

Dockerfile.rootless (new file)

@@ -0,0 +1,53 @@
# Build Nuxt
FROM node:17-alpine as frontend-builder
WORKDIR /app
RUN npm install -g pnpm
COPY frontend/package.json frontend/pnpm-lock.yaml ./
RUN pnpm install --frozen-lockfile --shamefully-hoist
COPY frontend .
RUN pnpm build
# Build API
FROM golang:alpine AS builder
ARG BUILD_TIME
ARG COMMIT
ARG VERSION
RUN apk update && \
apk upgrade && \
apk add --update git build-base gcc g++
WORKDIR /go/src/app
COPY ./backend .
RUN go get -d -v ./...
RUN rm -rf ./app/api/public
COPY --from=frontend-builder /app/.output/public ./app/api/static/public
RUN CGO_ENABLED=0 GOOS=linux go build \
-ldflags "-s -w -X main.commit=$COMMIT -X main.buildTime=$BUILD_TIME -X main.version=$VERSION" \
-o /go/bin/api \
-v ./app/api/*.go && \
chmod +x /go/bin/api && \
# create a directory so that we can copy it in the next stage
mkdir /data
# Production Stage
FROM gcr.io/distroless/static
ENV HBOX_MODE=production
ENV HBOX_STORAGE_DATA=/data/
ENV HBOX_STORAGE_SQLITE_URL=/data/homebox.db?_fk=1
# Copy the binary and the (empty) /data dir and
# change the ownership to the low-privileged user
COPY --from=builder --chown=nonroot /go/bin/api /app
COPY --from=builder --chown=nonroot /data /data
LABEL Name=homebox Version=0.0.1
LABEL org.opencontainers.image.source="https://github.com/hay-kot/homebox"
EXPOSE 7745
VOLUME [ "/data" ]
# Drop root and run as low-privileged user
USER nonroot
ENTRYPOINT [ "/app" ]
CMD [ "/data/config.yml" ]


@@ -16,10 +16,18 @@
[Configuration & Docker Compose](https://hay-kot.github.io/homebox/quick-start)
```bash
docker run --name=homebox \
--restart=always \
--publish=3100:7745 \
ghcr.io/hay-kot/homebox:latest
# If using the rootless image, ensure data
# folder has correct permissions
mkdir -p /path/to/data/folder
chown 65532:65532 -R /path/to/data/folder
docker run -d \
--name homebox \
--restart unless-stopped \
--publish 3100:7745 \
--env TZ=Europe/Bucharest \
--volume /path/to/data/folder/:/data \
ghcr.io/hay-kot/homebox:latest
# ghcr.io/hay-kot/homebox:latest-rootless
```
## Credits


@@ -1,7 +1,7 @@
version: "3"
env:
HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_fk=1
HBOX_STORAGE_SQLITE_URL: .data/homebox.db?_pragma=busy_timeout=1000&_pragma=journal_mode=WAL&_fk=1
HBOX_OPTIONS_ALLOW_REGISTRATION: true
UNSAFE_DISABLE_PASSWORD_PROJECTION: "yes_i_am_sure"
tasks:
@@ -12,62 +12,77 @@ tasks:
- cd backend && go mod tidy
- cd frontend && pnpm install --shamefully-hoist
generate:
desc: |
Generates collateral files from the backend project
including swagger docs and typescripts type for the frontend
deps:
- db:generate
swag:
desc: Generate swagger docs
dir: backend/app/api/static/
vars:
API: "../"
INTERNAL: "../../../internal"
PKGS: "../../../pkgs"
cmds:
- swag fmt --dir={{ .API }}
- swag init --dir={{ .API }},{{ .INTERNAL }}/core/services,{{ .INTERNAL }}/data/repo --parseDependency
sources:
- "./backend/app/api/**/*"
- "./backend/internal/data/**"
- "./backend/internal/core/services/**/*"
- "./backend/app/tools/typegen/main.go"
typescript-types:
desc: Generates typescript types from swagger definition
cmds:
- cd backend/app/api/static && swag fmt --dir=../
- cd backend/app/api/static && swag init --dir=../,../../../internal,../../../pkgs
- |
npx swagger-typescript-api \
--no-client \
--modular \
--path ./backend/app/api/static/docs/swagger.json \
--output ./frontend/lib/api/types
- go run ./scripts/process-types/*.go ./frontend/lib/api/types/data-contracts.ts
- cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json
- go run ./backend/app/tools/typegen/main.go ./frontend/lib/api/types/data-contracts.ts
sources:
- "./backend/app/api/**/*"
- "./backend/internal/data/**"
- "./backend/internal/services/**/*"
- "./scripts/process-types.py"
generates:
- "./frontend/lib/api/types/data-contracts.ts"
- "./backend/internal/data/ent/schema"
- "./backend/app/api/static/docs/swagger.json"
- "./backend/app/api/static/docs/swagger.yaml"
- ./backend/app/tools/typegen/main.go
- ./backend/app/api/static/docs/swagger.json
generate:
deps:
- db:generate
cmds:
- task: swag
- task: typescript-types
- cp ./backend/app/api/static/docs/swagger.json docs/docs/api/openapi-2.0.json
go:run:
desc: Starts the backend api server (depends on generate task)
dir: backend
deps:
- generate
cmds:
- cd backend && go run ./app/api/ {{ .CLI_ARGS }}
- go run ./app/api/ {{ .CLI_ARGS }}
silent: false
go:test:
desc: Runs all go tests using gotestsum - supports passing gotestsum args
dir: backend
cmds:
- cd backend && gotestsum {{ .CLI_ARGS }} ./...
- gotestsum {{ .CLI_ARGS }} ./...
go:coverage:
desc: Runs all go tests with -race flag and generates a coverage report
dir: backend
cmds:
- cd backend && go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover
- go test -race -coverprofile=coverage.out -covermode=atomic ./app/... ./internal/... ./pkgs/... -v -cover
silent: true
go:tidy:
desc: Runs go mod tidy on the backend
dir: backend
cmds:
- cd backend && go mod tidy
- go mod tidy
go:lint:
desc: Runs golangci-lint
dir: backend
cmds:
- cd backend && golangci-lint run ./...
- golangci-lint run ./...
go:all:
desc: Runs all go test and lint related tasks
@@ -78,19 +93,19 @@ tasks:
go:build:
desc: Builds the backend binary
dir: backend
cmds:
- cd backend && go build -o ../build/backend ./app/api
- go build -o ../build/backend ./app/api
db:generate:
desc: Run Entgo.io Code Generation
dir: backend/internal/
cmds:
- |
cd backend/internal/ && go generate ./... \
go generate ./... \
--template=./data/ent/schema/templates/has_id.tmpl
sources:
- "./backend/internal/data/ent/schema/**/*"
generates:
- "./backend/internal/ent/"
db:migration:
desc: Runs the database diff engine to generate a SQL migration files
@@ -101,23 +116,27 @@ tasks:
ui:watch:
desc: Starts the vitest test runner in watch mode
dir: frontend
cmds:
- cd frontend && pnpm run test:watch
- pnpm run test:watch
ui:dev:
desc: Run frontend development server
dir: frontend
cmds:
- cd frontend && pnpm dev
- pnpm dev
ui:fix:
desc: Runs prettier and eslint on the frontend
dir: frontend
cmds:
- cd frontend && pnpm run lint:fix
- pnpm run lint:fix
ui:check:
desc: Runs type checking
dir: frontend
cmds:
- cd frontend && pnpm run typecheck
- pnpm run typecheck
test:ci:
desc: Runs end-to-end test on a live server (only for use in CI)
@@ -135,4 +154,4 @@ tasks:
- task: go:all
- task: ui:check
- task: ui:fix
- task: test:ci
- task: test:ci

backend/.gitignore (new file)

@@ -0,0 +1,2 @@
dist/

backend/.goreleaser.yaml (new file)

@@ -0,0 +1,54 @@
# This is an example .goreleaser.yml file with some sensible defaults.
# Make sure to check the documentation at https://goreleaser.com
before:
hooks:
# you may remove this if you don't need go generate
- go generate ./...
builds:
- main: ./app/api
env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
goarch:
- amd64
- "386"
- arm
- arm64
ignore:
- goos: windows
goarch: arm
- goos: windows
goarch: "386"
archives:
- format: tar.gz
# this name template makes the OS and Arch compatible with the results of uname.
name_template: >-
{{ .ProjectName }}_
{{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
format: zip
checksum:
name_template: 'checksums.txt'
snapshot:
name_template: "{{ incpatch .Version }}-next"
changelog:
sort: asc
filters:
exclude:
- '^docs:'
- '^test:'
# The lines beneath this are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj

backend/api (binary executable; contents not shown)


@@ -4,11 +4,12 @@ import (
"time"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/config"
"github.com/hay-kot/homebox/backend/pkgs/mailer"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/server"
)
type app struct {
@@ -18,6 +19,7 @@ type app struct {
server *server.Server
repos *repo.AllRepos
services *services.AllServices
bus *eventbus.EventBus
}
func new(conf *config.Config) *app {
@@ -37,8 +39,11 @@ func new(conf *config.Config) *app {
}
func (a *app) startBgTask(t time.Duration, fn func()) {
timer := time.NewTimer(t)
for {
timer.Reset(t)
a.server.Background(fn)
time.Sleep(t)
<-timer.C
}
}
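
The hunk above replaces a bare time.Sleep loop with a single reusable time.Timer, so each scheduled run waits on timer.C instead of allocating a fresh timer per iteration (the concern behind "fix potential memory leak with time.After" in #362). Below is a minimal, self-contained sketch of the same pattern; the runEvery helper is a hypothetical stand-in for the app's startBgTask, not code from the repository.

```go
package main

import (
	"fmt"
	"time"
)

// runEvery runs fn on a fixed interval, reusing one timer for the
// lifetime of the loop rather than allocating a new one per tick.
func runEvery(interval time.Duration, fn func()) {
	timer := time.NewTimer(interval)
	for {
		timer.Reset(interval)
		go fn() // fire-and-forget, mirroring a.server.Background(fn) above
		<-timer.C
	}
}

func main() {
	go runEvery(time.Second, func() { fmt.Println("background task ran") })
	time.Sleep(3 * time.Second) // let a few iterations fire, then exit
}
```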


@@ -25,7 +25,7 @@ func (a *app) SetupDemo() {
}
// First check if we've already setup a demo user and skip if so
_, err := a.services.User.Login(context.Background(), registration.Email, registration.Password)
_, err := a.services.User.Login(context.Background(), registration.Email, registration.Password, false)
if err == nil {
return
}
@@ -36,7 +36,7 @@ func (a *app) SetupDemo() {
log.Fatal().Msg("Failed to setup demo")
}
token, _ := a.services.User.Login(context.Background(), registration.Email, registration.Password)
token, _ := a.services.User.Login(context.Background(), registration.Email, registration.Password, false)
self, _ := a.services.User.GetSelf(context.Background(), token.Raw)
_, err = a.services.Items.CsvImport(context.Background(), self.GroupID, strings.NewReader(csvText))


@@ -1,13 +1,36 @@
package v1
import (
"fmt"
"net/http"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
"github.com/olahol/melody"
)
type Results[T any] struct {
Items []T `json:"items"`
}
func WrapResults[T any](items []T) Results[T] {
return Results[T]{Items: items}
}
type Wrapped struct {
Item interface{} `json:"item"`
}
func Wrap(v any) Wrapped {
return Wrapped{Item: v}
}
func WithMaxUploadSize(maxUploadSize int64) func(*V1Controller) {
return func(ctrl *V1Controller) {
ctrl.maxUploadSize = maxUploadSize
@@ -26,12 +49,20 @@ func WithRegistration(allowRegistration bool) func(*V1Controller) {
}
}
func WithSecureCookies(secure bool) func(*V1Controller) {
return func(ctrl *V1Controller) {
ctrl.cookieSecure = secure
}
}
type V1Controller struct {
cookieSecure bool
repo *repo.AllRepos
svc *services.AllServices
maxUploadSize int64
isDemo bool
allowRegistration bool
bus *eventbus.EventBus
}
type (
@@ -60,11 +91,12 @@ func BaseUrlFunc(prefix string) func(s string) string {
}
}
func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ...func(*V1Controller)) *V1Controller {
func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, bus *eventbus.EventBus, options ...func(*V1Controller)) *V1Controller {
ctrl := &V1Controller{
repo: repos,
svc: svc,
allowRegistration: true,
bus: bus,
}
for _, opt := range options {
@@ -81,15 +113,54 @@ func NewControllerV1(svc *services.AllServices, repos *repo.AllRepos, options ..
// @Produce json
// @Success 200 {object} ApiSummary
// @Router /v1/status [GET]
func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) server.HandlerFunc {
func (ctrl *V1Controller) HandleBase(ready ReadyFunc, build Build) errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
return server.Respond(w, http.StatusOK, ApiSummary{
return server.JSON(w, http.StatusOK, ApiSummary{
Healthy: ready(),
Title: "Homebox",
Message: "Track, Manage, and Organize your shit",
Message: "Track, Manage, and Organize your Things",
Build: build,
Demo: ctrl.isDemo,
AllowRegistration: ctrl.allowRegistration,
})
}
}
func (ctrl *V1Controller) HandleCacheWS() errchain.HandlerFunc {
m := melody.New()
m.HandleConnect(func(s *melody.Session) {
auth := services.NewContext(s.Request.Context())
s.Set("gid", auth.GID)
})
factory := func(e string) func(data any) {
return func(data any) {
eventData, ok := data.(eventbus.GroupMutationEvent)
if !ok {
log.Log().Msgf("invalid event data: %v", data)
return
}
jsonStr := fmt.Sprintf(`{"event": "%s"}`, e)
_ = m.BroadcastFilter([]byte(jsonStr), func(s *melody.Session) bool {
groupIDStr, ok := s.Get("gid")
if !ok {
return false
}
GID := groupIDStr.(uuid.UUID)
return GID == eventData.GID
})
}
}
ctrl.bus.Subscribe(eventbus.EventLabelMutation, factory("label.mutation"))
ctrl.bus.Subscribe(eventbus.EventLocationMutation, factory("location.mutation"))
ctrl.bus.Subscribe(eventbus.EventItemMutation, factory("item.mutation"))
return func(w http.ResponseWriter, r *http.Request) error {
return m.HandleRequest(w, r)
}
}
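
HandleCacheWS above bridges the internal event bus and melody's WebSocket sessions: each subscription turns a GroupMutationEvent into a small JSON message, and BroadcastFilter delivers it only to sessions whose stored "gid" matches the event's group. The eventbus package itself is not part of this hunk, so the sketch below is a guess at a minimal Subscribe/Publish hub that would satisfy the calls shown; EventBus, Subscribe, and GroupMutationEvent appear above, while New, Publish, and the internals are assumptions.

```go
package eventbus

import (
	"sync"

	"github.com/google/uuid"
)

// EventKind identifies a topic such as "item.mutation".
type EventKind string

// GroupMutationEvent says which group's cached data became stale.
type GroupMutationEvent struct {
	GID uuid.UUID
}

// EventBus is a minimal in-process publish/subscribe hub.
type EventBus struct {
	mu   sync.RWMutex
	subs map[EventKind][]func(any)
}

func New() *EventBus {
	return &EventBus{subs: make(map[EventKind][]func(any))}
}

// Subscribe registers fn to be called for every event of the given kind.
func (b *EventBus) Subscribe(kind EventKind, fn func(any)) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.subs[kind] = append(b.subs[kind], fn)
}

// Publish fans the event out to all subscribers of the kind.
func (b *EventBus) Publish(kind EventKind, data any) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	for _, fn := range b.subs[kind] {
		fn(data)
	}
}
```

Under these assumptions, a repository write would call something like bus.Publish(EventItemMutation, GroupMutationEvent{GID: groupID}), which is what would trigger the item.mutation broadcast in the handler above.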


@@ -7,7 +7,8 @@ import (
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
@@ -15,7 +16,7 @@ type ActionAmountResult struct {
Completed int `json:"completed"`
}
func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int, error)) server.HandlerFunc {
func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int, error)) errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
@@ -25,7 +26,7 @@ func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int,
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted})
return server.JSON(w, http.StatusOK, ActionAmountResult{Completed: totalCompleted})
}
}
@@ -38,7 +39,7 @@ func actionHandlerFactory(ref string, fn func(context.Context, uuid.UUID) (int,
// @Success 200 {object} ActionAmountResult
// @Router /v1/actions/ensure-asset-ids [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleEnsureAssetID() server.HandlerFunc {
func (ctrl *V1Controller) HandleEnsureAssetID() errchain.HandlerFunc {
return actionHandlerFactory("ensure asset IDs", ctrl.svc.Items.EnsureAssetID)
}
@@ -51,7 +52,7 @@ func (ctrl *V1Controller) HandleEnsureAssetID() server.HandlerFunc {
// @Success 200 {object} ActionAmountResult
// @Router /v1/actions/ensure-import-refs [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleEnsureImportRefs() server.HandlerFunc {
func (ctrl *V1Controller) HandleEnsureImportRefs() errchain.HandlerFunc {
return actionHandlerFactory("ensure import refs", ctrl.svc.Items.EnsureImportRef)
}
@@ -64,6 +65,19 @@ func (ctrl *V1Controller) HandleEnsureImportRefs() server.HandlerFunc {
// @Success 200 {object} ActionAmountResult
// @Router /v1/actions/zero-item-time-fields [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleItemDateZeroOut() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemDateZeroOut() errchain.HandlerFunc {
return actionHandlerFactory("zero out date time", ctrl.repo.Items.ZeroOutTimeFields)
}
// HandleSetPrimaryPhotos godoc
//
// @Summary Set Primary Photos
// @Description Sets the first photo of each item as the primary photo
// @Tags Actions
// @Produce json
// @Success 200 {object} ActionAmountResult
// @Router /v1/actions/set-primary-photos [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleSetPrimaryPhotos() errchain.HandlerFunc {
return actionHandlerFactory("ensure asset IDs", ctrl.repo.Items.SetPrimaryPhotos)
}


@@ -9,7 +9,8 @@ import (
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
@@ -23,7 +24,7 @@ import (
// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{}
// @Router /v1/assets/{id} [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
func (ctrl *V1Controller) HandleAssetGet() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
assetIdParam := chi.URLParam(r, "id")
@@ -38,7 +39,7 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
if pageParam != "" {
page, err = strconv.ParseInt(pageParam, 10, 64)
if err != nil {
return server.Respond(w, http.StatusBadRequest, "Invalid page number")
return server.JSON(w, http.StatusBadRequest, "Invalid page number")
}
}
@@ -47,7 +48,7 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
if pageSizeParam != "" {
pageSize, err = strconv.ParseInt(pageSizeParam, 10, 64)
if err != nil {
return server.Respond(w, http.StatusBadRequest, "Invalid page size")
return server.JSON(w, http.StatusBadRequest, "Invalid page size")
}
}
@@ -56,6 +57,6 @@ func (ctrl *V1Controller) HandleAssetGet() server.HandlerFunc {
log.Err(err).Msg("failed to get item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, items)
return server.JSON(w, http.StatusOK, items)
}
}


@@ -3,15 +3,23 @@ package v1
import (
"errors"
"net/http"
"strconv"
"strings"
"time"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
const (
cookieNameToken = "hb.auth.token"
cookieNameRemember = "hb.auth.remember"
cookieNameSession = "hb.auth.session"
)
type (
TokenResponse struct {
Token string `json:"token"`
@@ -20,11 +28,36 @@ type (
}
LoginForm struct {
Username string `json:"username"`
Password string `json:"password"`
Username string `json:"username"`
Password string `json:"password"`
StayLoggedIn bool `json:"stayLoggedIn"`
}
)
type CookieContents struct {
Token string
ExpiresAt time.Time
Remember bool
}
func GetCookies(r *http.Request) (*CookieContents, error) {
cookie, err := r.Cookie(cookieNameToken)
if err != nil {
return nil, errors.New("authorization cookie is required")
}
rememberCookie, err := r.Cookie(cookieNameRemember)
if err != nil {
return nil, errors.New("remember cookie is required")
}
return &CookieContents{
Token: cookie.Value,
ExpiresAt: cookie.Expires,
Remember: rememberCookie.Value == "true",
}, nil
}
// HandleAuthLogin godoc
//
// @Summary User Login
@@ -33,29 +66,32 @@ type (
// @Accept application/json
// @Param username formData string false "string" example(admin@admin.com)
// @Param password formData string false "string" example(admin)
// @Param payload body LoginForm true "Login Data"
// @Produce json
// @Success 200 {object} TokenResponse
// @Router /v1/users/login [POST]
func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
func (ctrl *V1Controller) HandleAuthLogin() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
loginForm := &LoginForm{}
switch r.Header.Get("Content-Type") {
case server.ContentFormUrlEncoded:
case "application/x-www-form-urlencoded":
err := r.ParseForm()
if err != nil {
return server.Respond(w, http.StatusBadRequest, server.Wrap(err))
return errors.New("failed to parse form")
}
loginForm.Username = r.PostFormValue("username")
loginForm.Password = r.PostFormValue("password")
case server.ContentJSON:
loginForm.StayLoggedIn = r.PostFormValue("stayLoggedIn") == "true"
case "application/json":
err := server.Decode(r, loginForm)
if err != nil {
log.Err(err).Msg("failed to decode login form")
return errors.New("failed to decode login form")
}
default:
return server.Respond(w, http.StatusBadRequest, errors.New("invalid content type"))
return server.JSON(w, http.StatusBadRequest, errors.New("invalid content type"))
}
if loginForm.Username == "" || loginForm.Password == "" {
@@ -71,12 +107,13 @@ func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
)
}
newToken, err := ctrl.svc.User.Login(r.Context(), strings.ToLower(loginForm.Username), loginForm.Password)
newToken, err := ctrl.svc.User.Login(r.Context(), strings.ToLower(loginForm.Username), loginForm.Password, loginForm.StayLoggedIn)
if err != nil {
return validate.NewRequestError(errors.New("authentication failed"), http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, TokenResponse{
ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, loginForm.StayLoggedIn)
return server.JSON(w, http.StatusOK, TokenResponse{
Token: "Bearer " + newToken.Raw,
ExpiresAt: newToken.ExpiresAt,
AttachmentToken: newToken.AttachmentToken,
@@ -91,7 +128,7 @@ func (ctrl *V1Controller) HandleAuthLogin() server.HandlerFunc {
// @Success 204
// @Router /v1/users/logout [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
func (ctrl *V1Controller) HandleAuthLogout() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
token := services.UseTokenCtx(r.Context())
if token == "" {
@@ -103,7 +140,8 @@ func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
ctrl.unsetCookies(w, noPort(r.Host))
return server.JSON(w, http.StatusNoContent, nil)
}
}
@@ -116,7 +154,7 @@ func (ctrl *V1Controller) HandleAuthLogout() server.HandlerFunc {
// @Success 200
// @Router /v1/users/refresh [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc {
func (ctrl *V1Controller) HandleAuthRefresh() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
requestToken := services.UseTokenCtx(r.Context())
if requestToken == "" {
@@ -128,6 +166,78 @@ func (ctrl *V1Controller) HandleAuthRefresh() server.HandlerFunc {
return validate.NewUnauthorizedError()
}
return server.Respond(w, http.StatusOK, newToken)
ctrl.setCookies(w, noPort(r.Host), newToken.Raw, newToken.ExpiresAt, false)
return server.JSON(w, http.StatusOK, newToken)
}
}
func noPort(host string) string {
return strings.Split(host, ":")[0]
}
func (ctrl *V1Controller) setCookies(w http.ResponseWriter, domain, token string, expires time.Time, remember bool) {
http.SetCookie(w, &http.Cookie{
Name: cookieNameRemember,
Value: strconv.FormatBool(remember),
Expires: expires,
Domain: domain,
Secure: ctrl.cookieSecure,
HttpOnly: true,
Path: "/",
})
// Set HTTP only cookie
http.SetCookie(w, &http.Cookie{
Name: cookieNameToken,
Value: token,
Expires: expires,
Domain: domain,
Secure: ctrl.cookieSecure,
HttpOnly: true,
Path: "/",
})
// Set Fake Session cookie
http.SetCookie(w, &http.Cookie{
Name: cookieNameSession,
Value: "true",
Expires: expires,
Domain: domain,
Secure: ctrl.cookieSecure,
HttpOnly: false,
Path: "/",
})
}
func (ctrl *V1Controller) unsetCookies(w http.ResponseWriter, domain string) {
http.SetCookie(w, &http.Cookie{
Name: cookieNameToken,
Value: "",
Expires: time.Unix(0, 0),
Domain: domain,
Secure: ctrl.cookieSecure,
HttpOnly: true,
Path: "/",
})
http.SetCookie(w, &http.Cookie{
Name: cookieNameRemember,
Value: "false",
Expires: time.Unix(0, 0),
Domain: domain,
Secure: ctrl.cookieSecure,
HttpOnly: true,
Path: "/",
})
// Set Fake Session cookie
http.SetCookie(w, &http.Cookie{
Name: cookieNameSession,
Value: "false",
Expires: time.Unix(0, 0),
Domain: domain,
Secure: ctrl.cookieSecure,
HttpOnly: false,
Path: "/",
})
}
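
The handlers above now set three cookies on login (hb.auth.token, hb.auth.remember, and a non-HttpOnly hb.auth.session flag) and clear them on logout. As a quick orientation, a minimal stdlib-only sketch of exercising that flow from a Go client; the base URL, port, and credentials are placeholders, and only the cookie names come from the code above.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Log in and inspect the cookies written by setCookies above.
	payload, _ := json.Marshal(map[string]any{
		"username":     "admin@admin.com",
		"password":     "admin",
		"stayLoggedIn": true,
	})

	resp, err := http.Post("http://localhost:7745/api/v1/users/login", "application/json", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	for _, c := range resp.Cookies() {
		switch c.Name {
		case "hb.auth.token", "hb.auth.remember", "hb.auth.session":
			fmt.Printf("%s httpOnly=%v expires=%v\n", c.Name, c.HttpOnly, c.Expires)
		}
	}
}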

View File

@@ -6,14 +6,13 @@ import (
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/rs/zerolog/log"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
)
type (
GroupInvitationCreate struct {
Uses int `json:"uses"`
Uses int `json:"uses" validate:"required,min=1,max=100"`
ExpiresAt time.Time `json:"expiresAt"`
}
@@ -32,8 +31,13 @@ type (
// @Success 200 {object} repo.Group
// @Router /v1/groups [Get]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupGet() server.HandlerFunc {
return ctrl.handleGroupGeneral()
func (ctrl *V1Controller) HandleGroupGet() errchain.HandlerFunc {
fn := func(r *http.Request) (repo.Group, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Groups.GroupByID(auth, auth.GID)
}
return adapters.Command(fn, http.StatusOK)
}
// HandleGroupUpdate godoc
@@ -45,41 +49,13 @@ func (ctrl *V1Controller) HandleGroupGet() server.HandlerFunc {
// @Success 200 {object} repo.Group
// @Router /v1/groups [Put]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupUpdate() server.HandlerFunc {
return ctrl.handleGroupGeneral()
}
func (ctrl *V1Controller) handleGroupGeneral() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
switch r.Method {
case http.MethodGet:
group, err := ctrl.repo.Groups.GroupByID(ctx, ctx.GID)
if err != nil {
log.Err(err).Msg("failed to get group")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, group)
case http.MethodPut:
data := repo.GroupUpdate{}
if err := server.Decode(r, &data); err != nil {
return validate.NewRequestError(err, http.StatusBadRequest)
}
group, err := ctrl.svc.Group.UpdateGroup(ctx, data)
if err != nil {
log.Err(err).Msg("failed to update group")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, group)
}
return nil
func (ctrl *V1Controller) HandleGroupUpdate() errchain.HandlerFunc {
fn := func(r *http.Request, body repo.GroupUpdate) (repo.Group, error) {
auth := services.NewContext(r.Context())
return ctrl.svc.Group.UpdateGroup(auth, body)
}
return adapters.Action(fn, http.StatusOK)
}
// HandleGroupInvitationsCreate godoc
@@ -91,30 +67,22 @@ func (ctrl *V1Controller) handleGroupGeneral() server.HandlerFunc {
// @Success 200 {object} GroupInvitation
// @Router /v1/groups/invitations [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupInvitationsCreate() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
data := GroupInvitationCreate{}
if err := server.Decode(r, &data); err != nil {
log.Err(err).Msg("failed to decode user registration data")
return validate.NewRequestError(err, http.StatusBadRequest)
func (ctrl *V1Controller) HandleGroupInvitationsCreate() errchain.HandlerFunc {
fn := func(r *http.Request, body GroupInvitationCreate) (GroupInvitation, error) {
if body.ExpiresAt.IsZero() {
body.ExpiresAt = time.Now().Add(time.Hour * 24)
}
if data.ExpiresAt.IsZero() {
data.ExpiresAt = time.Now().Add(time.Hour * 24)
}
auth := services.NewContext(r.Context())
ctx := services.NewContext(r.Context())
token, err := ctrl.svc.Group.NewInvitation(auth, body.Uses, body.ExpiresAt)
token, err := ctrl.svc.Group.NewInvitation(ctx, data.Uses, data.ExpiresAt)
if err != nil {
log.Err(err).Msg("failed to create new token")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusCreated, GroupInvitation{
return GroupInvitation{
Token: token,
ExpiresAt: data.ExpiresAt,
Uses: data.Uses,
})
ExpiresAt: body.ExpiresAt,
Uses: body.Uses,
}, err
}
return adapters.Action(fn, http.StatusCreated)
}
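
This file is the first of several below that replace hand-rolled method switches with the adapters.Command / adapters.Action helpers (and their *ID variants) returning errchain.HandlerFunc. The adapters package itself is not part of this diff; the following is only a rough sketch of what such generic helpers could look like using encoding/json directly, not the repository's actual implementation, which presumably also runs validation and richer error mapping.

package adapters

import (
	"encoding/json"
	"net/http"
)

// HandlerFunc mirrors the errchain.HandlerFunc signature seen in the diff.
type HandlerFunc func(w http.ResponseWriter, r *http.Request) error

// Command wraps a body-less handler: call fn and encode its result.
func Command[T any](fn func(r *http.Request) (T, error), status int) HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) error {
		v, err := fn(r)
		if err != nil {
			return err
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(status)
		return json.NewEncoder(w).Encode(v)
	}
}

// Action additionally decodes a JSON request body into B before calling fn.
func Action[B, T any](fn func(r *http.Request, body B) (T, error), status int) HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) error {
		var body B
		if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
			return err
		}
		v, err := fn(r, body)
		if err != nil {
			return err
		}
		w.Header().Set("Content-Type", "application/json")
		w.WriteHeader(status)
		return json.NewEncoder(w).Encode(v)
	}
}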

View File

@@ -7,10 +7,13 @@ import (
"net/http"
"strings"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
@@ -24,10 +27,11 @@ import (
// @Param pageSize query int false "items per page"
// @Param labels query []string false "label Ids" collectionFormat(multi)
// @Param locations query []string false "location Ids" collectionFormat(multi)
// @Param parentIds query []string false "parent Ids" collectionFormat(multi)
// @Success 200 {object} repo.PaginationResult[repo.ItemSummary]{}
// @Router /v1/items [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemsGetAll() errchain.HandlerFunc {
extractQuery := func(r *http.Request) repo.ItemQuery {
params := r.URL.Query()
@@ -53,8 +57,10 @@ func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
Search: params.Get("q"),
LocationIDs: queryUUIDList(params, "locations"),
LabelIDs: queryUUIDList(params, "labels"),
ParentItemIDs: queryUUIDList(params, "parentIds"),
IncludeArchived: queryBool(params.Get("includeArchived")),
Fields: filterFieldItems(params["fields"]),
OrderBy: params.Get("orderBy"),
}
if strings.HasPrefix(v.Search, "#") {
@@ -76,14 +82,14 @@ func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
items, err := ctrl.repo.Items.QueryByGroup(ctx, ctx.GID, extractQuery(r))
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return server.Respond(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{
return server.JSON(w, http.StatusOK, repo.PaginationResult[repo.ItemSummary]{
Items: []repo.ItemSummary{},
})
}
log.Err(err).Msg("failed to get items")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, items)
return server.JSON(w, http.StatusOK, items)
}
}
@@ -93,26 +99,15 @@ func (ctrl *V1Controller) HandleItemsGetAll() server.HandlerFunc {
// @Tags Items
// @Produce json
// @Param payload body repo.ItemCreate true "Item Data"
// @Success 200 {object} repo.ItemSummary
// @Success 201 {object} repo.ItemSummary
// @Router /v1/items [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleItemsCreate() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
createData := repo.ItemCreate{}
if err := server.Decode(r, &createData); err != nil {
log.Err(err).Msg("failed to decode request body")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
ctx := services.NewContext(r.Context())
item, err := ctrl.svc.Items.Create(ctx, createData)
if err != nil {
log.Err(err).Msg("failed to create item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusCreated, item)
func (ctrl *V1Controller) HandleItemsCreate() errchain.HandlerFunc {
fn := func(r *http.Request, body repo.ItemCreate) (repo.ItemOut, error) {
return ctrl.svc.Items.Create(services.NewContext(r.Context()), body)
}
return adapters.Action(fn, http.StatusCreated)
}
// HandleItemGet godocs
@@ -124,8 +119,14 @@ func (ctrl *V1Controller) HandleItemsCreate() server.HandlerFunc {
// @Success 200 {object} repo.ItemOut
// @Router /v1/items/{id} [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleItemGet() server.HandlerFunc {
return ctrl.handleItemsGeneral()
func (ctrl *V1Controller) HandleItemGet() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (repo.ItemOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
}
return adapters.CommandID("id", fn, http.StatusOK)
}
// HandleItemDelete godocs
@@ -137,8 +138,14 @@ func (ctrl *V1Controller) HandleItemGet() server.HandlerFunc {
// @Success 204
// @Router /v1/items/{id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleItemDelete() server.HandlerFunc {
return ctrl.handleItemsGeneral()
func (ctrl *V1Controller) HandleItemDelete() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (any, error) {
auth := services.NewContext(r.Context())
err := ctrl.repo.Items.DeleteByGroup(auth, auth.GID, ID)
return nil, err
}
return adapters.CommandID("id", fn, http.StatusNoContent)
}
// HandleItemUpdate godocs
@@ -151,50 +158,41 @@ func (ctrl *V1Controller) HandleItemDelete() server.HandlerFunc {
// @Success 200 {object} repo.ItemOut
// @Router /v1/items/{id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleItemUpdate() server.HandlerFunc {
return ctrl.handleItemsGeneral()
func (ctrl *V1Controller) HandleItemUpdate() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID, body repo.ItemUpdate) (repo.ItemOut, error) {
auth := services.NewContext(r.Context())
body.ID = ID
return ctrl.repo.Items.UpdateByGroup(auth, auth.GID, body)
}
return adapters.ActionID("id", fn, http.StatusOK)
}
func (ctrl *V1Controller) handleItemsGeneral() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
ID, err := ctrl.routeID(r)
// HandleItemPatch godocs
//
// @Summary Update Item
// @Tags Items
// @Produce json
// @Param id path string true "Item ID"
// @Param payload body repo.ItemPatch true "Item Data"
// @Success 200 {object} repo.ItemOut
// @Router /v1/items/{id} [Patch]
// @Security Bearer
func (ctrl *V1Controller) HandleItemPatch() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID, body repo.ItemPatch) (repo.ItemOut, error) {
auth := services.NewContext(r.Context())
body.ID = ID
err := ctrl.repo.Items.Patch(auth, auth.GID, ID, body)
if err != nil {
return err
return repo.ItemOut{}, err
}
switch r.Method {
case http.MethodGet:
items, err := ctrl.repo.Items.GetOneByGroup(r.Context(), ctx.GID, ID)
if err != nil {
log.Err(err).Msg("failed to get item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, items)
case http.MethodDelete:
err = ctrl.repo.Items.DeleteByGroup(r.Context(), ctx.GID, ID)
if err != nil {
log.Err(err).Msg("failed to delete item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
case http.MethodPut:
body := repo.ItemUpdate{}
if err := server.Decode(r, &body); err != nil {
log.Err(err).Msg("failed to decode request body")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
body.ID = ID
result, err := ctrl.repo.Items.UpdateByGroup(r.Context(), ctx.GID, body)
if err != nil {
log.Err(err).Msg("failed to update item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, result)
}
return nil
return ctrl.repo.Items.GetOneByGroup(auth, auth.GID, ID)
}
return adapters.ActionID("id", fn, http.StatusOK)
}
// HandleGetAllCustomFieldNames godocs
@@ -206,17 +204,13 @@ func (ctrl *V1Controller) handleItemsGeneral() server.HandlerFunc {
// @Router /v1/items/fields [GET]
// @Success 200 {object} []string
// @Security Bearer
func (ctrl *V1Controller) HandleGetAllCustomFieldNames() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
v, err := ctrl.repo.Items.GetAllCustomFieldNames(r.Context(), ctx.GID)
if err != nil {
return err
}
return server.Respond(w, http.StatusOK, v)
func (ctrl *V1Controller) HandleGetAllCustomFieldNames() errchain.HandlerFunc {
fn := func(r *http.Request) ([]string, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Items.GetAllCustomFieldNames(auth, auth.GID)
}
return adapters.Command(fn, http.StatusOK)
}
// HandleGetAllCustomFieldValues godocs
@@ -228,17 +222,18 @@ func (ctrl *V1Controller) HandleGetAllCustomFieldNames() server.HandlerFunc {
// @Router /v1/items/fields/values [GET]
// @Success 200 {object} []string
// @Security Bearer
func (ctrl *V1Controller) HandleGetAllCustomFieldValues() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
v, err := ctrl.repo.Items.GetAllCustomFieldValues(r.Context(), ctx.GID, r.URL.Query().Get("field"))
if err != nil {
return err
}
return server.Respond(w, http.StatusOK, v)
func (ctrl *V1Controller) HandleGetAllCustomFieldValues() errchain.HandlerFunc {
type query struct {
Field string `schema:"field" validate:"required"`
}
fn := func(r *http.Request, q query) ([]string, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Items.GetAllCustomFieldValues(auth, auth.GID, q.Field)
}
return adapters.Query(fn, http.StatusOK)
}
// HandleItemsImport godocs
@@ -250,7 +245,7 @@ func (ctrl *V1Controller) HandleGetAllCustomFieldValues() server.HandlerFunc {
// @Param csv formData file true "CSV file to upload"
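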
// @Router /v1/items/import [Post]
// @Security Bearer
func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemsImport() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
err := r.ParseMultipartForm(ctrl.maxUploadSize << 20)
if err != nil {
@@ -272,7 +267,7 @@ func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
return server.JSON(w, http.StatusNoContent, nil)
}
}
@@ -283,7 +278,7 @@ func (ctrl *V1Controller) HandleItemsImport() server.HandlerFunc {
// @Success 200 {string} string "text/csv"
// @Router /v1/items/export [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleItemsExport() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemsExport() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
@@ -295,7 +290,9 @@ func (ctrl *V1Controller) HandleItemsExport() server.HandlerFunc {
w.Header().Set("Content-Type", "text/tsv")
w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv")
writer := csv.NewWriter(w)
writer.Comma = '\t'
return writer.WriteAll(csvData)
}
}
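
The export handler above now writes tab-separated output (writer.Comma = '\t') and serves it as homebox-items.tsv. The same pattern in isolation, as a small stdlib-only sketch; header and row values are made up.

package main

import (
	"encoding/csv"
	"net/http"
)

// exportTSV mirrors the pattern above: a csv.Writer with a tab delimiter
// streamed straight to the response.
func exportTSV(w http.ResponseWriter, r *http.Request) {
	rows := [][]string{
		{"Name", "Quantity", "Location"},
		{"Screwdriver", "3", "Garage"},
	}

	w.Header().Set("Content-Type", "text/tsv")
	w.Header().Set("Content-Disposition", "attachment;filename=homebox-items.tsv")

	writer := csv.NewWriter(w)
	writer.Comma = '\t'       // tab-separated instead of comma-separated
	_ = writer.WriteAll(rows) // WriteAll flushes for us
}

func main() {
	http.HandleFunc("/export", exportTSV)
	_ = http.ListenAndServe(":8080", nil)
}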

View File

@@ -8,7 +8,8 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
@@ -28,10 +29,10 @@ type (
// @Param type formData string true "Type of file"
// @Param name formData string true "name of the file including extension"
// @Success 200 {object} repo.ItemOut
// @Failure 422 {object} server.ErrorResponse
// @Failure 422 {object} validate.ErrorResponse
// @Router /v1/items/{id}/attachments [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemAttachmentCreate() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
err := r.ParseMultipartForm(ctrl.maxUploadSize << 20)
if err != nil {
@@ -61,7 +62,7 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
}
if !errs.Nil() {
return server.Respond(w, http.StatusUnprocessableEntity, errs)
return server.JSON(w, http.StatusUnprocessableEntity, errs)
}
attachmentType := r.FormValue("type")
@@ -88,7 +89,7 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusCreated, item)
return server.JSON(w, http.StatusCreated, item)
}
}
@@ -102,7 +103,7 @@ func (ctrl *V1Controller) HandleItemAttachmentCreate() server.HandlerFunc {
// @Success 200 {object} ItemAttachmentToken
// @Router /v1/items/{id}/attachments/{attachment_id} [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleItemAttachmentGet() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemAttachmentGet() errchain.HandlerFunc {
return ctrl.handleItemAttachmentsHandler
}
@@ -115,7 +116,7 @@ func (ctrl *V1Controller) HandleItemAttachmentGet() server.HandlerFunc {
// @Success 204
// @Router /v1/items/{id}/attachments/{attachment_id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleItemAttachmentDelete() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemAttachmentDelete() errchain.HandlerFunc {
return ctrl.handleItemAttachmentsHandler
}
@@ -129,7 +130,7 @@ func (ctrl *V1Controller) HandleItemAttachmentDelete() server.HandlerFunc {
// @Success 200 {object} repo.ItemOut
// @Router /v1/items/{id}/attachments/{attachment_id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleItemAttachmentUpdate() server.HandlerFunc {
func (ctrl *V1Controller) HandleItemAttachmentUpdate() errchain.HandlerFunc {
return ctrl.handleItemAttachmentsHandler
}
@@ -164,7 +165,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
return server.JSON(w, http.StatusNoContent, nil)
// Update Attachment Handler
case http.MethodPut:
@@ -182,7 +183,7 @@ func (ctrl *V1Controller) handleItemAttachmentsHandler(w http.ResponseWriter, r
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, val)
return server.JSON(w, http.StatusOK, val)
}
return nil
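
The attachment endpoints above consume multipart form uploads with "type" and "name" fields alongside the file part. For reference, a sketch of how a Go client could build such a request; the "file" field name, the "attachment" type value, the port, and the token handling are assumptions, and only the route and the "type"/"name" fields are taken from the swagger comments above.

package main

import (
	"bytes"
	"fmt"
	"io"
	"mime/multipart"
	"net/http"
	"os"
)

// uploadAttachment posts a file to the item attachments endpoint.
func uploadAttachment(itemID, token, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	var buf bytes.Buffer
	mw := multipart.NewWriter(&buf)

	part, err := mw.CreateFormFile("file", "receipt.pdf")
	if err != nil {
		return err
	}
	if _, err := io.Copy(part, f); err != nil {
		return err
	}
	_ = mw.WriteField("type", "attachment")
	_ = mw.WriteField("name", "receipt.pdf")
	mw.Close()

	url := fmt.Sprintf("http://localhost:7745/api/v1/items/%s/attachments", itemID)
	req, err := http.NewRequest(http.MethodPost, url, &buf)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", mw.FormDataContentType())
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

func main() {
	if err := uploadAttachment("ITEM-UUID", "API-TOKEN", "receipt.pdf"); err != nil {
		fmt.Println(err)
	}
}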

View File

@@ -3,12 +3,11 @@ package v1
import (
"net/http"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/rs/zerolog/log"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
)
// HandleLabelsGetAll godoc
@@ -16,19 +15,16 @@ import (
// @Summary Get All Labels
// @Tags Labels
// @Produce json
// @Success 200 {object} server.Results{items=[]repo.LabelOut}
// @Success 200 {object} []repo.LabelOut
// @Router /v1/labels [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleLabelsGetAll() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
user := services.UseUserCtx(r.Context())
labels, err := ctrl.repo.Labels.GetAll(r.Context(), user.GroupID)
if err != nil {
log.Err(err).Msg("error getting labels")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, server.Results{Items: labels})
func (ctrl *V1Controller) HandleLabelsGetAll() errchain.HandlerFunc {
fn := func(r *http.Request) ([]repo.LabelSummary, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Labels.GetAll(auth, auth.GID)
}
return adapters.Command(fn, http.StatusOK)
}
// HandleLabelsCreate godoc
@@ -40,23 +36,13 @@ func (ctrl *V1Controller) HandleLabelsGetAll() server.HandlerFunc {
// @Success 200 {object} repo.LabelSummary
// @Router /v1/labels [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleLabelsCreate() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
createData := repo.LabelCreate{}
if err := server.Decode(r, &createData); err != nil {
log.Err(err).Msg("error decoding label create data")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
user := services.UseUserCtx(r.Context())
label, err := ctrl.repo.Labels.Create(r.Context(), user.GroupID, createData)
if err != nil {
log.Err(err).Msg("error creating label")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusCreated, label)
func (ctrl *V1Controller) HandleLabelsCreate() errchain.HandlerFunc {
fn := func(r *http.Request, data repo.LabelCreate) (repo.LabelOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Labels.Create(auth, auth.GID, data)
}
return adapters.Action(fn, http.StatusCreated)
}
// HandleLabelDelete godocs
@@ -68,8 +54,14 @@ func (ctrl *V1Controller) HandleLabelsCreate() server.HandlerFunc {
// @Success 204
// @Router /v1/labels/{id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleLabelDelete() server.HandlerFunc {
return ctrl.handleLabelsGeneral()
func (ctrl *V1Controller) HandleLabelDelete() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (any, error) {
auth := services.NewContext(r.Context())
err := ctrl.repo.Labels.DeleteByGroup(auth, auth.GID, ID)
return nil, err
}
return adapters.CommandID("id", fn, http.StatusNoContent)
}
// HandleLabelGet godocs
@@ -81,8 +73,13 @@ func (ctrl *V1Controller) HandleLabelDelete() server.HandlerFunc {
// @Success 200 {object} repo.LabelOut
// @Router /v1/labels/{id} [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleLabelGet() server.HandlerFunc {
return ctrl.handleLabelsGeneral()
func (ctrl *V1Controller) HandleLabelGet() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (repo.LabelOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Labels.GetOneByGroup(auth, auth.GID, ID)
}
return adapters.CommandID("id", fn, http.StatusOK)
}
// HandleLabelUpdate godocs
@@ -94,55 +91,12 @@ func (ctrl *V1Controller) HandleLabelGet() server.HandlerFunc {
// @Success 200 {object} repo.LabelOut
// @Router /v1/labels/{id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleLabelUpdate() server.HandlerFunc {
return ctrl.handleLabelsGeneral()
}
func (ctrl *V1Controller) handleLabelsGeneral() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
ID, err := ctrl.routeID(r)
if err != nil {
return err
}
switch r.Method {
case http.MethodGet:
labels, err := ctrl.repo.Labels.GetOneByGroup(r.Context(), ctx.GID, ID)
if err != nil {
if ent.IsNotFound(err) {
log.Err(err).
Str("id", ID.String()).
Msg("label not found")
return validate.NewRequestError(err, http.StatusNotFound)
}
log.Err(err).Msg("error getting label")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, labels)
case http.MethodDelete:
err = ctrl.repo.Labels.DeleteByGroup(ctx, ctx.GID, ID)
if err != nil {
log.Err(err).Msg("error deleting label")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
case http.MethodPut:
body := repo.LabelUpdate{}
if err := server.Decode(r, &body); err != nil {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
body.ID = ID
result, err := ctrl.repo.Labels.UpdateByGroup(ctx, ctx.GID, body)
if err != nil {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, result)
}
return nil
func (ctrl *V1Controller) HandleLabelUpdate() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID, data repo.LabelUpdate) (repo.LabelOut, error) {
auth := services.NewContext(r.Context())
data.ID = ID
return ctrl.repo.Labels.UpdateByGroup(auth, auth.GID, data)
}
return adapters.ActionID("id", fn, http.StatusOK)
}

View File

@@ -3,77 +3,50 @@ package v1
import (
"net/http"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/rs/zerolog/log"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
)
// HandleLocationTreeQuery godoc
// HandleLocationTreeQuery
//
// @Summary Get Locations Tree
// @Tags Locations
// @Produce json
// @Param withItems query bool false "include items in response tree"
// @Success 200 {object} server.Results{items=[]repo.TreeItem}
// @Success 200 {object} []repo.TreeItem
// @Router /v1/locations/tree [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleLocationTreeQuery() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
user := services.UseUserCtx(r.Context())
q := r.URL.Query()
withItems := queryBool(q.Get("withItems"))
locTree, err := ctrl.repo.Locations.Tree(
r.Context(),
user.GroupID,
repo.TreeQuery{
WithItems: withItems,
},
)
if err != nil {
log.Err(err).Msg("failed to get locations tree")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, server.Results{Items: locTree})
func (ctrl *V1Controller) HandleLocationTreeQuery() errchain.HandlerFunc {
fn := func(r *http.Request, query repo.TreeQuery) ([]repo.TreeItem, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Locations.Tree(auth, auth.GID, query)
}
return adapters.Query(fn, http.StatusOK)
}
// HandleLocationGetAll godoc
// HandleLocationGetAll
//
// @Summary Get All Locations
// @Tags Locations
// @Produce json
// @Param filterChildren query bool false "Filter locations with parents"
// @Success 200 {object} server.Results{items=[]repo.LocationOutCount}
// @Success 200 {object} []repo.LocationOutCount
// @Router /v1/locations [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleLocationGetAll() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
user := services.UseUserCtx(r.Context())
q := r.URL.Query()
filter := repo.LocationQuery{
FilterChildren: queryBool(q.Get("filterChildren")),
}
locations, err := ctrl.repo.Locations.GetAll(r.Context(), user.GroupID, filter)
if err != nil {
log.Err(err).Msg("failed to get locations")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, server.Results{Items: locations})
func (ctrl *V1Controller) HandleLocationGetAll() errchain.HandlerFunc {
fn := func(r *http.Request, q repo.LocationQuery) ([]repo.LocationOutCount, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Locations.GetAll(auth, auth.GID, q)
}
return adapters.Query(fn, http.StatusOK)
}
// HandleLocationCreate godoc
// HandleLocationCreate
//
// @Summary Create Location
// @Tags Locations
@@ -82,26 +55,16 @@ func (ctrl *V1Controller) HandleLocationGetAll() server.HandlerFunc {
// @Success 200 {object} repo.LocationSummary
// @Router /v1/locations [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleLocationCreate() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
createData := repo.LocationCreate{}
if err := server.Decode(r, &createData); err != nil {
log.Err(err).Msg("failed to decode location create data")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
user := services.UseUserCtx(r.Context())
location, err := ctrl.repo.Locations.Create(r.Context(), user.GroupID, createData)
if err != nil {
log.Err(err).Msg("failed to create location")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusCreated, location)
func (ctrl *V1Controller) HandleLocationCreate() errchain.HandlerFunc {
fn := func(r *http.Request, createData repo.LocationCreate) (repo.LocationOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Locations.Create(auth, auth.GID, createData)
}
return adapters.Action(fn, http.StatusCreated)
}
// HandleLocationDelete godocs
// HandleLocationDelete
//
// @Summary Delete Location
// @Tags Locations
@@ -110,11 +73,17 @@ func (ctrl *V1Controller) HandleLocationCreate() server.HandlerFunc {
// @Success 204
// @Router /v1/locations/{id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleLocationDelete() server.HandlerFunc {
return ctrl.handleLocationGeneral()
func (ctrl *V1Controller) HandleLocationDelete() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (any, error) {
auth := services.NewContext(r.Context())
err := ctrl.repo.Locations.DeleteByGroup(auth, auth.GID, ID)
return nil, err
}
return adapters.CommandID("id", fn, http.StatusNoContent)
}
// HandleLocationGet godocs
// HandleLocationGet
//
// @Summary Get Location
// @Tags Locations
@@ -123,11 +92,16 @@ func (ctrl *V1Controller) HandleLocationDelete() server.HandlerFunc {
// @Success 200 {object} repo.LocationOut
// @Router /v1/locations/{id} [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleLocationGet() server.HandlerFunc {
return ctrl.handleLocationGeneral()
func (ctrl *V1Controller) HandleLocationGet() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (repo.LocationOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Locations.GetOneByGroup(auth, auth.GID, ID)
}
return adapters.CommandID("id", fn, http.StatusOK)
}
// HandleLocationUpdate godocs
// HandleLocationUpdate
//
// @Summary Update Location
// @Tags Locations
@@ -137,58 +111,12 @@ func (ctrl *V1Controller) HandleLocationGet() server.HandlerFunc {
// @Success 200 {object} repo.LocationOut
// @Router /v1/locations/{id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleLocationUpdate() server.HandlerFunc {
return ctrl.handleLocationGeneral()
}
func (ctrl *V1Controller) handleLocationGeneral() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
ID, err := ctrl.routeID(r)
if err != nil {
return err
}
switch r.Method {
case http.MethodGet:
location, err := ctrl.repo.Locations.GetOneByGroup(r.Context(), ctx.GID, ID)
if err != nil {
l := log.Err(err).
Str("ID", ID.String()).
Str("GID", ctx.GID.String())
if ent.IsNotFound(err) {
l.Msg("location not found")
return validate.NewRequestError(err, http.StatusNotFound)
}
l.Msg("failed to get location")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, location)
case http.MethodPut:
body := repo.LocationUpdate{}
if err := server.Decode(r, &body); err != nil {
log.Err(err).Msg("failed to decode location update data")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
body.ID = ID
result, err := ctrl.repo.Locations.UpdateOneByGroup(r.Context(), ctx.GID, ID, body)
if err != nil {
log.Err(err).Msg("failed to update location")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, result)
case http.MethodDelete:
err = ctrl.repo.Locations.DeleteByGroup(r.Context(), ctx.GID, ID)
if err != nil {
log.Err(err).Msg("failed to delete location")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
}
return nil
func (ctrl *V1Controller) HandleLocationUpdate() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID, body repo.LocationUpdate) (repo.LocationOut, error) {
auth := services.NewContext(r.Context())
body.ID = ID
return ctrl.repo.Locations.UpdateByGroup(auth, auth.GID, ID, body)
}
return adapters.ActionID("id", fn, http.StatusOK)
}

View File

@@ -2,13 +2,12 @@ package v1
import (
"net/http"
"strconv"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/rs/zerolog/log"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
)
// HandleMaintenanceGetLog godoc
@@ -19,8 +18,13 @@ import (
// @Success 200 {object} repo.MaintenanceLog
// @Router /v1/items/{id}/maintenance [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleMaintenanceLogGet() server.HandlerFunc {
return ctrl.handleMaintenanceLog()
func (ctrl *V1Controller) HandleMaintenanceLogGet() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID, q repo.MaintenanceLogQuery) (repo.MaintenanceLog, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.MaintEntry.GetLog(auth, auth.GID, ID, q)
}
return adapters.QueryID("id", fn, http.StatusOK)
}
// HandleMaintenanceEntryCreate godoc
@@ -29,11 +33,16 @@ func (ctrl *V1Controller) HandleMaintenanceLogGet() server.HandlerFunc {
// @Tags Maintenance
// @Produce json
// @Param payload body repo.MaintenanceEntryCreate true "Entry Data"
// @Success 200 {object} repo.MaintenanceEntry
// @Success 201 {object} repo.MaintenanceEntry
// @Router /v1/items/{id}/maintenance [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleMaintenanceEntryCreate() server.HandlerFunc {
return ctrl.handleMaintenanceLog()
func (ctrl *V1Controller) HandleMaintenanceEntryCreate() errchain.HandlerFunc {
fn := func(r *http.Request, itemID uuid.UUID, body repo.MaintenanceEntryCreate) (repo.MaintenanceEntry, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.MaintEntry.Create(auth, itemID, body)
}
return adapters.ActionID("id", fn, http.StatusCreated)
}
// HandleMaintenanceEntryDelete godoc
@@ -44,8 +53,14 @@ func (ctrl *V1Controller) HandleMaintenanceEntryCreate() server.HandlerFunc {
// @Success 204
// @Router /v1/items/{id}/maintenance/{entry_id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleMaintenanceEntryDelete() server.HandlerFunc {
return ctrl.handleMaintenanceLog()
func (ctrl *V1Controller) HandleMaintenanceEntryDelete() errchain.HandlerFunc {
fn := func(r *http.Request, entryID uuid.UUID) (any, error) {
auth := services.NewContext(r.Context())
err := ctrl.repo.MaintEntry.Delete(auth, entryID)
return nil, err
}
return adapters.CommandID("entry_id", fn, http.StatusNoContent)
}
// HandleMaintenanceEntryUpdate godoc
@@ -57,81 +72,11 @@ func (ctrl *V1Controller) HandleMaintenanceEntryDelete() server.HandlerFunc {
// @Success 200 {object} repo.MaintenanceEntry
// @Router /v1/items/{id}/maintenance/{entry_id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() server.HandlerFunc {
return ctrl.handleMaintenanceLog()
}
func (ctrl *V1Controller) handleMaintenanceLog() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
itemID, err := ctrl.routeID(r)
if err != nil {
return err
}
switch r.Method {
case http.MethodGet:
completed, _ := strconv.ParseBool(r.URL.Query().Get("completed"))
scheduled, _ := strconv.ParseBool(r.URL.Query().Get("scheduled"))
query := repo.MaintenanceLogQuery{
Completed: completed,
Scheduled: scheduled,
}
mlog, err := ctrl.repo.MaintEntry.GetLog(ctx, itemID, query)
if err != nil {
log.Err(err).Msg("failed to get items")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, mlog)
case http.MethodPost:
var create repo.MaintenanceEntryCreate
err := server.Decode(r, &create)
if err != nil {
return validate.NewRequestError(err, http.StatusBadRequest)
}
entry, err := ctrl.repo.MaintEntry.Create(ctx, itemID, create)
if err != nil {
log.Err(err).Msg("failed to create item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusCreated, entry)
case http.MethodPut:
entryID, err := ctrl.routeUUID(r, "entry_id")
if err != nil {
return err
}
var update repo.MaintenanceEntryUpdate
err = server.Decode(r, &update)
if err != nil {
return validate.NewRequestError(err, http.StatusBadRequest)
}
entry, err := ctrl.repo.MaintEntry.Update(ctx, entryID, update)
if err != nil {
log.Err(err).Msg("failed to update item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, entry)
case http.MethodDelete:
entryID, err := ctrl.routeUUID(r, "entry_id")
if err != nil {
return err
}
err = ctrl.repo.MaintEntry.Delete(ctx, entryID)
if err != nil {
log.Err(err).Msg("failed to delete item")
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
}
return nil
func (ctrl *V1Controller) HandleMaintenanceEntryUpdate() errchain.HandlerFunc {
fn := func(r *http.Request, entryID uuid.UUID, body repo.MaintenanceEntryUpdate) (repo.MaintenanceEntry, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.MaintEntry.Update(auth, entryID, body)
}
return adapters.ActionID("entry_id", fn, http.StatusOK)
}

View File

@@ -1,7 +1,6 @@
package v1
import (
"context"
"net/http"
"github.com/containrrr/shoutrrr"
@@ -9,7 +8,7 @@ import (
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
)
// HandleGetUserNotifiers godoc
@@ -17,13 +16,13 @@ import (
// @Summary Get Notifiers
// @Tags Notifiers
// @Produce json
// @Success 200 {object} server.Results{items=[]repo.NotifierOut}
// @Success 200 {object} []repo.NotifierOut
// @Router /v1/notifiers [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGetUserNotifiers() server.HandlerFunc {
fn := func(ctx context.Context, _ struct{}) ([]repo.NotifierOut, error) {
user := services.UseUserCtx(ctx)
return ctrl.repo.Notifiers.GetByUser(ctx, user.ID)
func (ctrl *V1Controller) HandleGetUserNotifiers() errchain.HandlerFunc {
fn := func(r *http.Request, _ struct{}) ([]repo.NotifierOut, error) {
user := services.UseUserCtx(r.Context())
return ctrl.repo.Notifiers.GetByUser(r.Context(), user.ID)
}
return adapters.Query(fn, http.StatusOK)
@@ -38,10 +37,10 @@ func (ctrl *V1Controller) HandleGetUserNotifiers() server.HandlerFunc {
// @Success 200 {object} repo.NotifierOut
// @Router /v1/notifiers [POST]
// @Security Bearer
func (ctrl *V1Controller) HandleCreateNotifier() server.HandlerFunc {
fn := func(ctx context.Context, in repo.NotifierCreate) (repo.NotifierOut, error) {
auth := services.NewContext(ctx)
return ctrl.repo.Notifiers.Create(ctx, auth.GID, auth.UID, in)
func (ctrl *V1Controller) HandleCreateNotifier() errchain.HandlerFunc {
fn := func(r *http.Request, in repo.NotifierCreate) (repo.NotifierOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Notifiers.Create(auth, auth.GID, auth.UID, in)
}
return adapters.Action(fn, http.StatusCreated)
@@ -55,10 +54,10 @@ func (ctrl *V1Controller) HandleCreateNotifier() server.HandlerFunc {
// @Success 204
// @Router /v1/notifiers/{id} [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleDeleteNotifier() server.HandlerFunc {
fn := func(ctx context.Context, ID uuid.UUID) (any, error) {
auth := services.NewContext(ctx)
return nil, ctrl.repo.Notifiers.Delete(ctx, auth.UID, ID)
func (ctrl *V1Controller) HandleDeleteNotifier() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID) (any, error) {
auth := services.NewContext(r.Context())
return nil, ctrl.repo.Notifiers.Delete(auth, auth.UID, ID)
}
return adapters.CommandID("id", fn, http.StatusNoContent)
@@ -73,10 +72,10 @@ func (ctrl *V1Controller) HandleDeleteNotifier() server.HandlerFunc {
// @Success 200 {object} repo.NotifierOut
// @Router /v1/notifiers/{id} [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleUpdateNotifier() server.HandlerFunc {
fn := func(ctx context.Context, ID uuid.UUID, in repo.NotifierUpdate) (repo.NotifierOut, error) {
auth := services.NewContext(ctx)
return ctrl.repo.Notifiers.Update(ctx, auth.UID, ID, in)
func (ctrl *V1Controller) HandleUpdateNotifier() errchain.HandlerFunc {
fn := func(r *http.Request, ID uuid.UUID, in repo.NotifierUpdate) (repo.NotifierOut, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Notifiers.Update(auth, auth.UID, ID, in)
}
return adapters.ActionID("id", fn, http.StatusOK)
@@ -92,12 +91,12 @@ func (ctrl *V1Controller) HandleUpdateNotifier() server.HandlerFunc {
// @Success 204
// @Router /v1/notifiers/test [POST]
// @Security Bearer
func (ctrl *V1Controller) HandlerNotifierTest() server.HandlerFunc {
func (ctrl *V1Controller) HandlerNotifierTest() errchain.HandlerFunc {
type body struct {
URL string `json:"url" validate:"required"`
}
fn := func(ctx context.Context, q body) (any, error) {
fn := func(r *http.Request, q body) (any, error) {
err := shoutrrr.Send(q.URL, "Test message from Homebox")
return nil, err
}
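
The notifier handlers keep the adapters pattern, and the test endpoint simply forwards the submitted URL to shoutrrr.Send. Standalone, that call looks like the following; the ntfy service URL is a placeholder.

package main

import (
	"log"

	"github.com/containrrr/shoutrrr"
)

func main() {
	// Same call the test handler above makes; the service URL is a placeholder.
	if err := shoutrrr.Send("ntfy://ntfy.sh/homebox-test", "Test message from Homebox"); err != nil {
		log.Fatal(err)
	}
}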

View File

@@ -5,9 +5,10 @@ import (
"image/png"
"io"
"net/http"
"net/url"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
"github.com/yeqown/go-qrcode/v2"
"github.com/yeqown/go-qrcode/writer/standard"
@@ -26,25 +27,29 @@ var qrcodeLogo []byte
// @Success 200 {string} string "image/jpeg"
// @Router /v1/qrcode [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGenerateQRCode() server.HandlerFunc {
const MaxLength = 4_296 // assume alphanumeric characters only
func (ctrl *V1Controller) HandleGenerateQRCode() errchain.HandlerFunc {
type query struct {
// 4,296 characters is the maximum length of a QR code
Data string `schema:"data" validate:"required,max=4296"`
}
return func(w http.ResponseWriter, r *http.Request) error {
data := r.URL.Query().Get("data")
q, err := adapters.DecodeQuery[query](r)
if err != nil {
return err
}
image, err := png.Decode(bytes.NewReader(qrcodeLogo))
if err != nil {
panic(err)
}
if len(data) > MaxLength {
return validate.NewFieldErrors(validate.FieldError{
Field: "data",
Error: "max length is 4,296 characters exceeded",
})
decodedStr, err := url.QueryUnescape(q.Data)
if err != nil {
return err
}
qrc, err := qrcode.New(data)
qrc, err := qrcode.New(decodedStr)
if err != nil {
return err
}
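
The QR handler now binds its query string through adapters.DecodeQuery with schema and validate struct tags instead of checking the length by hand. DecodeQuery is not shown in this diff; the schema tags suggest a gorilla/schema-style binder, so the following is only a guess at its shape, without the validation step.

package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/schema"
)

var decoder = schema.NewDecoder()

// decodeQuery is a guess at what adapters.DecodeQuery might do: bind URL
// query parameters onto a struct via `schema` tags. The real helper is not
// shown here and presumably also runs the `validate` rules.
func decodeQuery[T any](r *http.Request) (T, error) {
	var v T
	err := decoder.Decode(&v, r.URL.Query())
	return v, err
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "/api/v1/qrcode?data=hello%20world", nil)

	type query struct {
		Data string `schema:"data"`
	}
	q, err := decodeQuery[query](req)
	fmt.Println(q.Data, err) // "hello world" <nil>
}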

View File

@@ -4,7 +4,7 @@ import (
"net/http"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
)
// HandleBillOfMaterialsExport godoc
@@ -15,7 +15,7 @@ import (
// @Success 200 {string} string "text/csv"
// @Router /v1/reporting/bill-of-materials [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleBillOfMaterialsExport() server.HandlerFunc {
func (ctrl *V1Controller) HandleBillOfMaterialsExport() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
actor := services.UseUserCtx(r.Context())

View File

@@ -5,8 +5,11 @@ import (
"time"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/homebox/backend/internal/web/adapters"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
)
// HandleGroupGet godoc
@@ -17,17 +20,13 @@ import (
// @Success 200 {object} []repo.TotalsByOrganizer
// @Router /v1/groups/statistics/locations [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupStatisticsLocations() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
stats, err := ctrl.repo.Groups.StatsLocationsByPurchasePrice(ctx, ctx.GID)
if err != nil {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, stats)
func (ctrl *V1Controller) HandleGroupStatisticsLocations() errchain.HandlerFunc {
fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Groups.StatsLocationsByPurchasePrice(auth, auth.GID)
}
return adapters.Command(fn, http.StatusOK)
}
// HandleGroupStatisticsLabels godoc
@@ -38,17 +37,13 @@ func (ctrl *V1Controller) HandleGroupStatisticsLocations() server.HandlerFunc {
// @Success 200 {object} []repo.TotalsByOrganizer
// @Router /v1/groups/statistics/labels [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupStatisticsLabels() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
stats, err := ctrl.repo.Groups.StatsLabelsByPurchasePrice(ctx, ctx.GID)
if err != nil {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, stats)
func (ctrl *V1Controller) HandleGroupStatisticsLabels() errchain.HandlerFunc {
fn := func(r *http.Request) ([]repo.TotalsByOrganizer, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Groups.StatsLabelsByPurchasePrice(auth, auth.GID)
}
return adapters.Command(fn, http.StatusOK)
}
// HandleGroupStatistics godoc
@@ -59,17 +54,13 @@ func (ctrl *V1Controller) HandleGroupStatisticsLabels() server.HandlerFunc {
// @Success 200 {object} repo.GroupStatistics
// @Router /v1/groups/statistics [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupStatistics() server.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
ctx := services.NewContext(r.Context())
stats, err := ctrl.repo.Groups.StatsGroup(ctx, ctx.GID)
if err != nil {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, stats)
func (ctrl *V1Controller) HandleGroupStatistics() errchain.HandlerFunc {
fn := func(r *http.Request) (repo.GroupStatistics, error) {
auth := services.NewContext(r.Context())
return ctrl.repo.Groups.StatsGroup(auth, auth.GID)
}
return adapters.Command(fn, http.StatusOK)
}
// HandleGroupStatisticsPriceOverTime godoc
@@ -82,7 +73,7 @@ func (ctrl *V1Controller) HandleGroupStatistics() server.HandlerFunc {
// @Param end query string false "end date"
// @Router /v1/groups/statistics/purchase-price [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() server.HandlerFunc {
func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() errchain.HandlerFunc {
parseDate := func(datestr string, defaultDate time.Time) (time.Time, error) {
if datestr == "" {
return defaultDate, nil
@@ -108,6 +99,6 @@ func (ctrl *V1Controller) HandleGroupStatisticsPriceOverTime() server.HandlerFun
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, stats)
return server.JSON(w, http.StatusOK, stats)
}
}
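
HandleGroupStatisticsPriceOverTime keeps its hand-written body and only swaps Respond for JSON; the parseDate helper it opens with is cut off by this hunk. A minimal sketch of such a helper, assuming plain YYYY-MM-DD query values (the actual accepted format is not visible here):

package main

import (
	"fmt"
	"time"
)

// parseDate falls back to defaultDate when the query parameter is empty.
// The "2006-01-02" layout is an assumption; the real handler may accept a
// different format.
func parseDate(datestr string, defaultDate time.Time) (time.Time, error) {
	if datestr == "" {
		return defaultDate, nil
	}
	return time.Parse("2006-01-02", datestr)
}

func main() {
	start, _ := parseDate("", time.Now().AddDate(0, -3, 0)) // default: three months ago
	end, _ := parseDate("2023-11-15", time.Now())
	fmt.Println(start.Format("2006-01-02"), end.Format("2006-01-02"))
}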

View File

@@ -8,7 +8,8 @@ import (
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog/log"
)
@@ -20,7 +21,7 @@ import (
// @Param payload body services.UserRegistration true "User Data"
// @Success 204
// @Router /v1/users/register [Post]
func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
func (ctrl *V1Controller) HandleUserRegistration() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
regData := services.UserRegistration{}
@@ -39,7 +40,7 @@ func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
return server.JSON(w, http.StatusNoContent, nil)
}
}
@@ -48,10 +49,10 @@ func (ctrl *V1Controller) HandleUserRegistration() server.HandlerFunc {
// @Summary Get User Self
// @Tags User
// @Produce json
// @Success 200 {object} server.Result{item=repo.UserOut}
// @Success 200 {object} Wrapped{item=repo.UserOut}
// @Router /v1/users/self [GET]
// @Security Bearer
func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
func (ctrl *V1Controller) HandleUserSelf() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
token := services.UseTokenCtx(r.Context())
usr, err := ctrl.svc.User.GetSelf(r.Context(), token)
@@ -60,7 +61,7 @@ func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, server.Wrap(usr))
return server.JSON(w, http.StatusOK, Wrap(usr))
}
}
@@ -70,10 +71,10 @@ func (ctrl *V1Controller) HandleUserSelf() server.HandlerFunc {
// @Tags User
// @Produce json
// @Param payload body repo.UserUpdate true "User Data"
// @Success 200 {object} server.Result{item=repo.UserUpdate}
// @Success 200 {object} Wrapped{item=repo.UserUpdate}
// @Router /v1/users/self [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
func (ctrl *V1Controller) HandleUserSelfUpdate() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
updateData := repo.UserUpdate{}
if err := server.Decode(r, &updateData); err != nil {
@@ -87,7 +88,7 @@ func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusOK, server.Wrap(newData))
return server.JSON(w, http.StatusOK, Wrap(newData))
}
}
@@ -99,7 +100,7 @@ func (ctrl *V1Controller) HandleUserSelfUpdate() server.HandlerFunc {
// @Success 204
// @Router /v1/users/self [DELETE]
// @Security Bearer
func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc {
func (ctrl *V1Controller) HandleUserSelfDelete() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
if ctrl.isDemo {
return validate.NewRequestError(nil, http.StatusForbidden)
@@ -110,7 +111,7 @@ func (ctrl *V1Controller) HandleUserSelfDelete() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
return server.JSON(w, http.StatusNoContent, nil)
}
}
@@ -129,7 +130,7 @@ type (
// @Param payload body ChangePassword true "Password Payload"
// @Router /v1/users/change-password [PUT]
// @Security Bearer
func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc {
func (ctrl *V1Controller) HandleUserSelfChangePassword() errchain.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
if ctrl.isDemo {
return validate.NewRequestError(nil, http.StatusForbidden)
@@ -148,6 +149,6 @@ func (ctrl *V1Controller) HandleUserSelfChangePassword() server.HandlerFunc {
return validate.NewRequestError(err, http.StatusInternalServerError)
}
return server.Respond(w, http.StatusNoContent, nil)
return server.JSON(w, http.StatusNoContent, nil)
}
}
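
The self endpoints now return Wrap(usr) and document the envelope as Wrapped{item=...}; neither is defined in this diff. A plausible shape, stated purely as an assumption, is a thin JSON envelope:

package v1

// Wrapped and Wrap are not shown in this diff; this is only a guess at the
// minimal envelope the annotations above describe.
type Wrapped struct {
	Item any `json:"item"`
}

func Wrap(v any) Wrapped {
	return Wrapped{Item: v}
}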

View File

@@ -2,6 +2,7 @@ package main
import (
"context"
"fmt"
"net/http"
"os"
"path/filepath"
@@ -9,16 +10,23 @@ import (
atlas "ariga.io/atlas/sql/migrate"
"entgo.io/ent/dialect/sql/schema"
"github.com/hay-kot/homebox/backend/app/api/static/docs"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/migrations"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/sys/config"
"github.com/hay-kot/homebox/backend/internal/web/mid"
"github.com/hay-kot/homebox/backend/pkgs/server"
_ "github.com/mattn/go-sqlite3"
"github.com/hay-kot/httpkit/errchain"
"github.com/hay-kot/httpkit/server"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/rs/zerolog/pkgerrors"
_ "github.com/hay-kot/homebox/backend/pkgs/cgofreesqlite"
)
var (
@@ -29,22 +37,21 @@ var (
// @title Homebox API
// @version 1.0
// @description Track, Manage, and Organize your Shit.
// @description Track, Manage, and Organize your Things.
// @contact.name Don't
// @license.name MIT
// @BasePath /api
// @securityDefinitions.apikey Bearer
// @in header
// @name Authorization
// @description "Type 'Bearer TOKEN' to correctly set the API Key"
func main() {
zerolog.ErrorStackMarshaler = pkgerrors.MarshalStack
cfg, err := config.New()
if err != nil {
panic(err)
}
docs.SwaggerInfo.Host = cfg.Swagger.Host
if err := run(cfg); err != nil {
panic(err)
}
@@ -110,39 +117,46 @@ func run(cfg *config.Config) error {
return err
}
app.bus = eventbus.New()
app.db = c
app.repos = repo.New(c, cfg.Storage.Data)
app.repos = repo.New(c, app.bus, cfg.Storage.Data)
app.services = services.New(
app.repos,
services.WithAutoIncrementAssetID(cfg.Options.AutoIncrementAssetID),
)
// =========================================================================
// Start Server\
// Start Server
logger := log.With().Caller().Logger()
mwLogger := mid.Logger(logger)
if app.conf.Mode == config.ModeDevelopment {
mwLogger = mid.SugarLogger(logger)
}
router := chi.NewMux()
router.Use(
middleware.RequestID,
middleware.RealIP,
mid.Logger(logger),
middleware.Recoverer,
middleware.StripSlashes,
)
chain := errchain.New(mid.Errors(app.server, logger))
app.mountRoutes(router, chain, app.repos)
app.server = server.NewServer(
server.WithHost(app.conf.Web.Host),
server.WithPort(app.conf.Web.Port),
server.WithMiddleware(
mwLogger,
mid.Errors(logger),
mid.Panic(app.conf.Mode == config.ModeDevelopment),
),
server.WithReadTimeout(app.conf.Web.ReadTimeout),
server.WithWriteTimeout(app.conf.Web.WriteTimeout),
server.WithIdleTimeout(app.conf.Web.IdleTimeout),
)
app.mountRoutes(app.repos)
log.Info().Msgf("Starting HTTP Server on %s:%s", app.server.Host, app.server.Port)
// =========================================================================
// Start Recurring Tasks
go app.bus.Run()
go app.startBgTask(time.Duration(24)*time.Hour, func() {
_, err := app.repos.AuthTokens.PurgeExpiredTokens(context.Background())
if err != nil {
@@ -159,6 +173,19 @@ func run(cfg *config.Config) error {
Msg("failed to purge expired invitations")
}
})
go app.startBgTask(time.Duration(1)*time.Hour, func() {
now := time.Now()
if now.Hour() == 8 {
fmt.Println("run notifiers")
err := app.services.BackgroundService.SendNotifiersToday(context.Background())
if err != nil {
log.Error().
Err(err).
Msg("failed to send notifiers")
}
}
})
// TODO: Remove through external API that does setup
if cfg.Demo {
@@ -175,5 +202,5 @@ func run(cfg *config.Config) error {
}()
}
return app.server.Start()
return app.server.Start(router)
}
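For orientation, here is a condensed, hedged sketch of the new startup flow that the hunk above shows piecewise: a plain chi mux carries the HTTP middleware, errchain owns error handling, and the httpkit server is started with the router. The sketch only reorders calls that appear in this diff; any httpkit/errchain behavior beyond the signatures shown here is an assumption.

    // Condensed sketch of the new wiring (not the full run() body).
    logger := log.With().Caller().Logger()

    router := chi.NewMux()
    router.Use(
        middleware.RequestID,
        middleware.RealIP,
        mid.Logger(logger),
        middleware.Recoverer,
        middleware.StripSlashes,
    )

    app.server = server.NewServer(
        server.WithHost(app.conf.Web.Host),
        server.WithPort(app.conf.Web.Port),
        server.WithReadTimeout(app.conf.Web.ReadTimeout),
        server.WithWriteTimeout(app.conf.Web.WriteTimeout),
        server.WithIdleTimeout(app.conf.Web.IdleTimeout),
    )

    // errchain converts error-returning handlers into plain http.Handlers and
    // funnels every returned error through a single error middleware.
    chain := errchain.New(mid.Errors(app.server, logger))
    app.mountRoutes(router, chain, app.repos)

    // The chi router is handed to Start instead of middleware being attached
    // to the server itself.
    return app.server.Start(router)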

View File

@@ -7,9 +7,10 @@ import (
"net/url"
"strings"
v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1"
"github.com/hay-kot/homebox/backend/internal/core/services"
"github.com/hay-kot/homebox/backend/internal/sys/validate"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
)
type tokenHasKey struct {
@@ -30,9 +31,9 @@ const (
// the required roles, a 403 Forbidden will be returned.
//
// WARNING: This middleware _MUST_ be called after mwAuthToken or else it will panic
func (a *app) mwRoles(rm RoleMode, required ...string) server.Middleware {
return func(next server.Handler) server.Handler {
return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
func (a *app) mwRoles(rm RoleMode, required ...string) errchain.Middleware {
return func(next errchain.Handler) errchain.Handler {
return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
ctx := r.Context()
maybeToken := ctx.Value(hashedToken)
@@ -94,20 +95,6 @@ func getQuery(r *http.Request) (string, error) {
return token, nil
}
func getCookie(r *http.Request) (string, error) {
cookie, err := r.Cookie("hb.auth.token")
if err != nil {
return "", errors.New("access_token cookie is required")
}
token, err := url.QueryUnescape(cookie.Value)
if err != nil {
return "", errors.New("access_token cookie is required")
}
return token, nil
}
// mwAuthToken is a middleware that will check the database for a stateful token
// and attach its user to the request context, or return an appropriate error.
// Authorization is supported by token via headers, query parameters, or cookies.
@@ -115,21 +102,30 @@ func getCookie(r *http.Request) (string, error) {
// Example:
// - header = "Bearer 1234567890"
// - query = "?access_token=1234567890"
// - cookie = hb.auth.token = 1234567890
func (a *app) mwAuthToken(next server.Handler) server.Handler {
return server.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
keyFuncs := [...]KeyFunc{
getBearer,
getCookie,
getQuery,
func (a *app) mwAuthToken(next errchain.Handler) errchain.Handler {
return errchain.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
var requestToken string
// We ignore the error to allow the next strategy to be attempted
{
cookies, _ := v1.GetCookies(r)
if cookies != nil {
requestToken = cookies.Token
}
}
var requestToken string
for _, keyFunc := range keyFuncs {
token, err := keyFunc(r)
if err == nil {
requestToken = token
break
if requestToken == "" {
keyFuncs := [...]KeyFunc{
getBearer,
getQuery,
}
for _, keyFunc := range keyFuncs {
token, err := keyFunc(r)
if err == nil {
requestToken = token
break
}
}
}
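To make the resolution order above concrete, here is an illustrative client-side snippet (assuming net/http) showing the three ways a request can carry a token: the hb.auth.token cookie is checked first, then the Authorization header, then the access_token query parameter. The cookie and parameter names come from this diff; the host, path, and token value are placeholders.

    // Illustrative only: three equivalent ways to authenticate a request
    // against mwAuthToken, checked in the order described above.
    token := "1234567890" // placeholder value, as in the comment above

    req, _ := http.NewRequest(http.MethodGet, "http://localhost:7745/api/v1/users/self", nil)

    // 1. Cookie, set by the login handler and preferred by the web UI.
    req.AddCookie(&http.Cookie{Name: "hb.auth.token", Value: token})

    // 2. Bearer header, typical for API clients.
    req.Header.Set("Authorization", "Bearer "+token)

    // 3. Query parameter, useful for direct links such as attachments.
    q := req.URL.Query()
    q.Set("access_token", token)
    req.URL.RawQuery = q.Encode()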

View File

@@ -3,19 +3,19 @@ package main
import (
"embed"
"errors"
"fmt"
"io"
"mime"
"net/http"
"path"
"path/filepath"
"github.com/go-chi/chi/v5"
"github.com/hay-kot/homebox/backend/app/api/handlers/debughandlers"
v1 "github.com/hay-kot/homebox/backend/app/api/handlers/v1"
_ "github.com/hay-kot/homebox/backend/app/api/static/docs"
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/pkgs/server"
"github.com/hay-kot/httpkit/errchain"
httpSwagger "github.com/swaggo/http-swagger" // http-swagger middleware
)
@@ -36,12 +36,12 @@ func (a *app) debugRouter() *http.ServeMux {
}
// mountRoutes registers all the routes for the API
func (a *app) mountRoutes(repos *repo.AllRepos) {
func (a *app) mountRoutes(r *chi.Mux, chain *errchain.ErrChain, repos *repo.AllRepos) {
registerMimes()
a.server.Get("/swagger/*", server.ToHandler(httpSwagger.Handler(
httpSwagger.URL(fmt.Sprintf("%s://%s/swagger/doc.json", a.conf.Swagger.Scheme, a.conf.Swagger.Host)),
)))
r.Get("/swagger/*", httpSwagger.Handler(
httpSwagger.URL("/swagger/doc.json"),
))
// =========================================================================
// API Version 1
@@ -51,104 +51,111 @@ func (a *app) mountRoutes(repos *repo.AllRepos) {
v1Ctrl := v1.NewControllerV1(
a.services,
a.repos,
a.bus,
v1.WithMaxUploadSize(a.conf.Web.MaxUploadSize),
v1.WithRegistration(a.conf.Options.AllowRegistration),
v1.WithDemoStatus(a.conf.Demo), // Disable Password Change in Demo Mode
)
a.server.Get(v1Base("/status"), v1Ctrl.HandleBase(func() bool { return true }, v1.Build{
r.Get(v1Base("/status"), chain.ToHandlerFunc(v1Ctrl.HandleBase(func() bool { return true }, v1.Build{
Version: version,
Commit: commit,
BuildTime: buildTime,
}))
})))
a.server.Post(v1Base("/users/register"), v1Ctrl.HandleUserRegistration())
a.server.Post(v1Base("/users/login"), v1Ctrl.HandleAuthLogin())
r.Post(v1Base("/users/register"), chain.ToHandlerFunc(v1Ctrl.HandleUserRegistration()))
r.Post(v1Base("/users/login"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogin()))
userMW := []server.Middleware{
userMW := []errchain.Middleware{
a.mwAuthToken,
a.mwRoles(RoleModeOr, authroles.RoleUser.String()),
}
a.server.Get(v1Base("/users/self"), v1Ctrl.HandleUserSelf(), userMW...)
a.server.Put(v1Base("/users/self"), v1Ctrl.HandleUserSelfUpdate(), userMW...)
a.server.Delete(v1Base("/users/self"), v1Ctrl.HandleUserSelfDelete(), userMW...)
a.server.Post(v1Base("/users/logout"), v1Ctrl.HandleAuthLogout(), userMW...)
a.server.Get(v1Base("/users/refresh"), v1Ctrl.HandleAuthRefresh(), userMW...)
a.server.Put(v1Base("/users/self/change-password"), v1Ctrl.HandleUserSelfChangePassword(), userMW...)
r.Get(v1Base("/ws/events"), chain.ToHandlerFunc(v1Ctrl.HandleCacheWS(), userMW...))
r.Get(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelf(), userMW...))
r.Put(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfUpdate(), userMW...))
r.Delete(v1Base("/users/self"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfDelete(), userMW...))
r.Post(v1Base("/users/logout"), chain.ToHandlerFunc(v1Ctrl.HandleAuthLogout(), userMW...))
r.Get(v1Base("/users/refresh"), chain.ToHandlerFunc(v1Ctrl.HandleAuthRefresh(), userMW...))
r.Put(v1Base("/users/self/change-password"), chain.ToHandlerFunc(v1Ctrl.HandleUserSelfChangePassword(), userMW...))
a.server.Post(v1Base("/groups/invitations"), v1Ctrl.HandleGroupInvitationsCreate(), userMW...)
a.server.Get(v1Base("/groups/statistics"), v1Ctrl.HandleGroupStatistics(), userMW...)
a.server.Get(v1Base("/groups/statistics/purchase-price"), v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...)
a.server.Get(v1Base("/groups/statistics/locations"), v1Ctrl.HandleGroupStatisticsLocations(), userMW...)
a.server.Get(v1Base("/groups/statistics/labels"), v1Ctrl.HandleGroupStatisticsLabels(), userMW...)
r.Post(v1Base("/groups/invitations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupInvitationsCreate(), userMW...))
r.Get(v1Base("/groups/statistics"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatistics(), userMW...))
r.Get(v1Base("/groups/statistics/purchase-price"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsPriceOverTime(), userMW...))
r.Get(v1Base("/groups/statistics/locations"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLocations(), userMW...))
r.Get(v1Base("/groups/statistics/labels"), chain.ToHandlerFunc(v1Ctrl.HandleGroupStatisticsLabels(), userMW...))
// TODO: I don't like /groups being the URL for users
a.server.Get(v1Base("/groups"), v1Ctrl.HandleGroupGet(), userMW...)
a.server.Put(v1Base("/groups"), v1Ctrl.HandleGroupUpdate(), userMW...)
r.Get(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupGet(), userMW...))
r.Put(v1Base("/groups"), chain.ToHandlerFunc(v1Ctrl.HandleGroupUpdate(), userMW...))
a.server.Post(v1Base("/actions/ensure-asset-ids"), v1Ctrl.HandleEnsureAssetID(), userMW...)
a.server.Post(v1Base("/actions/zero-item-time-fields"), v1Ctrl.HandleItemDateZeroOut(), userMW...)
a.server.Post(v1Base("/actions/ensure-import-refs"), v1Ctrl.HandleEnsureImportRefs(), userMW...)
r.Post(v1Base("/actions/ensure-asset-ids"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureAssetID(), userMW...))
r.Post(v1Base("/actions/zero-item-time-fields"), chain.ToHandlerFunc(v1Ctrl.HandleItemDateZeroOut(), userMW...))
r.Post(v1Base("/actions/ensure-import-refs"), chain.ToHandlerFunc(v1Ctrl.HandleEnsureImportRefs(), userMW...))
r.Post(v1Base("/actions/set-primary-photos"), chain.ToHandlerFunc(v1Ctrl.HandleSetPrimaryPhotos(), userMW...))
a.server.Get(v1Base("/locations"), v1Ctrl.HandleLocationGetAll(), userMW...)
a.server.Post(v1Base("/locations"), v1Ctrl.HandleLocationCreate(), userMW...)
a.server.Get(v1Base("/locations/tree"), v1Ctrl.HandleLocationTreeQuery(), userMW...)
a.server.Get(v1Base("/locations/{id}"), v1Ctrl.HandleLocationGet(), userMW...)
a.server.Put(v1Base("/locations/{id}"), v1Ctrl.HandleLocationUpdate(), userMW...)
a.server.Delete(v1Base("/locations/{id}"), v1Ctrl.HandleLocationDelete(), userMW...)
r.Get(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGetAll(), userMW...))
r.Post(v1Base("/locations"), chain.ToHandlerFunc(v1Ctrl.HandleLocationCreate(), userMW...))
r.Get(v1Base("/locations/tree"), chain.ToHandlerFunc(v1Ctrl.HandleLocationTreeQuery(), userMW...))
r.Get(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationGet(), userMW...))
r.Put(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationUpdate(), userMW...))
r.Delete(v1Base("/locations/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLocationDelete(), userMW...))
a.server.Get(v1Base("/labels"), v1Ctrl.HandleLabelsGetAll(), userMW...)
a.server.Post(v1Base("/labels"), v1Ctrl.HandleLabelsCreate(), userMW...)
a.server.Get(v1Base("/labels/{id}"), v1Ctrl.HandleLabelGet(), userMW...)
a.server.Put(v1Base("/labels/{id}"), v1Ctrl.HandleLabelUpdate(), userMW...)
a.server.Delete(v1Base("/labels/{id}"), v1Ctrl.HandleLabelDelete(), userMW...)
r.Get(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsGetAll(), userMW...))
r.Post(v1Base("/labels"), chain.ToHandlerFunc(v1Ctrl.HandleLabelsCreate(), userMW...))
r.Get(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelGet(), userMW...))
r.Put(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelUpdate(), userMW...))
r.Delete(v1Base("/labels/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleLabelDelete(), userMW...))
a.server.Get(v1Base("/items"), v1Ctrl.HandleItemsGetAll(), userMW...)
a.server.Post(v1Base("/items"), v1Ctrl.HandleItemsCreate(), userMW...)
a.server.Post(v1Base("/items/import"), v1Ctrl.HandleItemsImport(), userMW...)
a.server.Get(v1Base("/items/export"), v1Ctrl.HandleItemsExport(), userMW...)
a.server.Get(v1Base("/items/fields"), v1Ctrl.HandleGetAllCustomFieldNames(), userMW...)
a.server.Get(v1Base("/items/fields/values"), v1Ctrl.HandleGetAllCustomFieldValues(), userMW...)
r.Get(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsGetAll(), userMW...))
r.Post(v1Base("/items"), chain.ToHandlerFunc(v1Ctrl.HandleItemsCreate(), userMW...))
r.Post(v1Base("/items/import"), chain.ToHandlerFunc(v1Ctrl.HandleItemsImport(), userMW...))
r.Get(v1Base("/items/export"), chain.ToHandlerFunc(v1Ctrl.HandleItemsExport(), userMW...))
r.Get(v1Base("/items/fields"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldNames(), userMW...))
r.Get(v1Base("/items/fields/values"), chain.ToHandlerFunc(v1Ctrl.HandleGetAllCustomFieldValues(), userMW...))
a.server.Get(v1Base("/items/{id}"), v1Ctrl.HandleItemGet(), userMW...)
a.server.Put(v1Base("/items/{id}"), v1Ctrl.HandleItemUpdate(), userMW...)
a.server.Delete(v1Base("/items/{id}"), v1Ctrl.HandleItemDelete(), userMW...)
r.Get(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemGet(), userMW...))
r.Put(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemUpdate(), userMW...))
r.Patch(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemPatch(), userMW...))
r.Delete(v1Base("/items/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemDelete(), userMW...))
a.server.Post(v1Base("/items/{id}/attachments"), v1Ctrl.HandleItemAttachmentCreate(), userMW...)
a.server.Put(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentUpdate(), userMW...)
a.server.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), v1Ctrl.HandleItemAttachmentDelete(), userMW...)
r.Post(v1Base("/items/{id}/attachments"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentCreate(), userMW...))
r.Put(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentUpdate(), userMW...))
r.Delete(v1Base("/items/{id}/attachments/{attachment_id}"), chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentDelete(), userMW...))
a.server.Get(v1Base("/items/{id}/maintenance"), v1Ctrl.HandleMaintenanceEntryCreate(), userMW...)
a.server.Post(v1Base("/items/{id}/maintenance"), v1Ctrl.HandleMaintenanceEntryCreate(), userMW...)
a.server.Put(v1Base("/items/{id}/maintenance/{entry_id}"), v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...)
a.server.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), v1Ctrl.HandleMaintenanceEntryDelete(), userMW...)
r.Get(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceLogGet(), userMW...))
r.Post(v1Base("/items/{id}/maintenance"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryCreate(), userMW...))
r.Put(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryUpdate(), userMW...))
r.Delete(v1Base("/items/{id}/maintenance/{entry_id}"), chain.ToHandlerFunc(v1Ctrl.HandleMaintenanceEntryDelete(), userMW...))
a.server.Get(v1Base("/asset/{id}"), v1Ctrl.HandleAssetGet(), userMW...)
r.Get(v1Base("/assets/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleAssetGet(), userMW...))
// Notifiers
a.server.Get(v1Base("/notifiers"), v1Ctrl.HandleGetUserNotifiers(), userMW...)
a.server.Post(v1Base("/notifiers"), v1Ctrl.HandleCreateNotifier(), userMW...)
a.server.Put(v1Base("/notifiers/{id}"), v1Ctrl.HandleUpdateNotifier(), userMW...)
a.server.Delete(v1Base("/notifiers/{id}"), v1Ctrl.HandleDeleteNotifier(), userMW...)
a.server.Post(v1Base("/notifiers/test"), v1Ctrl.HandlerNotifierTest(), userMW...)
r.Get(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleGetUserNotifiers(), userMW...))
r.Post(v1Base("/notifiers"), chain.ToHandlerFunc(v1Ctrl.HandleCreateNotifier(), userMW...))
r.Put(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleUpdateNotifier(), userMW...))
r.Delete(v1Base("/notifiers/{id}"), chain.ToHandlerFunc(v1Ctrl.HandleDeleteNotifier(), userMW...))
r.Post(v1Base("/notifiers/test"), chain.ToHandlerFunc(v1Ctrl.HandlerNotifierTest(), userMW...))
// Asset-Like endpoints
a.server.Get(
assetMW := []errchain.Middleware{
a.mwAuthToken,
a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
}
r.Get(
v1Base("/qrcode"),
v1Ctrl.HandleGenerateQRCode(),
a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
chain.ToHandlerFunc(v1Ctrl.HandleGenerateQRCode(), assetMW...),
)
a.server.Get(
r.Get(
v1Base("/items/{id}/attachments/{attachment_id}"),
v1Ctrl.HandleItemAttachmentGet(),
a.mwAuthToken, a.mwRoles(RoleModeOr, authroles.RoleUser.String(), authroles.RoleAttachments.String()),
chain.ToHandlerFunc(v1Ctrl.HandleItemAttachmentGet(), assetMW...),
)
// Reporting Services
a.server.Get(v1Base("/reporting/bill-of-materials"), v1Ctrl.HandleBillOfMaterialsExport(), userMW...)
r.Get(v1Base("/reporting/bill-of-materials"), chain.ToHandlerFunc(v1Ctrl.HandleBillOfMaterialsExport(), userMW...))
a.server.NotFound(notFoundHandler())
r.NotFound(chain.ToHandlerFunc(notFoundHandler()))
}
func registerMimes() {
@@ -165,7 +172,7 @@ func registerMimes() {
// notFoundHandler performs the main logic around handling the internal SPA embed and ensures that
// client-side routing is handled correctly.
func notFoundHandler() server.HandlerFunc {
func notFoundHandler() errchain.HandlerFunc {
tryRead := func(fs embed.FS, prefix, requestedPath string, w http.ResponseWriter) error {
f, err := fs.Open(path.Join(prefix, requestedPath))
if err != nil {

View File

@@ -1,5 +1,4 @@
// Package docs GENERATED BY SWAG; DO NOT EDIT
// This file was generated by swaggo/swag
// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
@@ -13,9 +12,6 @@ const docTemplate = `{
"contact": {
"name": "Don't"
},
"license": {
"name": "MIT"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
@@ -71,6 +67,31 @@ const docTemplate = `{
}
}
},
"/v1/actions/set-primary-photos": {
"post": {
"security": [
{
"Bearer": []
}
],
"description": "Sets the first photo of each item as the primary photo",
"produces": [
"application/json"
],
"tags": [
"Actions"
],
"summary": "Set Primary Photos",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/v1.ActionAmountResult"
}
}
}
}
},
"/v1/actions/zero-item-time-fields": {
"post": {
"security": [
@@ -425,8 +446,8 @@ const docTemplate = `{
}
],
"responses": {
"200": {
"description": "OK",
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/repo.ItemSummary"
}
@@ -638,6 +659,46 @@ const docTemplate = `{
"description": "No Content"
}
}
},
"patch": {
"security": [
{
"Bearer": []
}
],
"produces": [
"application/json"
],
"tags": [
"Items"
],
"summary": "Update Item",
"parameters": [
{
"type": "string",
"description": "Item ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Item Data",
"name": "payload",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/repo.ItemPatch"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/repo.ItemOut"
}
}
}
}
},
"/v1/items/{id}/attachments": {
@@ -694,7 +755,7 @@ const docTemplate = `{
"422": {
"description": "Unprocessable Entity",
"schema": {
"$ref": "#/definitions/server.ErrorResponse"
"$ref": "#/definitions/validate.ErrorResponse"
}
}
}
@@ -864,8 +925,8 @@ const docTemplate = `{
}
],
"responses": {
"200": {
"description": "OK",
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/repo.MaintenanceEntry"
}
@@ -945,22 +1006,10 @@ const docTemplate = `{
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.LabelOut"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.LabelOut"
}
}
}
}
@@ -1117,22 +1166,10 @@ const docTemplate = `{
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.LocationOutCount"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.LocationOutCount"
}
}
}
}
@@ -1197,22 +1234,10 @@ const docTemplate = `{
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.TreeItem"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.TreeItem"
}
}
}
}
@@ -1337,22 +1362,10 @@ const docTemplate = `{
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.NotifierOut"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.NotifierOut"
}
}
}
}
@@ -1623,6 +1636,15 @@ const docTemplate = `{
"description": "string",
"name": "password",
"in": "formData"
},
{
"description": "Login Data",
"name": "payload",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/v1.LoginForm"
}
}
],
"responses": {
@@ -1719,7 +1741,7 @@ const docTemplate = `{
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Result"
"$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1764,7 +1786,7 @@ const docTemplate = `{
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Result"
"$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1881,6 +1903,9 @@ const docTemplate = `{
"id": {
"type": "string"
},
"primary": {
"type": "boolean"
},
"type": {
"type": "string"
},
@@ -1892,6 +1917,9 @@ const docTemplate = `{
"repo.ItemAttachmentUpdate": {
"type": "object",
"properties": {
"primary": {
"type": "boolean"
},
"title": {
"type": "string"
},
@@ -1902,9 +1930,13 @@ const docTemplate = `{
},
"repo.ItemCreate": {
"type": "object",
"required": [
"name"
],
"properties": {
"description": {
"type": "string"
"type": "string",
"maxLength": 1000
},
"labelIds": {
"type": "array",
@@ -1917,7 +1949,9 @@ const docTemplate = `{
"type": "string"
},
"name": {
"type": "string"
"type": "string",
"maxLength": 255,
"minLength": 1
},
"parentId": {
"type": "string",
@@ -1985,6 +2019,9 @@ const docTemplate = `{
"id": {
"type": "string"
},
"imageId": {
"type": "string"
},
"insured": {
"type": "boolean"
},
@@ -2000,9 +2037,13 @@ const docTemplate = `{
},
"location": {
"description": "Edges",
"allOf": [
{
"$ref": "#/definitions/repo.LocationSummary"
}
],
"x-nullable": true,
"x-omitempty": true,
"$ref": "#/definitions/repo.LocationSummary"
"x-omitempty": true
},
"manufacturer": {
"type": "string"
@@ -2018,9 +2059,13 @@ const docTemplate = `{
"type": "string"
},
"parent": {
"allOf": [
{
"$ref": "#/definitions/repo.ItemSummary"
}
],
"x-nullable": true,
"x-omitempty": true,
"$ref": "#/definitions/repo.ItemSummary"
"x-omitempty": true
},
"purchaseFrom": {
"type": "string"
@@ -2064,6 +2109,19 @@ const docTemplate = `{
}
}
},
"repo.ItemPatch": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"quantity": {
"type": "integer",
"x-nullable": true,
"x-omitempty": true
}
}
},
"repo.ItemSummary": {
"type": "object",
"properties": {
@@ -2079,6 +2137,9 @@ const docTemplate = `{
"id": {
"type": "string"
},
"imageId": {
"type": "string"
},
"insured": {
"type": "boolean"
},
@@ -2090,9 +2151,13 @@ const docTemplate = `{
},
"location": {
"description": "Edges",
"allOf": [
{
"$ref": "#/definitions/repo.LocationSummary"
}
],
"x-nullable": true,
"x-omitempty": true,
"$ref": "#/definitions/repo.LocationSummary"
"x-omitempty": true
},
"name": {
"type": "string"
@@ -2116,7 +2181,8 @@ const docTemplate = `{
"type": "boolean"
},
"assetId": {
"type": "string"
"type": "string",
"example": "0"
},
"description": {
"type": "string"
@@ -2201,22 +2267,27 @@ const docTemplate = `{
"type": "string"
},
"warrantyExpires": {
"description": "Sold",
"type": "string"
}
}
},
"repo.LabelCreate": {
"type": "object",
"required": [
"name"
],
"properties": {
"color": {
"type": "string"
},
"description": {
"type": "string"
"type": "string",
"maxLength": 255
},
"name": {
"type": "string"
"type": "string",
"maxLength": 255,
"minLength": 1
}
}
},
@@ -2232,12 +2303,6 @@ const docTemplate = `{
"id": {
"type": "string"
},
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.ItemSummary"
}
},
"name": {
"type": "string"
},
@@ -2299,12 +2364,6 @@ const docTemplate = `{
"id": {
"type": "string"
},
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.ItemSummary"
}
},
"name": {
"type": "string"
},
@@ -2381,7 +2440,6 @@ const docTemplate = `{
"type": "object",
"properties": {
"completedDate": {
"description": "Sold",
"type": "string"
},
"cost": {
@@ -2398,16 +2456,17 @@ const docTemplate = `{
"type": "string"
},
"scheduledDate": {
"description": "Sold",
"type": "string"
}
}
},
"repo.MaintenanceEntryCreate": {
"type": "object",
"required": [
"name"
],
"properties": {
"completedDate": {
"description": "Sold",
"type": "string"
},
"cost": {
@@ -2421,7 +2480,6 @@ const docTemplate = `{
"type": "string"
},
"scheduledDate": {
"description": "Sold",
"type": "string"
}
}
@@ -2430,7 +2488,6 @@ const docTemplate = `{
"type": "object",
"properties": {
"completedDate": {
"description": "Sold",
"type": "string"
},
"cost": {
@@ -2444,7 +2501,6 @@ const docTemplate = `{
"type": "string"
},
"scheduledDate": {
"description": "Sold",
"type": "string"
}
}
@@ -2663,39 +2719,6 @@ const docTemplate = `{
}
}
},
"server.ErrorResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"fields": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
},
"server.Result": {
"type": "object",
"properties": {
"details": {},
"error": {
"type": "boolean"
},
"item": {},
"message": {
"type": "string"
}
}
},
"server.Results": {
"type": "object",
"properties": {
"items": {}
}
},
"services.UserRegistration": {
"type": "object",
"properties": {
@@ -2791,12 +2814,17 @@ const docTemplate = `{
},
"v1.GroupInvitationCreate": {
"type": "object",
"required": [
"uses"
],
"properties": {
"expiresAt": {
"type": "string"
},
"uses": {
"type": "integer"
"type": "integer",
"maximum": 100,
"minimum": 1
}
}
},
@@ -2808,6 +2836,20 @@ const docTemplate = `{
}
}
},
"v1.LoginForm": {
"type": "object",
"properties": {
"password": {
"type": "string"
},
"stayLoggedIn": {
"type": "boolean"
},
"username": {
"type": "string"
}
}
},
"v1.TokenResponse": {
"type": "object",
"properties": {
@@ -2821,6 +2863,23 @@ const docTemplate = `{
"type": "string"
}
}
},
"v1.Wrapped": {
"type": "object",
"properties": {
"item": {}
}
},
"validate.ErrorResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"fields": {
"type": "string"
}
}
}
},
"securityDefinitions": {
@@ -2840,9 +2899,11 @@ var SwaggerInfo = &swag.Spec{
BasePath: "/api",
Schemes: []string{},
Title: "Homebox API",
Description: "Track, Manage, and Organize your Shit.",
Description: "Track, Manage, and Organize your Things.",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
LeftDelim: "{{",
RightDelim: "}}",
}
func init() {

View File

@@ -1,14 +1,11 @@
{
"swagger": "2.0",
"info": {
"description": "Track, Manage, and Organize your Shit.",
"description": "Track, Manage, and Organize your Things.",
"title": "Homebox API",
"contact": {
"name": "Don't"
},
"license": {
"name": "MIT"
},
"version": "1.0"
},
"basePath": "/api",
@@ -63,6 +60,31 @@
}
}
},
"/v1/actions/set-primary-photos": {
"post": {
"security": [
{
"Bearer": []
}
],
"description": "Sets the first photo of each item as the primary photo",
"produces": [
"application/json"
],
"tags": [
"Actions"
],
"summary": "Set Primary Photos",
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/v1.ActionAmountResult"
}
}
}
}
},
"/v1/actions/zero-item-time-fields": {
"post": {
"security": [
@@ -417,8 +439,8 @@
}
],
"responses": {
"200": {
"description": "OK",
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/repo.ItemSummary"
}
@@ -630,6 +652,46 @@
"description": "No Content"
}
}
},
"patch": {
"security": [
{
"Bearer": []
}
],
"produces": [
"application/json"
],
"tags": [
"Items"
],
"summary": "Update Item",
"parameters": [
{
"type": "string",
"description": "Item ID",
"name": "id",
"in": "path",
"required": true
},
{
"description": "Item Data",
"name": "payload",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/repo.ItemPatch"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/repo.ItemOut"
}
}
}
}
},
"/v1/items/{id}/attachments": {
@@ -686,7 +748,7 @@
"422": {
"description": "Unprocessable Entity",
"schema": {
"$ref": "#/definitions/server.ErrorResponse"
"$ref": "#/definitions/validate.ErrorResponse"
}
}
}
@@ -856,8 +918,8 @@
}
],
"responses": {
"200": {
"description": "OK",
"201": {
"description": "Created",
"schema": {
"$ref": "#/definitions/repo.MaintenanceEntry"
}
@@ -937,22 +999,10 @@
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.LabelOut"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.LabelOut"
}
}
}
}
@@ -1109,22 +1159,10 @@
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.LocationOutCount"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.LocationOutCount"
}
}
}
}
@@ -1189,22 +1227,10 @@
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.TreeItem"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.TreeItem"
}
}
}
}
@@ -1329,22 +1355,10 @@
"200": {
"description": "OK",
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Results"
},
{
"type": "object",
"properties": {
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.NotifierOut"
}
}
}
}
]
"type": "array",
"items": {
"$ref": "#/definitions/repo.NotifierOut"
}
}
}
}
@@ -1615,6 +1629,15 @@
"description": "string",
"name": "password",
"in": "formData"
},
{
"description": "Login Data",
"name": "payload",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/v1.LoginForm"
}
}
],
"responses": {
@@ -1711,7 +1734,7 @@
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Result"
"$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1756,7 +1779,7 @@
"schema": {
"allOf": [
{
"$ref": "#/definitions/server.Result"
"$ref": "#/definitions/v1.Wrapped"
},
{
"type": "object",
@@ -1873,6 +1896,9 @@
"id": {
"type": "string"
},
"primary": {
"type": "boolean"
},
"type": {
"type": "string"
},
@@ -1884,6 +1910,9 @@
"repo.ItemAttachmentUpdate": {
"type": "object",
"properties": {
"primary": {
"type": "boolean"
},
"title": {
"type": "string"
},
@@ -1894,9 +1923,13 @@
},
"repo.ItemCreate": {
"type": "object",
"required": [
"name"
],
"properties": {
"description": {
"type": "string"
"type": "string",
"maxLength": 1000
},
"labelIds": {
"type": "array",
@@ -1909,7 +1942,9 @@
"type": "string"
},
"name": {
"type": "string"
"type": "string",
"maxLength": 255,
"minLength": 1
},
"parentId": {
"type": "string",
@@ -1977,6 +2012,9 @@
"id": {
"type": "string"
},
"imageId": {
"type": "string"
},
"insured": {
"type": "boolean"
},
@@ -1992,9 +2030,13 @@
},
"location": {
"description": "Edges",
"allOf": [
{
"$ref": "#/definitions/repo.LocationSummary"
}
],
"x-nullable": true,
"x-omitempty": true,
"$ref": "#/definitions/repo.LocationSummary"
"x-omitempty": true
},
"manufacturer": {
"type": "string"
@@ -2010,9 +2052,13 @@
"type": "string"
},
"parent": {
"allOf": [
{
"$ref": "#/definitions/repo.ItemSummary"
}
],
"x-nullable": true,
"x-omitempty": true,
"$ref": "#/definitions/repo.ItemSummary"
"x-omitempty": true
},
"purchaseFrom": {
"type": "string"
@@ -2056,6 +2102,19 @@
}
}
},
"repo.ItemPatch": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"quantity": {
"type": "integer",
"x-nullable": true,
"x-omitempty": true
}
}
},
"repo.ItemSummary": {
"type": "object",
"properties": {
@@ -2071,6 +2130,9 @@
"id": {
"type": "string"
},
"imageId": {
"type": "string"
},
"insured": {
"type": "boolean"
},
@@ -2082,9 +2144,13 @@
},
"location": {
"description": "Edges",
"allOf": [
{
"$ref": "#/definitions/repo.LocationSummary"
}
],
"x-nullable": true,
"x-omitempty": true,
"$ref": "#/definitions/repo.LocationSummary"
"x-omitempty": true
},
"name": {
"type": "string"
@@ -2108,7 +2174,8 @@
"type": "boolean"
},
"assetId": {
"type": "string"
"type": "string",
"example": "0"
},
"description": {
"type": "string"
@@ -2193,22 +2260,27 @@
"type": "string"
},
"warrantyExpires": {
"description": "Sold",
"type": "string"
}
}
},
"repo.LabelCreate": {
"type": "object",
"required": [
"name"
],
"properties": {
"color": {
"type": "string"
},
"description": {
"type": "string"
"type": "string",
"maxLength": 255
},
"name": {
"type": "string"
"type": "string",
"maxLength": 255,
"minLength": 1
}
}
},
@@ -2224,12 +2296,6 @@
"id": {
"type": "string"
},
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.ItemSummary"
}
},
"name": {
"type": "string"
},
@@ -2291,12 +2357,6 @@
"id": {
"type": "string"
},
"items": {
"type": "array",
"items": {
"$ref": "#/definitions/repo.ItemSummary"
}
},
"name": {
"type": "string"
},
@@ -2373,7 +2433,6 @@
"type": "object",
"properties": {
"completedDate": {
"description": "Sold",
"type": "string"
},
"cost": {
@@ -2390,16 +2449,17 @@
"type": "string"
},
"scheduledDate": {
"description": "Sold",
"type": "string"
}
}
},
"repo.MaintenanceEntryCreate": {
"type": "object",
"required": [
"name"
],
"properties": {
"completedDate": {
"description": "Sold",
"type": "string"
},
"cost": {
@@ -2413,7 +2473,6 @@
"type": "string"
},
"scheduledDate": {
"description": "Sold",
"type": "string"
}
}
@@ -2422,7 +2481,6 @@
"type": "object",
"properties": {
"completedDate": {
"description": "Sold",
"type": "string"
},
"cost": {
@@ -2436,7 +2494,6 @@
"type": "string"
},
"scheduledDate": {
"description": "Sold",
"type": "string"
}
}
@@ -2655,39 +2712,6 @@
}
}
},
"server.ErrorResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"fields": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
},
"server.Result": {
"type": "object",
"properties": {
"details": {},
"error": {
"type": "boolean"
},
"item": {},
"message": {
"type": "string"
}
}
},
"server.Results": {
"type": "object",
"properties": {
"items": {}
}
},
"services.UserRegistration": {
"type": "object",
"properties": {
@@ -2783,12 +2807,17 @@
},
"v1.GroupInvitationCreate": {
"type": "object",
"required": [
"uses"
],
"properties": {
"expiresAt": {
"type": "string"
},
"uses": {
"type": "integer"
"type": "integer",
"maximum": 100,
"minimum": 1
}
}
},
@@ -2800,6 +2829,20 @@
}
}
},
"v1.LoginForm": {
"type": "object",
"properties": {
"password": {
"type": "string"
},
"stayLoggedIn": {
"type": "boolean"
},
"username": {
"type": "string"
}
}
},
"v1.TokenResponse": {
"type": "object",
"properties": {
@@ -2813,6 +2856,23 @@
"type": "string"
}
}
},
"v1.Wrapped": {
"type": "object",
"properties": {
"item": {}
}
},
"validate.ErrorResponse": {
"type": "object",
"properties": {
"error": {
"type": "string"
},
"fields": {
"type": "string"
}
}
}
},
"securityDefinitions": {

View File

@@ -52,6 +52,8 @@ definitions:
$ref: '#/definitions/repo.DocumentOut'
id:
type: string
primary:
type: boolean
type:
type: string
updatedAt:
@@ -59,6 +61,8 @@ definitions:
type: object
repo.ItemAttachmentUpdate:
properties:
primary:
type: boolean
title:
type: string
type:
@@ -67,6 +71,7 @@ definitions:
repo.ItemCreate:
properties:
description:
maxLength: 1000
type: string
labelIds:
items:
@@ -76,10 +81,14 @@ definitions:
description: Edges
type: string
name:
maxLength: 255
minLength: 1
type: string
parentId:
type: string
x-nullable: true
required:
- name
type: object
repo.ItemField:
properties:
@@ -121,6 +130,8 @@ definitions:
type: array
id:
type: string
imageId:
type: string
insured:
type: boolean
labels:
@@ -131,7 +142,8 @@ definitions:
description: Warranty
type: boolean
location:
$ref: '#/definitions/repo.LocationSummary'
allOf:
- $ref: '#/definitions/repo.LocationSummary'
description: Edges
x-nullable: true
x-omitempty: true
@@ -145,7 +157,8 @@ definitions:
description: Extras
type: string
parent:
$ref: '#/definitions/repo.ItemSummary'
allOf:
- $ref: '#/definitions/repo.ItemSummary'
x-nullable: true
x-omitempty: true
purchaseFrom:
@@ -177,6 +190,15 @@ definitions:
warrantyExpires:
type: string
type: object
repo.ItemPatch:
properties:
id:
type: string
quantity:
type: integer
x-nullable: true
x-omitempty: true
type: object
repo.ItemSummary:
properties:
archived:
@@ -187,6 +209,8 @@ definitions:
type: string
id:
type: string
imageId:
type: string
insured:
type: boolean
labels:
@@ -194,7 +218,8 @@ definitions:
$ref: '#/definitions/repo.LabelSummary'
type: array
location:
$ref: '#/definitions/repo.LocationSummary'
allOf:
- $ref: '#/definitions/repo.LocationSummary'
description: Edges
x-nullable: true
x-omitempty: true
@@ -213,6 +238,7 @@ definitions:
archived:
type: boolean
assetId:
example: "0"
type: string
description:
type: string
@@ -273,7 +299,6 @@ definitions:
warrantyDetails:
type: string
warrantyExpires:
description: Sold
type: string
type: object
repo.LabelCreate:
@@ -281,9 +306,14 @@ definitions:
color:
type: string
description:
maxLength: 255
type: string
name:
maxLength: 255
minLength: 1
type: string
required:
- name
type: object
repo.LabelOut:
properties:
@@ -293,10 +323,6 @@ definitions:
type: string
id:
type: string
items:
items:
$ref: '#/definitions/repo.ItemSummary'
type: array
name:
type: string
updatedAt:
@@ -337,10 +363,6 @@ definitions:
type: string
id:
type: string
items:
items:
$ref: '#/definitions/repo.ItemSummary'
type: array
name:
type: string
parent:
@@ -391,7 +413,6 @@ definitions:
repo.MaintenanceEntry:
properties:
completedDate:
description: Sold
type: string
cost:
example: "0"
@@ -403,13 +424,11 @@ definitions:
name:
type: string
scheduledDate:
description: Sold
type: string
type: object
repo.MaintenanceEntryCreate:
properties:
completedDate:
description: Sold
type: string
cost:
example: "0"
@@ -419,13 +438,13 @@ definitions:
name:
type: string
scheduledDate:
description: Sold
type: string
required:
- name
type: object
repo.MaintenanceEntryUpdate:
properties:
completedDate:
description: Sold
type: string
cost:
example: "0"
@@ -435,7 +454,6 @@ definitions:
name:
type: string
scheduledDate:
description: Sold
type: string
type: object
repo.MaintenanceLog:
@@ -579,28 +597,6 @@ definitions:
value:
type: number
type: object
server.ErrorResponse:
properties:
error:
type: string
fields:
additionalProperties:
type: string
type: object
type: object
server.Result:
properties:
details: {}
error:
type: boolean
item: {}
message:
type: string
type: object
server.Results:
properties:
items: {}
type: object
services.UserRegistration:
properties:
email:
@@ -666,13 +662,26 @@ definitions:
expiresAt:
type: string
uses:
maximum: 100
minimum: 1
type: integer
required:
- uses
type: object
v1.ItemAttachmentToken:
properties:
token:
type: string
type: object
v1.LoginForm:
properties:
password:
type: string
stayLoggedIn:
type: boolean
username:
type: string
type: object
v1.TokenResponse:
properties:
attachmentToken:
@@ -682,12 +691,21 @@ definitions:
token:
type: string
type: object
v1.Wrapped:
properties:
item: {}
type: object
validate.ErrorResponse:
properties:
error:
type: string
fields:
type: string
type: object
info:
contact:
name: Don't
description: Track, Manage, and Organize your Shit.
license:
name: MIT
description: Track, Manage, and Organize your Things.
title: Homebox API
version: "1.0"
paths:
@@ -721,6 +739,21 @@ paths:
summary: Ensures Import Refs
tags:
- Actions
/v1/actions/set-primary-photos:
post:
description: Sets the first photo of each item as the primary photo
produces:
- application/json
responses:
"200":
description: OK
schema:
$ref: '#/definitions/v1.ActionAmountResult'
security:
- Bearer: []
summary: Set Primary Photos
tags:
- Actions
/v1/actions/zero-item-time-fields:
post:
description: Resets all item date fields to the beginning of the day
@@ -932,8 +965,8 @@ paths:
produces:
- application/json
responses:
"200":
description: OK
"201":
description: Created
schema:
$ref: '#/definitions/repo.ItemSummary'
security:
@@ -978,6 +1011,31 @@ paths:
summary: Get Item
tags:
- Items
patch:
parameters:
- description: Item ID
in: path
name: id
required: true
type: string
- description: Item Data
in: body
name: payload
required: true
schema:
$ref: '#/definitions/repo.ItemPatch'
produces:
- application/json
responses:
"200":
description: OK
schema:
$ref: '#/definitions/repo.ItemOut'
security:
- Bearer: []
summary: Update Item
tags:
- Items
put:
parameters:
- description: Item ID
@@ -1036,7 +1094,7 @@ paths:
"422":
description: Unprocessable Entity
schema:
$ref: '#/definitions/server.ErrorResponse'
$ref: '#/definitions/validate.ErrorResponse'
security:
- Bearer: []
summary: Create Item Attachment
@@ -1140,8 +1198,8 @@ paths:
produces:
- application/json
responses:
"200":
description: OK
"201":
description: Created
schema:
$ref: '#/definitions/repo.MaintenanceEntry'
security:
@@ -1251,14 +1309,9 @@ paths:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/server.Results'
- properties:
items:
items:
$ref: '#/definitions/repo.LabelOut'
type: array
type: object
items:
$ref: '#/definitions/repo.LabelOut'
type: array
security:
- Bearer: []
summary: Get All Labels
@@ -1353,14 +1406,9 @@ paths:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/server.Results'
- properties:
items:
items:
$ref: '#/definitions/repo.LocationOutCount'
type: array
type: object
items:
$ref: '#/definitions/repo.LocationOutCount'
type: array
security:
- Bearer: []
summary: Get All Locations
@@ -1461,14 +1509,9 @@ paths:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/server.Results'
- properties:
items:
items:
$ref: '#/definitions/repo.TreeItem'
type: array
type: object
items:
$ref: '#/definitions/repo.TreeItem'
type: array
security:
- Bearer: []
summary: Get Locations Tree
@@ -1482,14 +1525,9 @@ paths:
"200":
description: OK
schema:
allOf:
- $ref: '#/definitions/server.Results'
- properties:
items:
items:
$ref: '#/definitions/repo.NotifierOut'
type: array
type: object
items:
$ref: '#/definitions/repo.NotifierOut'
type: array
security:
- Bearer: []
summary: Get Notifiers
@@ -1655,6 +1693,12 @@ paths:
in: formData
name: password
type: string
- description: Login Data
in: body
name: payload
required: true
schema:
$ref: '#/definitions/v1.LoginForm'
produces:
- application/json
responses:
@@ -1725,7 +1769,7 @@ paths:
description: OK
schema:
allOf:
- $ref: '#/definitions/server.Result'
- $ref: '#/definitions/v1.Wrapped'
- properties:
item:
$ref: '#/definitions/repo.UserOut'
@@ -1750,7 +1794,7 @@ paths:
description: OK
schema:
allOf:
- $ref: '#/definitions/server.Result'
- $ref: '#/definitions/v1.Wrapped'
- properties:
item:
$ref: '#/definitions/repo.UserUpdate'
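The new PATCH /v1/items/{id} route documented above accepts a repo.ItemPatch body in which only the supplied fields are applied (quantity is nullable and omit-empty). Below is a hedged sketch of issuing such a request from Go; the host, port, item ID, and token are placeholders, not values from this diff.

    // Sketch only: adjust an item's quantity via the new PATCH route.
    token := "YOUR-API-TOKEN" // placeholder
    body := []byte(`{"id": "ITEM-ID", "quantity": 3}`)

    req, _ := http.NewRequest(
        http.MethodPatch,
        "http://localhost:7745/api/v1/items/ITEM-ID", // placeholder host and ID
        bytes.NewReader(body),
    )
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Authorization", "Bearer "+token) // or the hb.auth.token cookie

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    // A 200 response carries the updated repo.ItemOut document.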

View File

@@ -54,6 +54,7 @@ func main() {
NewReReplace(` Services`, " "),
NewReReplace(` V1`, " "),
NewReReplace(`\?:`, ":"),
NewReReplace(`(\w+):\s(.*null.*)`, "$1?: $2"), // make null union types optional
NewReDate("createdAt"),
NewReDate("updatedAt"),
NewReDate("soldTime"),

View File

@@ -1,60 +1,79 @@
module github.com/hay-kot/homebox/backend
go 1.19
go 1.20
require (
ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb
entgo.io/ent v0.11.8
github.com/ardanlabs/conf/v3 v3.1.4
github.com/containrrr/shoutrrr v0.7.1
github.com/go-chi/chi/v5 v5.0.8
github.com/go-playground/validator/v10 v10.11.2
github.com/gocarina/gocsv v0.0.0-20230219202803-bcce7dc8d0bb
github.com/google/uuid v1.3.0
github.com/gorilla/schema v1.2.0
github.com/mattn/go-sqlite3 v1.14.16
github.com/rs/zerolog v1.29.0
github.com/stretchr/testify v1.8.2
github.com/swaggo/http-swagger v1.3.3
github.com/swaggo/swag v1.8.10
github.com/yeqown/go-qrcode/v2 v2.2.1
github.com/yeqown/go-qrcode/writer/standard v1.2.1
golang.org/x/crypto v0.7.0
ariga.io/atlas v0.15.0
entgo.io/ent v0.12.5
github.com/ardanlabs/conf/v3 v3.1.7
github.com/containrrr/shoutrrr v0.8.0
github.com/go-chi/chi/v5 v5.0.10
github.com/go-playground/validator/v10 v10.16.0
github.com/gocarina/gocsv v0.0.0-20230616125104-99d496ca653d
github.com/google/uuid v1.4.0
github.com/gorilla/schema v1.2.1
github.com/hay-kot/httpkit v0.0.3
github.com/mattn/go-sqlite3 v1.14.18
github.com/olahol/melody v1.1.4
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.31.0
github.com/stretchr/testify v1.8.4
github.com/swaggo/http-swagger v1.3.4
github.com/swaggo/swag v1.16.2
github.com/yeqown/go-qrcode/v2 v2.2.2
github.com/yeqown/go-qrcode/writer/standard v1.2.2
golang.org/x/crypto v0.15.0
modernc.org/sqlite v1.27.0
)
require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.13.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/fogleman/gg v1.3.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-openapi/inflect v0.19.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
github.com/go-openapi/spec v0.20.7 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/hashicorp/hcl/v2 v2.15.0 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/hashicorp/hcl/v2 v2.19.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/leodido/go-urn v1.2.1 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.17 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/swaggo/files v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rogpeppe/go-internal v1.11.0 // indirect
github.com/swaggo/files v1.0.1 // indirect
github.com/yeqown/reedsolomon v1.0.0 // indirect
github.com/zclconf/go-cty v1.12.1 // indirect
golang.org/x/image v0.0.0-20200927104501-e162460cd6b5 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.8.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/tools v0.6.0 // indirect
github.com/zclconf/go-cty v1.14.1 // indirect
golang.org/x/image v0.14.0 // indirect
golang.org/x/mod v0.14.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.15.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
lukechampine.com/uint128 v1.3.0 // indirect
modernc.org/cc/v3 v3.41.0 // indirect
modernc.org/ccgo/v3 v3.16.15 // indirect
modernc.org/libc v1.34.4 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.7.2 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/strutil v1.2.0 // indirect
modernc.org/token v1.1.0 // indirect
)

File diff suppressed because it is too large

View File

@@ -5,9 +5,10 @@ import (
)
type AllServices struct {
User *UserService
Group *GroupService
Items *ItemService
User *UserService
Group *GroupService
Items *ItemService
BackgroundService *BackgroundService
}
type OptionsFunc func(*options)
@@ -42,5 +43,6 @@ func New(repos *repo.AllRepos, opts ...OptionsFunc) *AllServices {
repo: repos,
autoIncrementAssetID: options.autoIncrementAssetID,
},
BackgroundService: &BackgroundService{repos},
}
}

View File

@@ -3,11 +3,10 @@ package services
import (
"context"
"log"
"math/rand"
"os"
"testing"
"time"
"github.com/hay-kot/homebox/backend/internal/core/services/reporting/eventbus"
"github.com/hay-kot/homebox/backend/internal/data/ent"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/pkgs/faker"
@@ -15,7 +14,8 @@ import (
)
var (
fk = faker.NewFaker()
fk = faker.NewFaker()
tbus = eventbus.New()
tCtx = Context{}
tClient *ent.Client
@@ -49,8 +49,6 @@ func bootstrap() {
}
func TestMain(m *testing.M) {
rand.Seed(int64(time.Now().Unix()))
client, err := ent.Open("sqlite3", "file:ent?mode=memory&cache=shared&_fk=1")
if err != nil {
log.Fatalf("failed opening connection to sqlite: %v", err)
@@ -62,7 +60,7 @@ func TestMain(m *testing.M) {
}
tClient = client
tRepos = repo.New(tClient, os.TempDir()+"/homebox")
tRepos = repo.New(tClient, tbus, os.TempDir()+"/homebox")
tSvc = New(tRepos)
defer client.Close()

View File

@@ -1,7 +0,0 @@
Import Ref,Location,Labels,Quantity,Name,Description,Insured,Serial Number,Mode Number,Manufacturer,Notes,Purchase From,Purchased Price,Purchased Time,Lifetime Warranty,Warranty Expires,Warranty Details,Sold To,Sold Price,Sold Time,Sold Notes
A,Garage,IOT;Home Assistant; Z-Wave,1,Zooz Universal Relay ZEN17,Description 1,TRUE,,ZEN17,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021,
B,Living Room,IOT;Home Assistant; Z-Wave,1,Zooz Motion Sensor,Description 2,FALSE,,ZSE18,Zooz,,Amazon,29.95,10/15/2021,,10/15/2021,,,,10/15/2021,
C,Office,IOT;Home Assistant; Z-Wave,1,Zooz 110v Power Switch,Description 3,TRUE,,ZEN15,Zooz,,Amazon,39.95,10/13/2021,,10/13/2021,,,,10/13/2021,
D,Downstairs,IOT;Home Assistant; Z-Wave,1,Ecolink Z-Wave PIR Motion Sensor,Description 4,FALSE,,PIRZWAVE2.5-ECO,Ecolink,,Amazon,35.58,10/21/2020,,10/21/2020,,,,10/21/2020,
E,Entry,IOT;Home Assistant; Z-Wave,1,Yale Security Touchscreen Deadbolt,Description 5,TRUE,,YRD226ZW2619,Yale,,Amazon,120.39,10/14/2020,,10/14/2020,,,,10/14/2020,
F,Kitchen,IOT;Home Assistant; Z-Wave,1,Smart Rocker Light Dimmer,Description 6,FALSE,,39351,Honeywell,,Amazon,65.98,09/30/2020,,09/30/2020,,,,09/30/2020,

View File

@@ -1,7 +0,0 @@
Import Ref Location Labels Quantity Name Description Insured Serial Number Mode Number Manufacturer Notes Purchase From Purchased Price Purchased Time Lifetime Warranty Warranty Expires Warranty Details Sold To Sold Price Sold Time Sold Notes
A Garage IOT;Home Assistant; Z-Wave 1 Zooz Universal Relay ZEN17 Description 1 TRUE ZEN17 Zooz Amazon 39.95 10/13/2021 10/13/2021 10/13/2021
B Living Room IOT;Home Assistant; Z-Wave 1 Zooz Motion Sensor Description 2 FALSE ZSE18 Zooz Amazon 29.95 10/15/2021 10/15/2021 10/15/2021
C Office IOT;Home Assistant; Z-Wave 1 Zooz 110v Power Switch Description 3 TRUE ZEN15 Zooz Amazon 39.95 10/13/2021 10/13/2021 10/13/2021
D Downstairs IOT;Home Assistant; Z-Wave 1 Ecolink Z-Wave PIR Motion Sensor Description 4 FALSE PIRZWAVE2.5-ECO Ecolink Amazon 35.58 10/21/2020 10/21/2020 10/21/2020
E Entry IOT;Home Assistant; Z-Wave 1 Yale Security Touchscreen Deadbolt Description 5 TRUE YRD226ZW2619 Yale Amazon 120.39 10/14/2020 10/14/2020 10/14/2020
F Kitchen IOT;Home Assistant; Z-Wave 1 Smart Rocker Light Dimmer Description 6 FALSE 39351 Honeywell Amazon 65.98 09/30/2020 09/30/2020 09/30/2020

View File

@@ -0,0 +1,85 @@
// Package eventbus provides a simple in-process event bus.
package eventbus
import (
"sync"
"github.com/google/uuid"
)
type Event string
const (
EventLabelMutation Event = "label.mutation"
EventLocationMutation Event = "location.mutation"
EventItemMutation Event = "item.mutation"
)
type GroupMutationEvent struct {
GID uuid.UUID
}
type eventData struct {
event Event
data any
}
type EventBus struct {
started bool
ch chan eventData
mu sync.RWMutex
subscribers map[Event][]func(any)
}
func New() *EventBus {
return &EventBus{
ch: make(chan eventData, 10),
subscribers: map[Event][]func(any){
EventLabelMutation: {},
EventLocationMutation: {},
EventItemMutation: {},
},
}
}
func (e *EventBus) Run() {
if e.started {
panic("event bus already started")
}
e.started = true
for event := range e.ch {
e.mu.RLock()
arr, ok := e.subscribers[event.event]
e.mu.RUnlock()
if !ok {
continue
}
for _, fn := range arr {
fn(event.data)
}
}
}
func (e *EventBus) Publish(event Event, data any) {
e.ch <- eventData{
event: event,
data: data,
}
}
func (e *EventBus) Subscribe(event Event, fn func(any)) {
e.mu.Lock()
defer e.mu.Unlock()
arr, ok := e.subscribers[event]
if !ok {
panic("event not found")
}
e.subscribers[event] = append(arr, fn)
}
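As a quick orientation for the new package above, a minimal usage sketch that relies only on the API shown in this file (New, Subscribe, Publish, Run); the subscriber body and the fmt/uuid usage are illustrative.

    // Minimal sketch: subscribers are registered before Run, and Publish pushes
    // events onto the buffered channel that Run drains.
    bus := eventbus.New()

    bus.Subscribe(eventbus.EventItemMutation, func(data any) {
        if ev, ok := data.(eventbus.GroupMutationEvent); ok {
            fmt.Println("item mutated for group", ev.GID) // illustrative handler
        }
    })

    go bus.Run()

    bus.Publish(eventbus.EventItemMutation, eventbus.GroupMutationEvent{GID: uuid.New()})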

View File

@@ -83,3 +83,13 @@ func parseLocationString(s string) LocationString {
func (csf LocationString) String() string {
return strings.Join(csf, " / ")
}
func fromPathSlice(s []repo.LocationPath) LocationString {
v := make(LocationString, len(s))
for i := range s {
v[i] = s[i].Name
}
return v
}
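fromPathSlice feeds the exporter's new nested-location support: the repository returns a location's ancestry (see the PathForLoc call in the next hunk) and LocationString renders it as a slash-separated path. A small hedged example with made-up location names; the root-first ordering is an assumption.

    // Sketch: a three-level ancestry rendered the way the export sheet shows it.
    paths := []repo.LocationPath{
        {Name: "Garage"},  // hypothetical ancestry, root first
        {Name: "Shelf A"},
        {Name: "Bin 3"},
    }

    loc := fromPathSlice(paths)
    fmt.Println(loc.String()) // "Garage / Shelf A / Bin 3"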

View File

@@ -1,6 +1,7 @@
package reporting
import (
"context"
"fmt"
"io"
"reflect"
@@ -8,6 +9,7 @@ import (
"strconv"
"strings"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/data/types"
"github.com/rs/zerolog/log"
@@ -151,7 +153,7 @@ func (s *IOSheet) Read(data io.Reader) error {
}
// ReadItems reads the given items into the sheet, resolving the full location path for each item.
func (s *IOSheet) ReadItems(items []repo.ItemOut) {
func (s *IOSheet) ReadItems(ctx context.Context, items []repo.ItemOut, GID uuid.UUID, repos *repo.AllRepos) error {
s.Rows = make([]ExportTSVRow, len(items))
extraHeaders := map[string]struct{}{}
@@ -160,7 +162,15 @@ func (s *IOSheet) ReadItems(items []repo.ItemOut) {
item := items[i]
// TODO: Support fetching nested locations
locString := LocationString{item.Location.Name}
locId := item.Location.ID
locPaths, err := repos.Locations.PathForLoc(context.Background(), GID, locId)
if err != nil {
log.Error().Err(err).Msg("could not get location path")
return err
}
locString := fromPathSlice(locPaths)
labelString := make([]string, len(item.Labels))
@@ -238,6 +248,8 @@ func (s *IOSheet) ReadItems(items []repo.ItemOut) {
for _, h := range customHeaders {
s.headers = append(s.headers, "HB.field."+h)
}
return nil
}
// Writes the current sheet to a writer in TSV format.

View File

@@ -20,12 +20,6 @@ var (
//go:embed .testdata/import/types.csv
customTypesImportCSV []byte
//go:embed .testdata/import.csv
CSVData_Comma []byte
//go:embed .testdata/import.tsv
CSVData_Tab []byte
)
func TestSheet_Read(t *testing.T) {
@@ -189,7 +183,7 @@ func Test_determineSeparator(t *testing.T) {
{
name: "comma",
args: args{
data: CSVData_Comma,
data: []byte("a,b,c"),
},
want: ',',
wantErr: false,
@@ -197,7 +191,7 @@ func Test_determineSeparator(t *testing.T) {
{
name: "tab",
args: args{
data: CSVData_Tab,
data: []byte("a\tb\tc"),
},
want: '\t',
wantErr: false,

View File

@@ -0,0 +1,81 @@
package services
import (
"context"
"strings"
"time"
"github.com/containrrr/shoutrrr"
"github.com/hay-kot/homebox/backend/internal/data/repo"
"github.com/hay-kot/homebox/backend/internal/data/types"
"github.com/rs/zerolog/log"
)
type BackgroundService struct {
repos *repo.AllRepos
}
func (svc *BackgroundService) SendNotifiersToday(ctx context.Context) error {
// Get All Groups
groups, err := svc.repos.Groups.GetAllGroups(ctx)
if err != nil {
return err
}
today := types.DateFromTime(time.Now())
for i := range groups {
group := groups[i]
entries, err := svc.repos.MaintEntry.GetScheduled(ctx, group.ID, today)
if err != nil {
return err
}
if len(entries) == 0 {
log.Debug().
Str("group_name", group.Name).
Str("group_id", group.ID.String()).
Msg("No scheduled maintenance for today")
continue
}
notifiers, err := svc.repos.Notifiers.GetByGroup(ctx, group.ID)
if err != nil {
return err
}
urls := make([]string, len(notifiers))
for i := range notifiers {
urls[i] = notifiers[i].URL
}
bldr := strings.Builder{}
bldr.WriteString("Homebox Maintenance for (")
bldr.WriteString(today.String())
bldr.WriteString("):\n")
for i := range entries {
entry := entries[i]
bldr.WriteString(" - ")
bldr.WriteString(entry.Name)
bldr.WriteString("\n")
}
var sendErrs []error
for i := range urls {
err := shoutrrr.Send(urls[i], bldr.String())
if err != nil {
sendErrs = append(sendErrs, err)
}
}
if len(sendErrs) > 0 {
return sendErrs[0]
}
}
return nil
}
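
A hedged sketch of how this could be driven on a schedule; the ticker loop and import path are assumptions, and the application may use a dedicated scheduler instead:

package scheduler

import (
	"context"
	"time"

	"github.com/rs/zerolog/log"

	// assumed import path for the services package above
	"github.com/hay-kot/homebox/backend/internal/core/services"
)

// runDailyNotifier calls SendNotifiersToday once per day until ctx is cancelled.
func runDailyNotifier(ctx context.Context, svc *services.BackgroundService) {
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			if err := svc.SendNotifiersToday(ctx); err != nil {
				log.Err(err).Msg("scheduled notification run failed")
			}
		}
	}
}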

View File

@@ -337,7 +337,10 @@ func (svc *ItemService) ExportTSV(ctx context.Context, GID uuid.UUID) ([][]strin
sheet := reporting.IOSheet{}
sheet.ReadItems(items)
err = sheet.ReadItems(ctx, items, GID, svc.repo)
if err != nil {
return nil, err
}
return sheet.TSV()
}

View File

@@ -23,7 +23,7 @@ func (svc *ItemService) AttachmentPath(ctx context.Context, attachmentId uuid.UU
func (svc *ItemService) AttachmentUpdate(ctx Context, itemId uuid.UUID, data *repo.ItemAttachmentUpdate) (repo.ItemOut, error) {
// Update Attachment
attachment, err := svc.repo.Attachments.Update(ctx, data.ID, attachment.Type(data.Type))
attachment, err := svc.repo.Attachments.Update(ctx, data.ID, data)
if err != nil {
return repo.ItemOut{}, err
}

View File

@@ -61,6 +61,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
switch data.GroupToken {
case "":
log.Debug().Msg("creating new group")
creatingGroup = true
group, err = svc.repos.Groups.GroupCreate(ctx, "Home")
if err != nil {
@@ -68,6 +69,7 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
return repo.UserOut{}, err
}
default:
log.Debug().Msg("joining existing group")
token, err = svc.repos.Groups.InvitationGet(ctx, hasher.HashToken(data.GroupToken))
if err != nil {
log.Err(err).Msg("Failed to get invitation token")
@@ -94,14 +96,14 @@ func (svc *UserService) RegisterUser(ctx context.Context, data UserRegistration)
// Create the default labels and locations for the group.
if creatingGroup {
for _, label := range defaultLabels() {
_, err := svc.repos.Labels.Create(ctx, group.ID, label)
_, err := svc.repos.Labels.Create(ctx, usr.GroupID, label)
if err != nil {
return repo.UserOut{}, err
}
}
for _, location := range defaultLocations() {
_, err := svc.repos.Locations.Create(ctx, group.ID, location)
_, err := svc.repos.Locations.Create(ctx, usr.GroupID, location)
if err != nil {
return repo.UserOut{}, err
}
@@ -138,12 +140,18 @@ func (svc *UserService) UpdateSelf(ctx context.Context, ID uuid.UUID, data repo.
// ============================================================================
// User Authentication
func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID) (UserAuthTokenDetail, error) {
func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID, extendedSession bool) (UserAuthTokenDetail, error) {
attachmentToken := hasher.GenerateToken()
expiresAt := time.Now().Add(oneWeek)
if extendedSession {
expiresAt = time.Now().Add(oneWeek * 4)
}
attachmentData := repo.UserAuthTokenCreate{
UserID: userId,
TokenHash: attachmentToken.Hash,
ExpiresAt: time.Now().Add(oneWeek),
ExpiresAt: expiresAt,
}
_, err := svc.repos.AuthTokens.CreateToken(ctx, attachmentData, authroles.RoleAttachments)
@@ -155,7 +163,7 @@ func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID
data := repo.UserAuthTokenCreate{
UserID: userId,
TokenHash: userToken.Hash,
ExpiresAt: time.Now().Add(oneWeek),
ExpiresAt: expiresAt,
}
created, err := svc.repos.AuthTokens.CreateToken(ctx, data, authroles.RoleUser)
@@ -170,7 +178,7 @@ func (svc *UserService) createSessionToken(ctx context.Context, userId uuid.UUID
}, nil
}
func (svc *UserService) Login(ctx context.Context, username, password string) (UserAuthTokenDetail, error) {
func (svc *UserService) Login(ctx context.Context, username, password string, extendedSession bool) (UserAuthTokenDetail, error) {
usr, err := svc.repos.Users.GetOneEmail(ctx, username)
if err != nil {
// SECURITY: Perform hash to ensure response times are the same
@@ -182,7 +190,7 @@ func (svc *UserService) Login(ctx context.Context, username, password string) (U
return UserAuthTokenDetail{}, ErrorInvalidLogin
}
return svc.createSessionToken(ctx, usr.ID)
return svc.createSessionToken(ctx, usr.ID, extendedSession)
}
func (svc *UserService) Logout(ctx context.Context, token string) error {
@@ -199,7 +207,7 @@ func (svc *UserService) RenewToken(ctx context.Context, token string) (UserAuthT
return UserAuthTokenDetail{}, ErrorInvalidToken
}
return svc.createSessionToken(ctx, dbToken.ID)
return svc.createSessionToken(ctx, dbToken.ID, false)
}
// DeleteSelf deletes the user that is currently logged in, based on the provided UUID
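
To show where extendedSession might come from, a hypothetical handler sketch; the request field name, handler type, and response handling are illustrative, not the project's actual API surface:

package handlers

import (
	"encoding/json"
	"net/http"

	// assumed import path for the services package above
	"github.com/hay-kot/homebox/backend/internal/core/services"
)

type LoginHandler struct {
	svc *services.UserService
}

func (h *LoginHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var body struct {
		Username     string `json:"username"`
		Password     string `json:"password"`
		StayLoggedIn bool   `json:"stayLoggedIn"` // hypothetical "remember me" flag
	}
	if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
		http.Error(w, "invalid request body", http.StatusBadRequest)
		return
	}

	// extendedSession stretches the token lifetime from one week to four (see above).
	token, err := h.svc.Login(r.Context(), body.Username, body.Password, body.StayLoggedIn)
	if err != nil {
		http.Error(w, "invalid credentials", http.StatusUnauthorized)
		return
	}
	_ = token // set the session cookie from the returned token detail here
	w.WriteHeader(http.StatusNoContent)
}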

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
@@ -25,11 +26,14 @@ type Attachment struct {
UpdatedAt time.Time `json:"updated_at,omitempty"`
// Type holds the value of the "type" field.
Type attachment.Type `json:"type,omitempty"`
// Primary holds the value of the "primary" field.
Primary bool `json:"primary,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the AttachmentQuery when eager-loading is set.
Edges AttachmentEdges `json:"edges"`
document_attachments *uuid.UUID
item_attachments *uuid.UUID
selectValues sql.SelectValues
}
// AttachmentEdges holds the relations/edges for other nodes in the graph.
@@ -74,6 +78,8 @@ func (*Attachment) scanValues(columns []string) ([]any, error) {
values := make([]any, len(columns))
for i := range columns {
switch columns[i] {
case attachment.FieldPrimary:
values[i] = new(sql.NullBool)
case attachment.FieldType:
values[i] = new(sql.NullString)
case attachment.FieldCreatedAt, attachment.FieldUpdatedAt:
@@ -85,7 +91,7 @@ func (*Attachment) scanValues(columns []string) ([]any, error) {
case attachment.ForeignKeys[1]: // item_attachments
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type Attachment", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -123,6 +129,12 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
} else if value.Valid {
a.Type = attachment.Type(value.String)
}
case attachment.FieldPrimary:
if value, ok := values[i].(*sql.NullBool); !ok {
return fmt.Errorf("unexpected type %T for field primary", values[i])
} else if value.Valid {
a.Primary = value.Bool
}
case attachment.ForeignKeys[0]:
if value, ok := values[i].(*sql.NullScanner); !ok {
return fmt.Errorf("unexpected type %T for field document_attachments", values[i])
@@ -137,11 +149,19 @@ func (a *Attachment) assignValues(columns []string, values []any) error {
a.item_attachments = new(uuid.UUID)
*a.item_attachments = *value.S.(*uuid.UUID)
}
default:
a.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Attachment.
// This includes values selected through modifiers, order, etc.
func (a *Attachment) Value(name string) (ent.Value, error) {
return a.selectValues.Get(name)
}
// QueryItem queries the "item" edge of the Attachment entity.
func (a *Attachment) QueryItem() *ItemQuery {
return NewAttachmentClient(a.config).QueryItem(a)
@@ -183,6 +203,9 @@ func (a *Attachment) String() string {
builder.WriteString(", ")
builder.WriteString("type=")
builder.WriteString(fmt.Sprintf("%v", a.Type))
builder.WriteString(", ")
builder.WriteString("primary=")
builder.WriteString(fmt.Sprintf("%v", a.Primary))
builder.WriteByte(')')
return builder.String()
}

View File

@@ -6,6 +6,8 @@ import (
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -20,6 +22,8 @@ const (
FieldUpdatedAt = "updated_at"
// FieldType holds the string denoting the type field in the database.
FieldType = "type"
// FieldPrimary holds the string denoting the primary field in the database.
FieldPrimary = "primary"
// EdgeItem holds the string denoting the item edge name in mutations.
EdgeItem = "item"
// EdgeDocument holds the string denoting the document edge name in mutations.
@@ -48,6 +52,7 @@ var Columns = []string{
FieldCreatedAt,
FieldUpdatedAt,
FieldType,
FieldPrimary,
}
// ForeignKeys holds the SQL foreign-keys that are owned by the "attachments"
@@ -79,6 +84,8 @@ var (
DefaultUpdatedAt func() time.Time
// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
UpdateDefaultUpdatedAt func() time.Time
// DefaultPrimary holds the default value on creation for the "primary" field.
DefaultPrimary bool
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
@@ -111,3 +118,59 @@ func TypeValidator(_type Type) error {
return fmt.Errorf("attachment: invalid enum value for type field: %q", _type)
}
}
// OrderOption defines the ordering options for the Attachment queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByType orders the results by the type field.
func ByType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldType, opts...).ToFunc()
}
// ByPrimary orders the results by the primary field.
func ByPrimary(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldPrimary, opts...).ToFunc()
}
// ByItemField orders the results by item field.
func ByItemField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newItemStep(), sql.OrderByField(field, opts...))
}
}
// ByDocumentField orders the results by document field.
func ByDocumentField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newDocumentStep(), sql.OrderByField(field, opts...))
}
}
func newItemStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(ItemInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
)
}
func newDocumentStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
)
}
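
With the generated OrderOption helpers above, callers can sort an item's attachments so the primary one comes first; a brief sketch, assuming the usual generated packages and an existing *ent.Client:

package example

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/google/uuid"

	"github.com/hay-kot/homebox/backend/internal/data/ent"
	"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
	"github.com/hay-kot/homebox/backend/internal/data/ent/item"
)

// primaryFirst returns an item's attachments with the primary one ordered first.
func primaryFirst(ctx context.Context, client *ent.Client, itemID uuid.UUID) ([]*ent.Attachment, error) {
	return client.Attachment.Query().
		Where(attachment.HasItemWith(item.ID(itemID))).
		Order(attachment.ByPrimary(sql.OrderDesc()), attachment.ByCreatedAt()).
		All(ctx)
}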

View File

@@ -66,6 +66,11 @@ func UpdatedAt(v time.Time) predicate.Attachment {
return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v))
}
// Primary applies equality check predicate on the "primary" field. It's identical to PrimaryEQ.
func Primary(v bool) predicate.Attachment {
return predicate.Attachment(sql.FieldEQ(FieldPrimary, v))
}
// CreatedAtEQ applies the EQ predicate on the "created_at" field.
func CreatedAtEQ(v time.Time) predicate.Attachment {
return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v))
@@ -166,6 +171,16 @@ func TypeNotIn(vs ...Type) predicate.Attachment {
return predicate.Attachment(sql.FieldNotIn(FieldType, vs...))
}
// PrimaryEQ applies the EQ predicate on the "primary" field.
func PrimaryEQ(v bool) predicate.Attachment {
return predicate.Attachment(sql.FieldEQ(FieldPrimary, v))
}
// PrimaryNEQ applies the NEQ predicate on the "primary" field.
func PrimaryNEQ(v bool) predicate.Attachment {
return predicate.Attachment(sql.FieldNEQ(FieldPrimary, v))
}
// HasItem applies the HasEdge predicate on the "item" edge.
func HasItem() predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
@@ -180,11 +195,7 @@ func HasItem() predicate.Attachment {
// HasItemWith applies the HasEdge predicate on the "item" edge with the given conditions (other predicates).
func HasItemWith(preds ...predicate.Item) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(ItemInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, ItemTable, ItemColumn),
)
step := newItemStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -207,11 +218,7 @@ func HasDocument() predicate.Attachment {
// HasDocumentWith applies the HasEdge predicate on the "document" edge with the given conditions (other predicates).
func HasDocumentWith(preds ...predicate.Document) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, DocumentTable, DocumentColumn),
)
step := newDocumentStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -222,32 +229,15 @@ func HasDocumentWith(preds ...predicate.Document) predicate.Attachment {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Attachment) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.Attachment(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Attachment) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.Attachment(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Attachment) predicate.Attachment {
return predicate.Attachment(func(s *sql.Selector) {
p(s.Not())
})
return predicate.Attachment(sql.NotPredicates(p))
}
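
The new Primary predicates also make it easy to keep at most one primary attachment per item; a hedged sketch of that pattern (helper name and transaction handling are illustrative, not the project's repository code):

package example

import (
	"context"

	"github.com/google/uuid"

	"github.com/hay-kot/homebox/backend/internal/data/ent"
	"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
	"github.com/hay-kot/homebox/backend/internal/data/ent/item"
)

// setPrimary marks one attachment as primary and clears the flag on its siblings.
func setPrimary(ctx context.Context, tx *ent.Tx, itemID, attachmentID uuid.UUID) error {
	// Clear the flag on every other attachment belonging to the same item.
	if _, err := tx.Attachment.Update().
		Where(attachment.HasItemWith(item.ID(itemID)), attachment.IDNEQ(attachmentID)).
		SetPrimary(false).
		Save(ctx); err != nil {
		return err
	}
	// Flag the chosen attachment as primary.
	_, err := tx.Attachment.UpdateOneID(attachmentID).SetPrimary(true).Save(ctx)
	return err
}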

View File

@@ -65,6 +65,20 @@ func (ac *AttachmentCreate) SetNillableType(a *attachment.Type) *AttachmentCreat
return ac
}
// SetPrimary sets the "primary" field.
func (ac *AttachmentCreate) SetPrimary(b bool) *AttachmentCreate {
ac.mutation.SetPrimary(b)
return ac
}
// SetNillablePrimary sets the "primary" field if the given value is not nil.
func (ac *AttachmentCreate) SetNillablePrimary(b *bool) *AttachmentCreate {
if b != nil {
ac.SetPrimary(*b)
}
return ac
}
// SetID sets the "id" field.
func (ac *AttachmentCreate) SetID(u uuid.UUID) *AttachmentCreate {
ac.mutation.SetID(u)
@@ -109,7 +123,7 @@ func (ac *AttachmentCreate) Mutation() *AttachmentMutation {
// Save creates the Attachment in the database.
func (ac *AttachmentCreate) Save(ctx context.Context) (*Attachment, error) {
ac.defaults()
return withHooks[*Attachment, AttachmentMutation](ctx, ac.sqlSave, ac.mutation, ac.hooks)
return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -148,6 +162,10 @@ func (ac *AttachmentCreate) defaults() {
v := attachment.DefaultType
ac.mutation.SetType(v)
}
if _, ok := ac.mutation.Primary(); !ok {
v := attachment.DefaultPrimary
ac.mutation.SetPrimary(v)
}
if _, ok := ac.mutation.ID(); !ok {
v := attachment.DefaultID()
ac.mutation.SetID(v)
@@ -170,6 +188,9 @@ func (ac *AttachmentCreate) check() error {
return &ValidationError{Name: "type", err: fmt.Errorf(`ent: validator failed for field "Attachment.type": %w`, err)}
}
}
if _, ok := ac.mutation.Primary(); !ok {
return &ValidationError{Name: "primary", err: errors.New(`ent: missing required field "Attachment.primary"`)}
}
if _, ok := ac.mutation.ItemID(); !ok {
return &ValidationError{Name: "item", err: errors.New(`ent: missing required edge "Attachment.item"`)}
}
@@ -223,6 +244,10 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
_spec.SetField(attachment.FieldType, field.TypeEnum, value)
_node.Type = value
}
if value, ok := ac.mutation.Primary(); ok {
_spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
_node.Primary = value
}
if nodes := ac.mutation.ItemIDs(); len(nodes) > 0 {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -231,10 +256,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -251,10 +273,7 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -269,11 +288,15 @@ func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) {
// AttachmentCreateBulk is the builder for creating many Attachment entities in bulk.
type AttachmentCreateBulk struct {
config
err error
builders []*AttachmentCreate
}
// Save creates the Attachment entities in the database.
func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error) {
if acb.err != nil {
return nil, acb.err
}
specs := make([]*sqlgraph.CreateSpec, len(acb.builders))
nodes := make([]*Attachment, len(acb.builders))
mutators := make([]Mutator, len(acb.builders))
@@ -290,8 +313,8 @@ func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation)
} else {

View File

@@ -27,7 +27,7 @@ func (ad *AttachmentDelete) Where(ps ...predicate.Attachment) *AttachmentDelete
// Exec executes the deletion query and returns how many vertices were deleted.
func (ad *AttachmentDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, AttachmentMutation](ctx, ad.sqlExec, ad.mutation, ad.hooks)
return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks)
}
// ExecX is like Exec, but panics if an error occurs.

View File

@@ -21,7 +21,7 @@ import (
type AttachmentQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []attachment.OrderOption
inters []Interceptor
predicates []predicate.Attachment
withItem *ItemQuery
@@ -58,7 +58,7 @@ func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery {
}
// Order specifies how the records should be ordered.
func (aq *AttachmentQuery) Order(o ...OrderFunc) *AttachmentQuery {
func (aq *AttachmentQuery) Order(o ...attachment.OrderOption) *AttachmentQuery {
aq.order = append(aq.order, o...)
return aq
}
@@ -296,7 +296,7 @@ func (aq *AttachmentQuery) Clone() *AttachmentQuery {
return &AttachmentQuery{
config: aq.config,
ctx: aq.ctx.Clone(),
order: append([]OrderFunc{}, aq.order...),
order: append([]attachment.OrderOption{}, aq.order...),
inters: append([]Interceptor{}, aq.inters...),
predicates: append([]predicate.Attachment{}, aq.predicates...),
withItem: aq.withItem.Clone(),

View File

@@ -51,6 +51,20 @@ func (au *AttachmentUpdate) SetNillableType(a *attachment.Type) *AttachmentUpdat
return au
}
// SetPrimary sets the "primary" field.
func (au *AttachmentUpdate) SetPrimary(b bool) *AttachmentUpdate {
au.mutation.SetPrimary(b)
return au
}
// SetNillablePrimary sets the "primary" field if the given value is not nil.
func (au *AttachmentUpdate) SetNillablePrimary(b *bool) *AttachmentUpdate {
if b != nil {
au.SetPrimary(*b)
}
return au
}
// SetItemID sets the "item" edge to the Item entity by ID.
func (au *AttachmentUpdate) SetItemID(id uuid.UUID) *AttachmentUpdate {
au.mutation.SetItemID(id)
@@ -93,7 +107,7 @@ func (au *AttachmentUpdate) ClearDocument() *AttachmentUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (au *AttachmentUpdate) Save(ctx context.Context) (int, error) {
au.defaults()
return withHooks[int, AttachmentMutation](ctx, au.sqlSave, au.mutation, au.hooks)
return withHooks(ctx, au.sqlSave, au.mutation, au.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -160,6 +174,9 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
if value, ok := au.mutation.GetType(); ok {
_spec.SetField(attachment.FieldType, field.TypeEnum, value)
}
if value, ok := au.mutation.Primary(); ok {
_spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
}
if au.mutation.ItemCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -168,10 +185,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -184,10 +198,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -203,10 +214,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -219,10 +227,7 @@ func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -270,6 +275,20 @@ func (auo *AttachmentUpdateOne) SetNillableType(a *attachment.Type) *AttachmentU
return auo
}
// SetPrimary sets the "primary" field.
func (auo *AttachmentUpdateOne) SetPrimary(b bool) *AttachmentUpdateOne {
auo.mutation.SetPrimary(b)
return auo
}
// SetNillablePrimary sets the "primary" field if the given value is not nil.
func (auo *AttachmentUpdateOne) SetNillablePrimary(b *bool) *AttachmentUpdateOne {
if b != nil {
auo.SetPrimary(*b)
}
return auo
}
// SetItemID sets the "item" edge to the Item entity by ID.
func (auo *AttachmentUpdateOne) SetItemID(id uuid.UUID) *AttachmentUpdateOne {
auo.mutation.SetItemID(id)
@@ -325,7 +344,7 @@ func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *Attachme
// Save executes the query and returns the updated Attachment entity.
func (auo *AttachmentUpdateOne) Save(ctx context.Context) (*Attachment, error) {
auo.defaults()
return withHooks[*Attachment, AttachmentMutation](ctx, auo.sqlSave, auo.mutation, auo.hooks)
return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -409,6 +428,9 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
if value, ok := auo.mutation.GetType(); ok {
_spec.SetField(attachment.FieldType, field.TypeEnum, value)
}
if value, ok := auo.mutation.Primary(); ok {
_spec.SetField(attachment.FieldPrimary, field.TypeBool, value)
}
if auo.mutation.ItemCleared() {
edge := &sqlgraph.EdgeSpec{
Rel: sqlgraph.M2O,
@@ -417,10 +439,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -433,10 +452,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.ItemColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -452,10 +468,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -468,10 +481,7 @@ func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment,
Columns: []string{attachment.DocumentColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"strings"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
@@ -23,6 +24,7 @@ type AuthRoles struct {
// The values are being populated by the AuthRolesQuery when eager-loading is set.
Edges AuthRolesEdges `json:"edges"`
auth_tokens_roles *uuid.UUID
selectValues sql.SelectValues
}
// AuthRolesEdges holds the relations/edges for other nodes in the graph.
@@ -59,7 +61,7 @@ func (*AuthRoles) scanValues(columns []string) ([]any, error) {
case authroles.ForeignKeys[0]: // auth_tokens_roles
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type AuthRoles", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -92,11 +94,19 @@ func (ar *AuthRoles) assignValues(columns []string, values []any) error {
ar.auth_tokens_roles = new(uuid.UUID)
*ar.auth_tokens_roles = *value.S.(*uuid.UUID)
}
default:
ar.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the AuthRoles.
// This includes values selected through modifiers, order, etc.
func (ar *AuthRoles) Value(name string) (ent.Value, error) {
return ar.selectValues.Get(name)
}
// QueryToken queries the "token" edge of the AuthRoles entity.
func (ar *AuthRoles) QueryToken() *AuthTokensQuery {
return NewAuthRolesClient(ar.config).QueryToken(ar)

View File

@@ -4,6 +4,9 @@ package authroles
import (
"fmt"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
const (
@@ -79,3 +82,30 @@ func RoleValidator(r Role) error {
return fmt.Errorf("authroles: invalid enum value for role field: %q", r)
}
}
// OrderOption defines the ordering options for the AuthRoles queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByRole orders the results by the role field.
func ByRole(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldRole, opts...).ToFunc()
}
// ByTokenField orders the results by token field.
func ByTokenField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newTokenStep(), sql.OrderByField(field, opts...))
}
}
func newTokenStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(TokenInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn),
)
}

View File

@@ -87,11 +87,7 @@ func HasToken() predicate.AuthRoles {
// HasTokenWith applies the HasEdge predicate on the "token" edge with the given conditions (other predicates).
func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles {
return predicate.AuthRoles(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(TokenInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2O, true, TokenTable, TokenColumn),
)
step := newTokenStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -102,32 +98,15 @@ func HasTokenWith(preds ...predicate.AuthTokens) predicate.AuthRoles {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.AuthRoles) predicate.AuthRoles {
return predicate.AuthRoles(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.AuthRoles(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.AuthRoles) predicate.AuthRoles {
return predicate.AuthRoles(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.AuthRoles(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.AuthRoles) predicate.AuthRoles {
return predicate.AuthRoles(func(s *sql.Selector) {
p(s.Not())
})
return predicate.AuthRoles(sql.NotPredicates(p))
}

View File

@@ -62,7 +62,7 @@ func (arc *AuthRolesCreate) Mutation() *AuthRolesMutation {
// Save creates the AuthRoles in the database.
func (arc *AuthRolesCreate) Save(ctx context.Context) (*AuthRoles, error) {
arc.defaults()
return withHooks[*AuthRoles, AuthRolesMutation](ctx, arc.sqlSave, arc.mutation, arc.hooks)
return withHooks(ctx, arc.sqlSave, arc.mutation, arc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -143,10 +143,7 @@ func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) {
Columns: []string{authroles.TokenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: authtokens.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -161,11 +158,15 @@ func (arc *AuthRolesCreate) createSpec() (*AuthRoles, *sqlgraph.CreateSpec) {
// AuthRolesCreateBulk is the builder for creating many AuthRoles entities in bulk.
type AuthRolesCreateBulk struct {
config
err error
builders []*AuthRolesCreate
}
// Save creates the AuthRoles entities in the database.
func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error) {
if arcb.err != nil {
return nil, arcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(arcb.builders))
nodes := make([]*AuthRoles, len(arcb.builders))
mutators := make([]Mutator, len(arcb.builders))
@@ -182,8 +183,8 @@ func (arcb *AuthRolesCreateBulk) Save(ctx context.Context) ([]*AuthRoles, error)
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, arcb.builders[i+1].mutation)
} else {

View File

@@ -27,7 +27,7 @@ func (ard *AuthRolesDelete) Where(ps ...predicate.AuthRoles) *AuthRolesDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (ard *AuthRolesDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, AuthRolesMutation](ctx, ard.sqlExec, ard.mutation, ard.hooks)
return withHooks(ctx, ard.sqlExec, ard.mutation, ard.hooks)
}
// ExecX is like Exec, but panics if an error occurs.

View File

@@ -20,7 +20,7 @@ import (
type AuthRolesQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []authroles.OrderOption
inters []Interceptor
predicates []predicate.AuthRoles
withToken *AuthTokensQuery
@@ -56,7 +56,7 @@ func (arq *AuthRolesQuery) Unique(unique bool) *AuthRolesQuery {
}
// Order specifies how the records should be ordered.
func (arq *AuthRolesQuery) Order(o ...OrderFunc) *AuthRolesQuery {
func (arq *AuthRolesQuery) Order(o ...authroles.OrderOption) *AuthRolesQuery {
arq.order = append(arq.order, o...)
return arq
}
@@ -272,7 +272,7 @@ func (arq *AuthRolesQuery) Clone() *AuthRolesQuery {
return &AuthRolesQuery{
config: arq.config,
ctx: arq.ctx.Clone(),
order: append([]OrderFunc{}, arq.order...),
order: append([]authroles.OrderOption{}, arq.order...),
inters: append([]Interceptor{}, arq.inters...),
predicates: append([]predicate.AuthRoles{}, arq.predicates...),
withToken: arq.withToken.Clone(),

View File

@@ -75,7 +75,7 @@ func (aru *AuthRolesUpdate) ClearToken() *AuthRolesUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (aru *AuthRolesUpdate) Save(ctx context.Context) (int, error) {
return withHooks[int, AuthRolesMutation](ctx, aru.sqlSave, aru.mutation, aru.hooks)
return withHooks(ctx, aru.sqlSave, aru.mutation, aru.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -133,10 +133,7 @@ func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authroles.TokenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: authtokens.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -149,10 +146,7 @@ func (aru *AuthRolesUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authroles.TokenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: authtokens.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -239,7 +233,7 @@ func (aruo *AuthRolesUpdateOne) Select(field string, fields ...string) *AuthRole
// Save executes the query and returns the updated AuthRoles entity.
func (aruo *AuthRolesUpdateOne) Save(ctx context.Context) (*AuthRoles, error) {
return withHooks[*AuthRoles, AuthRolesMutation](ctx, aruo.sqlSave, aruo.mutation, aruo.hooks)
return withHooks(ctx, aruo.sqlSave, aruo.mutation, aruo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -314,10 +308,7 @@ func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles,
Columns: []string{authroles.TokenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: authtokens.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -330,10 +321,7 @@ func (aruo *AuthRolesUpdateOne) sqlSave(ctx context.Context) (_node *AuthRoles,
Columns: []string{authroles.TokenColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: authtokens.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authtokens.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
@@ -31,6 +32,7 @@ type AuthTokens struct {
// The values are being populated by the AuthTokensQuery when eager-loading is set.
Edges AuthTokensEdges `json:"edges"`
user_auth_tokens *uuid.UUID
selectValues sql.SelectValues
}
// AuthTokensEdges holds the relations/edges for other nodes in the graph.
@@ -84,7 +86,7 @@ func (*AuthTokens) scanValues(columns []string) ([]any, error) {
case authtokens.ForeignKeys[0]: // user_auth_tokens
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type AuthTokens", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -135,11 +137,19 @@ func (at *AuthTokens) assignValues(columns []string, values []any) error {
at.user_auth_tokens = new(uuid.UUID)
*at.user_auth_tokens = *value.S.(*uuid.UUID)
}
default:
at.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the AuthTokens.
// This includes values selected through modifiers, order, etc.
func (at *AuthTokens) Value(name string) (ent.Value, error) {
return at.selectValues.Get(name)
}
// QueryUser queries the "user" edge of the AuthTokens entity.
func (at *AuthTokens) QueryUser() *UserQuery {
return NewAuthTokensClient(at.config).QueryUser(at)

View File

@@ -5,6 +5,8 @@ package authtokens
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -85,3 +87,54 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
// OrderOption defines the ordering options for the AuthTokens queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByExpiresAt orders the results by the expires_at field.
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
}
// ByUserField orders the results by user field.
func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...))
}
}
// ByRolesField orders the results by roles field.
func ByRolesField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newRolesStep(), sql.OrderByField(field, opts...))
}
}
func newUserStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(UserInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
)
}
func newRolesStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(RolesInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn),
)
}

View File

@@ -250,11 +250,7 @@ func HasUser() predicate.AuthTokens {
// HasUserWith applies the HasEdge predicate on the "user" edge with the given conditions (other predicates).
func HasUserWith(preds ...predicate.User) predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(UserInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn),
)
step := newUserStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -277,11 +273,7 @@ func HasRoles() predicate.AuthTokens {
// HasRolesWith applies the HasEdge predicate on the "roles" edge with the given conditions (other predicates).
func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(RolesInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2O, false, RolesTable, RolesColumn),
)
step := newRolesStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -292,32 +284,15 @@ func HasRolesWith(preds ...predicate.AuthRoles) predicate.AuthTokens {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.AuthTokens) predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.AuthTokens(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.AuthTokens) predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.AuthTokens(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.AuthTokens) predicate.AuthTokens {
return predicate.AuthTokens(func(s *sql.Selector) {
p(s.Not())
})
return predicate.AuthTokens(sql.NotPredicates(p))
}

View File

@@ -131,7 +131,7 @@ func (atc *AuthTokensCreate) Mutation() *AuthTokensMutation {
// Save creates the AuthTokens in the database.
func (atc *AuthTokensCreate) Save(ctx context.Context) (*AuthTokens, error) {
atc.defaults()
return withHooks[*AuthTokens, AuthTokensMutation](ctx, atc.sqlSave, atc.mutation, atc.hooks)
return withHooks(ctx, atc.sqlSave, atc.mutation, atc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -249,10 +249,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -269,10 +266,7 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
Columns: []string{authtokens.RolesColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: authroles.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@@ -286,11 +280,15 @@ func (atc *AuthTokensCreate) createSpec() (*AuthTokens, *sqlgraph.CreateSpec) {
// AuthTokensCreateBulk is the builder for creating many AuthTokens entities in bulk.
type AuthTokensCreateBulk struct {
config
err error
builders []*AuthTokensCreate
}
// Save creates the AuthTokens entities in the database.
func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, error) {
if atcb.err != nil {
return nil, atcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(atcb.builders))
nodes := make([]*AuthTokens, len(atcb.builders))
mutators := make([]Mutator, len(atcb.builders))
@@ -307,8 +305,8 @@ func (atcb *AuthTokensCreateBulk) Save(ctx context.Context) ([]*AuthTokens, erro
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, atcb.builders[i+1].mutation)
} else {

View File

@@ -27,7 +27,7 @@ func (atd *AuthTokensDelete) Where(ps ...predicate.AuthTokens) *AuthTokensDelete
// Exec executes the deletion query and returns how many vertices were deleted.
func (atd *AuthTokensDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, AuthTokensMutation](ctx, atd.sqlExec, atd.mutation, atd.hooks)
return withHooks(ctx, atd.sqlExec, atd.mutation, atd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.

View File

@@ -22,7 +22,7 @@ import (
type AuthTokensQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []authtokens.OrderOption
inters []Interceptor
predicates []predicate.AuthTokens
withUser *UserQuery
@@ -59,7 +59,7 @@ func (atq *AuthTokensQuery) Unique(unique bool) *AuthTokensQuery {
}
// Order specifies how the records should be ordered.
func (atq *AuthTokensQuery) Order(o ...OrderFunc) *AuthTokensQuery {
func (atq *AuthTokensQuery) Order(o ...authtokens.OrderOption) *AuthTokensQuery {
atq.order = append(atq.order, o...)
return atq
}
@@ -297,7 +297,7 @@ func (atq *AuthTokensQuery) Clone() *AuthTokensQuery {
return &AuthTokensQuery{
config: atq.config,
ctx: atq.ctx.Clone(),
order: append([]OrderFunc{}, atq.order...),
order: append([]authtokens.OrderOption{}, atq.order...),
inters: append([]Interceptor{}, atq.inters...),
predicates: append([]predicate.AuthTokens{}, atq.predicates...),
withUser: atq.withUser.Clone(),
@@ -494,7 +494,7 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery
}
query.withFKs = true
query.Where(predicate.AuthRoles(func(s *sql.Selector) {
s.Where(sql.InValues(authtokens.RolesColumn, fks...))
s.Where(sql.InValues(s.C(authtokens.RolesColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -507,7 +507,7 @@ func (atq *AuthTokensQuery) loadRoles(ctx context.Context, query *AuthRolesQuery
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "auth_tokens_roles" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}

View File

@@ -115,7 +115,7 @@ func (atu *AuthTokensUpdate) ClearRoles() *AuthTokensUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (atu *AuthTokensUpdate) Save(ctx context.Context) (int, error) {
atu.defaults()
return withHooks[int, AuthTokensMutation](ctx, atu.sqlSave, atu.mutation, atu.hooks)
return withHooks(ctx, atu.sqlSave, atu.mutation, atu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -174,10 +174,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -190,10 +187,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -209,10 +203,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authtokens.RolesColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: authroles.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -225,10 +216,7 @@ func (atu *AuthTokensUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{authtokens.RolesColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: authroles.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
for _, k := range nodes {
@@ -353,7 +341,7 @@ func (atuo *AuthTokensUpdateOne) Select(field string, fields ...string) *AuthTok
// Save executes the query and returns the updated AuthTokens entity.
func (atuo *AuthTokensUpdateOne) Save(ctx context.Context) (*AuthTokens, error) {
atuo.defaults()
return withHooks[*AuthTokens, AuthTokensMutation](ctx, atuo.sqlSave, atuo.mutation, atuo.hooks)
return withHooks(ctx, atuo.sqlSave, atuo.mutation, atuo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -429,10 +417,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -445,10 +430,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
Columns: []string{authtokens.UserColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -464,10 +446,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
Columns: []string{authtokens.RolesColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: authroles.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -480,10 +459,7 @@ func (atuo *AuthTokensUpdateOne) sqlSave(ctx context.Context) (_node *AuthTokens
Columns: []string{authtokens.RolesColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeInt,
Column: authroles.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(authroles.FieldID, field.TypeInt),
},
}
for _, k := range nodes {

View File

@@ -7,10 +7,15 @@ import (
"errors"
"fmt"
"log"
"reflect"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/migrate"
"entgo.io/ent"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/hay-kot/homebox/backend/internal/data/ent/attachment"
"github.com/hay-kot/homebox/backend/internal/data/ent/authroles"
"github.com/hay-kot/homebox/backend/internal/data/ent/authtokens"
@@ -24,10 +29,6 @@ import (
"github.com/hay-kot/homebox/backend/internal/data/ent/maintenanceentry"
"github.com/hay-kot/homebox/backend/internal/data/ent/notifier"
"github.com/hay-kot/homebox/backend/internal/data/ent/user"
"entgo.io/ent/dialect"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
)
// Client is the client that holds all ent builders.
@@ -65,9 +66,7 @@ type Client struct {
// NewClient creates a new client configured with the given options.
func NewClient(opts ...Option) *Client {
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
cfg.options(opts...)
client := &Client{config: cfg}
client := &Client{config: newConfig(opts...)}
client.init()
return client
}
@@ -89,6 +88,62 @@ func (c *Client) init() {
c.User = NewUserClient(c.config)
}
type (
// config is the configuration for the client and its builder.
config struct {
// driver used for executing database requests.
driver dialect.Driver
// debug enable a debug logging.
debug bool
// log used for logging on debug mode.
log func(...any)
// hooks to execute on mutations.
hooks *hooks
// interceptors to execute on queries.
inters *inters
}
// Option function to configure the client.
Option func(*config)
)
// newConfig creates a new config for the client.
func newConfig(opts ...Option) config {
cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}}
cfg.options(opts...)
return cfg
}
// options applies the options on the config object.
func (c *config) options(opts ...Option) {
for _, opt := range opts {
opt(c)
}
if c.debug {
c.driver = dialect.Debug(c.driver, c.log)
}
}
// Debug enables debug logging on the ent.Driver.
func Debug() Option {
return func(c *config) {
c.debug = true
}
}
// Log sets the logging function for debug mode.
func Log(fn func(...any)) Option {
return func(c *config) {
c.log = fn
}
}
// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
return func(c *config) {
c.driver = driver
}
}
// Open opens a database/sql.DB specified by the driver name and
// the data source name, and returns a new client attached to it.
// Optional parameters can be added for configuring the client.
@@ -105,11 +160,14 @@ func Open(driverName, dataSourceName string, options ...Option) (*Client, error)
}
}
// ErrTxStarted is returned when trying to start a new transaction from a transactional client.
var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction")
// Tx returns a new transactional client. The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
if _, ok := c.driver.(*txDriver); ok {
return nil, errors.New("ent: cannot start a transaction within a transaction")
return nil, ErrTxStarted
}
tx, err := newTx(ctx, c.driver)
if err != nil {
@@ -193,37 +251,25 @@ func (c *Client) Close() error {
// Use adds the mutation hooks to all the entity clients.
// In order to add hooks to a specific client, call: `client.Node.Use(...)`.
func (c *Client) Use(hooks ...Hook) {
c.Attachment.Use(hooks...)
c.AuthRoles.Use(hooks...)
c.AuthTokens.Use(hooks...)
c.Document.Use(hooks...)
c.Group.Use(hooks...)
c.GroupInvitationToken.Use(hooks...)
c.Item.Use(hooks...)
c.ItemField.Use(hooks...)
c.Label.Use(hooks...)
c.Location.Use(hooks...)
c.MaintenanceEntry.Use(hooks...)
c.Notifier.Use(hooks...)
c.User.Use(hooks...)
for _, n := range []interface{ Use(...Hook) }{
c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group,
c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location,
c.MaintenanceEntry, c.Notifier, c.User,
} {
n.Use(hooks...)
}
}
// Intercept adds the query interceptors to all the entity clients.
// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`.
func (c *Client) Intercept(interceptors ...Interceptor) {
c.Attachment.Intercept(interceptors...)
c.AuthRoles.Intercept(interceptors...)
c.AuthTokens.Intercept(interceptors...)
c.Document.Intercept(interceptors...)
c.Group.Intercept(interceptors...)
c.GroupInvitationToken.Intercept(interceptors...)
c.Item.Intercept(interceptors...)
c.ItemField.Intercept(interceptors...)
c.Label.Intercept(interceptors...)
c.Location.Intercept(interceptors...)
c.MaintenanceEntry.Intercept(interceptors...)
c.Notifier.Intercept(interceptors...)
c.User.Intercept(interceptors...)
for _, n := range []interface{ Intercept(...Interceptor) }{
c.Attachment, c.AuthRoles, c.AuthTokens, c.Document, c.Group,
c.GroupInvitationToken, c.Item, c.ItemField, c.Label, c.Location,
c.MaintenanceEntry, c.Notifier, c.User,
} {
n.Intercept(interceptors...)
}
}
// Mutate implements the ent.Mutator interface.
@@ -293,6 +339,21 @@ func (c *AttachmentClient) CreateBulk(builders ...*AttachmentCreate) *Attachment
return &AttachmentCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *AttachmentClient) MapCreateBulk(slice any, setFunc func(*AttachmentCreate, int)) *AttachmentCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &AttachmentCreateBulk{err: fmt.Errorf("calling to AttachmentClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*AttachmentCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &AttachmentCreateBulk{config: c.config, builders: builders}
}
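// Editor's sketch (not generated code): MapCreateBulk builds a bulk insert directly
// from a slice. Assuming an *ent.Client named client, a context ctx, and a
// hypothetical uploads slice whose elements carry an ItemID:
//
//	created, err := client.Attachment.
//		MapCreateBulk(uploads, func(c *AttachmentCreate, i int) {
//			c.SetItemID(uploads[i].ItemID).SetPrimary(i == 0)
//		}).
//		Save(ctx)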
// Update returns an update builder for Attachment.
func (c *AttachmentClient) Update() *AttachmentUpdate {
mutation := newAttachmentMutation(c.config, OpUpdate)
@@ -443,6 +504,21 @@ func (c *AuthRolesClient) CreateBulk(builders ...*AuthRolesCreate) *AuthRolesCre
return &AuthRolesCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *AuthRolesClient) MapCreateBulk(slice any, setFunc func(*AuthRolesCreate, int)) *AuthRolesCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &AuthRolesCreateBulk{err: fmt.Errorf("calling to AuthRolesClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*AuthRolesCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &AuthRolesCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for AuthRoles.
func (c *AuthRolesClient) Update() *AuthRolesUpdate {
mutation := newAuthRolesMutation(c.config, OpUpdate)
@@ -577,6 +653,21 @@ func (c *AuthTokensClient) CreateBulk(builders ...*AuthTokensCreate) *AuthTokens
return &AuthTokensCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *AuthTokensClient) MapCreateBulk(slice any, setFunc func(*AuthTokensCreate, int)) *AuthTokensCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &AuthTokensCreateBulk{err: fmt.Errorf("calling to AuthTokensClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*AuthTokensCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &AuthTokensCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for AuthTokens.
func (c *AuthTokensClient) Update() *AuthTokensUpdate {
mutation := newAuthTokensMutation(c.config, OpUpdate)
@@ -727,6 +818,21 @@ func (c *DocumentClient) CreateBulk(builders ...*DocumentCreate) *DocumentCreate
return &DocumentCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *DocumentClient) MapCreateBulk(slice any, setFunc func(*DocumentCreate, int)) *DocumentCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &DocumentCreateBulk{err: fmt.Errorf("calling to DocumentClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*DocumentCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &DocumentCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Document.
func (c *DocumentClient) Update() *DocumentUpdate {
mutation := newDocumentMutation(c.config, OpUpdate)
@@ -877,6 +983,21 @@ func (c *GroupClient) CreateBulk(builders ...*GroupCreate) *GroupCreateBulk {
return &GroupCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *GroupClient) MapCreateBulk(slice any, setFunc func(*GroupCreate, int)) *GroupCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &GroupCreateBulk{err: fmt.Errorf("calling to GroupClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*GroupCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &GroupCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Group.
func (c *GroupClient) Update() *GroupUpdate {
mutation := newGroupMutation(c.config, OpUpdate)
@@ -1107,6 +1228,21 @@ func (c *GroupInvitationTokenClient) CreateBulk(builders ...*GroupInvitationToke
return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *GroupInvitationTokenClient) MapCreateBulk(slice any, setFunc func(*GroupInvitationTokenCreate, int)) *GroupInvitationTokenCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &GroupInvitationTokenCreateBulk{err: fmt.Errorf("calling to GroupInvitationTokenClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*GroupInvitationTokenCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &GroupInvitationTokenCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for GroupInvitationToken.
func (c *GroupInvitationTokenClient) Update() *GroupInvitationTokenUpdate {
mutation := newGroupInvitationTokenMutation(c.config, OpUpdate)
@@ -1241,6 +1377,21 @@ func (c *ItemClient) CreateBulk(builders ...*ItemCreate) *ItemCreateBulk {
return &ItemCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *ItemClient) MapCreateBulk(slice any, setFunc func(*ItemCreate, int)) *ItemCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &ItemCreateBulk{err: fmt.Errorf("calling to ItemClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*ItemCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &ItemCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Item.
func (c *ItemClient) Update() *ItemUpdate {
mutation := newItemMutation(c.config, OpUpdate)
@@ -1487,6 +1638,21 @@ func (c *ItemFieldClient) CreateBulk(builders ...*ItemFieldCreate) *ItemFieldCre
return &ItemFieldCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *ItemFieldClient) MapCreateBulk(slice any, setFunc func(*ItemFieldCreate, int)) *ItemFieldCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &ItemFieldCreateBulk{err: fmt.Errorf("calling to ItemFieldClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*ItemFieldCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &ItemFieldCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for ItemField.
func (c *ItemFieldClient) Update() *ItemFieldUpdate {
mutation := newItemFieldMutation(c.config, OpUpdate)
@@ -1621,6 +1787,21 @@ func (c *LabelClient) CreateBulk(builders ...*LabelCreate) *LabelCreateBulk {
return &LabelCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *LabelClient) MapCreateBulk(slice any, setFunc func(*LabelCreate, int)) *LabelCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &LabelCreateBulk{err: fmt.Errorf("calling to LabelClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*LabelCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &LabelCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Label.
func (c *LabelClient) Update() *LabelUpdate {
mutation := newLabelMutation(c.config, OpUpdate)
@@ -1771,6 +1952,21 @@ func (c *LocationClient) CreateBulk(builders ...*LocationCreate) *LocationCreate
return &LocationCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *LocationClient) MapCreateBulk(slice any, setFunc func(*LocationCreate, int)) *LocationCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &LocationCreateBulk{err: fmt.Errorf("calling to LocationClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*LocationCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &LocationCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Location.
func (c *LocationClient) Update() *LocationUpdate {
mutation := newLocationMutation(c.config, OpUpdate)
@@ -1953,6 +2149,21 @@ func (c *MaintenanceEntryClient) CreateBulk(builders ...*MaintenanceEntryCreate)
return &MaintenanceEntryCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *MaintenanceEntryClient) MapCreateBulk(slice any, setFunc func(*MaintenanceEntryCreate, int)) *MaintenanceEntryCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &MaintenanceEntryCreateBulk{err: fmt.Errorf("calling to MaintenanceEntryClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*MaintenanceEntryCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &MaintenanceEntryCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for MaintenanceEntry.
func (c *MaintenanceEntryClient) Update() *MaintenanceEntryUpdate {
mutation := newMaintenanceEntryMutation(c.config, OpUpdate)
@@ -2087,6 +2298,21 @@ func (c *NotifierClient) CreateBulk(builders ...*NotifierCreate) *NotifierCreate
return &NotifierCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *NotifierClient) MapCreateBulk(slice any, setFunc func(*NotifierCreate, int)) *NotifierCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &NotifierCreateBulk{err: fmt.Errorf("calling to NotifierClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*NotifierCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &NotifierCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for Notifier.
func (c *NotifierClient) Update() *NotifierUpdate {
mutation := newNotifierMutation(c.config, OpUpdate)
@@ -2237,6 +2463,21 @@ func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk {
return &UserCreateBulk{config: c.config, builders: builders}
}
// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates
// a builder and applies setFunc on it.
func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk {
rv := reflect.ValueOf(slice)
if rv.Kind() != reflect.Slice {
return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)}
}
builders := make([]*UserCreate, rv.Len())
for i := 0; i < rv.Len(); i++ {
builders[i] = c.Create()
setFunc(builders[i], i)
}
return &UserCreateBulk{config: c.config, builders: builders}
}
// Update returns an update builder for User.
func (c *UserClient) Update() *UserUpdate {
mutation := newUserMutation(c.config, OpUpdate)
@@ -2369,3 +2610,15 @@ func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error)
return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op())
}
}
// hooks and interceptors per client, for fast access.
type (
hooks struct {
Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item,
ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Hook
}
inters struct {
Attachment, AuthRoles, AuthTokens, Document, Group, GroupInvitationToken, Item,
ItemField, Label, Location, MaintenanceEntry, Notifier, User []ent.Interceptor
}
)

View File

@@ -1,90 +0,0 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"entgo.io/ent"
"entgo.io/ent/dialect"
)
// Option function to configure the client.
type Option func(*config)
// Config is the configuration for the client and its builder.
type config struct {
// driver used for executing database requests.
driver dialect.Driver
// debug enable a debug logging.
debug bool
// log used for logging on debug mode.
log func(...any)
// hooks to execute on mutations.
hooks *hooks
// interceptors to execute on queries.
inters *inters
}
// hooks and interceptors per client, for fast access.
type (
hooks struct {
Attachment []ent.Hook
AuthRoles []ent.Hook
AuthTokens []ent.Hook
Document []ent.Hook
Group []ent.Hook
GroupInvitationToken []ent.Hook
Item []ent.Hook
ItemField []ent.Hook
Label []ent.Hook
Location []ent.Hook
MaintenanceEntry []ent.Hook
Notifier []ent.Hook
User []ent.Hook
}
inters struct {
Attachment []ent.Interceptor
AuthRoles []ent.Interceptor
AuthTokens []ent.Interceptor
Document []ent.Interceptor
Group []ent.Interceptor
GroupInvitationToken []ent.Interceptor
Item []ent.Interceptor
ItemField []ent.Interceptor
Label []ent.Interceptor
Location []ent.Interceptor
MaintenanceEntry []ent.Interceptor
Notifier []ent.Interceptor
User []ent.Interceptor
}
)
// Options applies the options on the config object.
func (c *config) options(opts ...Option) {
for _, opt := range opts {
opt(c)
}
if c.debug {
c.driver = dialect.Debug(c.driver, c.log)
}
}
// Debug enables debug logging on the ent.Driver.
func Debug() Option {
return func(c *config) {
c.debug = true
}
}
// Log sets the logging function for debug mode.
func Log(fn func(...any)) Option {
return func(c *config) {
c.log = fn
}
}
// Driver configures the client driver.
func Driver(driver dialect.Driver) Option {
return func(c *config) {
c.driver = driver
}
}

View File

@@ -1,33 +0,0 @@
// Code generated by ent, DO NOT EDIT.
package ent
import (
"context"
)
type clientCtxKey struct{}
// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
c, _ := ctx.Value(clientCtxKey{}).(*Client)
return c
}
// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
return context.WithValue(parent, clientCtxKey{}, c)
}
type txCtxKey struct{}
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
return tx
}
// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
return context.WithValue(parent, txCtxKey{}, tx)
}

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/document"
@@ -30,6 +31,7 @@ type Document struct {
// The values are being populated by the DocumentQuery when eager-loading is set.
Edges DocumentEdges `json:"edges"`
group_documents *uuid.UUID
selectValues sql.SelectValues
}
// DocumentEdges holds the relations/edges for other nodes in the graph.
@@ -79,7 +81,7 @@ func (*Document) scanValues(columns []string) ([]any, error) {
case document.ForeignKeys[0]: // group_documents
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type Document", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -130,11 +132,19 @@ func (d *Document) assignValues(columns []string, values []any) error {
d.group_documents = new(uuid.UUID)
*d.group_documents = *value.S.(*uuid.UUID)
}
default:
d.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Document.
// This includes values selected through modifiers, order, etc.
func (d *Document) Value(name string) (ent.Value, error) {
return d.selectValues.Get(name)
}
// QueryGroup queries the "group" edge of the Document entity.
func (d *Document) QueryGroup() *GroupQuery {
return NewDocumentClient(d.config).QueryGroup(d)

View File

@@ -5,6 +5,8 @@ package document
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -87,3 +89,66 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
// OrderOption defines the ordering options for the Document queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByTitle orders the results by the title field.
func ByTitle(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldTitle, opts...).ToFunc()
}
// ByPath orders the results by the path field.
func ByPath(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldPath, opts...).ToFunc()
}
// ByGroupField orders the results by group field.
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
}
}
// ByAttachmentsCount orders the results by attachments count.
func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...)
}
}
// ByAttachments orders the results by attachments terms.
func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newGroupStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
}
func newAttachmentsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(AttachmentsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
)
}
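These generated OrderOption helpers replace the old untyped OrderFunc values for document queries; the field helpers accept optional sql.OrderTermOption arguments for direction, and the By*Count/By* edge helpers reuse the step constructors above. A short sketch, assuming ctx, an open client, and imports of entgo.io/ent/dialect/sql plus this generated document package:

func listDocs(ctx context.Context, client *ent.Client) ([]*ent.Document, error) {
	// Newest first, then alphabetically by title; sql.OrderDesc flips the direction.
	return client.Document.Query().
		Order(
			document.ByCreatedAt(sql.OrderDesc()),
			document.ByTitle(),
		).
		All(ctx)
}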

View File

@@ -300,11 +300,7 @@ func HasGroup() predicate.Document {
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
step := newGroupStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -327,11 +323,7 @@ func HasAttachments() predicate.Document {
// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates).
func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(AttachmentsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn),
)
step := newAttachmentsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -342,32 +334,15 @@ func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Document {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Document) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.Document(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Document) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.Document(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Document) predicate.Document {
return predicate.Document(func(s *sql.Selector) {
p(s.Not())
})
return predicate.Document(sql.NotPredicates(p))
}
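And, Or and Not now delegate to sql.AndPredicates, sql.OrPredicates and sql.NotPredicates instead of cloning the selector by hand; from the caller's side composition is unchanged. A sketch, assuming the usual generated string predicates (TitleContains, PathHasPrefix) exist for this schema:

func receipts(ctx context.Context, client *ent.Client) ([]*ent.Document, error) {
	// Titles mentioning "receipt", excluding anything stored under /tmp/.
	return client.Document.Query().
		Where(document.And(
			document.TitleContains("receipt"),
			document.Not(document.PathHasPrefix("/tmp/")),
		)).
		All(ctx)
}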

View File

@@ -111,7 +111,7 @@ func (dc *DocumentCreate) Mutation() *DocumentMutation {
// Save creates the Document in the database.
func (dc *DocumentCreate) Save(ctx context.Context) (*Document, error) {
dc.defaults()
return withHooks[*Document, DocumentMutation](ctx, dc.sqlSave, dc.mutation, dc.hooks)
return withHooks(ctx, dc.sqlSave, dc.mutation, dc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -238,10 +238,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -258,10 +255,7 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -275,11 +269,15 @@ func (dc *DocumentCreate) createSpec() (*Document, *sqlgraph.CreateSpec) {
// DocumentCreateBulk is the builder for creating many Document entities in bulk.
type DocumentCreateBulk struct {
config
err error
builders []*DocumentCreate
}
// Save creates the Document entities in the database.
func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
if dcb.err != nil {
return nil, dcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(dcb.builders))
nodes := make([]*Document, len(dcb.builders))
mutators := make([]Mutator, len(dcb.builders))
@@ -296,8 +294,8 @@ func (dcb *DocumentCreateBulk) Save(ctx context.Context) ([]*Document, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, dcb.builders[i+1].mutation)
} else {

View File

@@ -27,7 +27,7 @@ func (dd *DocumentDelete) Where(ps ...predicate.Document) *DocumentDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (dd *DocumentDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, DocumentMutation](ctx, dd.sqlExec, dd.mutation, dd.hooks)
return withHooks(ctx, dd.sqlExec, dd.mutation, dd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.

View File

@@ -22,7 +22,7 @@ import (
type DocumentQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []document.OrderOption
inters []Interceptor
predicates []predicate.Document
withGroup *GroupQuery
@@ -59,7 +59,7 @@ func (dq *DocumentQuery) Unique(unique bool) *DocumentQuery {
}
// Order specifies how the records should be ordered.
func (dq *DocumentQuery) Order(o ...OrderFunc) *DocumentQuery {
func (dq *DocumentQuery) Order(o ...document.OrderOption) *DocumentQuery {
dq.order = append(dq.order, o...)
return dq
}
@@ -297,7 +297,7 @@ func (dq *DocumentQuery) Clone() *DocumentQuery {
return &DocumentQuery{
config: dq.config,
ctx: dq.ctx.Clone(),
order: append([]OrderFunc{}, dq.order...),
order: append([]document.OrderOption{}, dq.order...),
inters: append([]Interceptor{}, dq.inters...),
predicates: append([]predicate.Document{}, dq.predicates...),
withGroup: dq.withGroup.Clone(),
@@ -498,7 +498,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
}
query.withFKs = true
query.Where(predicate.Attachment(func(s *sql.Selector) {
s.Where(sql.InValues(document.AttachmentsColumn, fks...))
s.Where(sql.InValues(s.C(document.AttachmentsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -511,7 +511,7 @@ func (dq *DocumentQuery) loadAttachments(ctx context.Context, query *AttachmentQ
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "document_attachments" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}

View File

@@ -43,12 +43,28 @@ func (du *DocumentUpdate) SetTitle(s string) *DocumentUpdate {
return du
}
// SetNillableTitle sets the "title" field if the given value is not nil.
func (du *DocumentUpdate) SetNillableTitle(s *string) *DocumentUpdate {
if s != nil {
du.SetTitle(*s)
}
return du
}
// SetPath sets the "path" field.
func (du *DocumentUpdate) SetPath(s string) *DocumentUpdate {
du.mutation.SetPath(s)
return du
}
// SetNillablePath sets the "path" field if the given value is not nil.
func (du *DocumentUpdate) SetNillablePath(s *string) *DocumentUpdate {
if s != nil {
du.SetPath(*s)
}
return du
}
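The new SetNillableTitle/SetNillablePath setters only write a field when the pointer is non-nil, which maps neatly onto PATCH-style partial updates. A sketch, assuming github.com/google/uuid and the generated ent package are imported and the request body is decoded into pointer fields (DocPatch and patchDocument are illustrative names, not part of this codebase):

// DocPatch models an optional-field update payload.
type DocPatch struct {
	Title *string `json:"title"`
	Path  *string `json:"path"`
}

func patchDocument(ctx context.Context, client *ent.Client, id uuid.UUID, p DocPatch) error {
	return client.Document.UpdateOneID(id).
		SetNillableTitle(p.Title). // no-op when p.Title == nil
		SetNillablePath(p.Path).   // no-op when p.Path == nil
		Exec(ctx)
}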
// SetGroupID sets the "group" edge to the Group entity by ID.
func (du *DocumentUpdate) SetGroupID(id uuid.UUID) *DocumentUpdate {
du.mutation.SetGroupID(id)
@@ -110,7 +126,7 @@ func (du *DocumentUpdate) RemoveAttachments(a ...*Attachment) *DocumentUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (du *DocumentUpdate) Save(ctx context.Context) (int, error) {
du.defaults()
return withHooks[int, DocumentMutation](ctx, du.sqlSave, du.mutation, du.hooks)
return withHooks(ctx, du.sqlSave, du.mutation, du.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -190,10 +206,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -206,10 +219,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -225,10 +235,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -241,10 +248,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -260,10 +264,7 @@ func (du *DocumentUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -303,12 +304,28 @@ func (duo *DocumentUpdateOne) SetTitle(s string) *DocumentUpdateOne {
return duo
}
// SetNillableTitle sets the "title" field if the given value is not nil.
func (duo *DocumentUpdateOne) SetNillableTitle(s *string) *DocumentUpdateOne {
if s != nil {
duo.SetTitle(*s)
}
return duo
}
// SetPath sets the "path" field.
func (duo *DocumentUpdateOne) SetPath(s string) *DocumentUpdateOne {
duo.mutation.SetPath(s)
return duo
}
// SetNillablePath sets the "path" field if the given value is not nil.
func (duo *DocumentUpdateOne) SetNillablePath(s *string) *DocumentUpdateOne {
if s != nil {
duo.SetPath(*s)
}
return duo
}
// SetGroupID sets the "group" edge to the Group entity by ID.
func (duo *DocumentUpdateOne) SetGroupID(id uuid.UUID) *DocumentUpdateOne {
duo.mutation.SetGroupID(id)
@@ -383,7 +400,7 @@ func (duo *DocumentUpdateOne) Select(field string, fields ...string) *DocumentUp
// Save executes the query and returns the updated Document entity.
func (duo *DocumentUpdateOne) Save(ctx context.Context) (*Document, error) {
duo.defaults()
return withHooks[*Document, DocumentMutation](ctx, duo.sqlSave, duo.mutation, duo.hooks)
return withHooks(ctx, duo.sqlSave, duo.mutation, duo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -480,10 +497,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -496,10 +510,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -515,10 +526,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -531,10 +539,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -550,10 +555,7 @@ func (duo *DocumentUpdateOne) sqlSave(ctx context.Context) (_node *Document, err
Columns: []string{document.AttachmentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: attachment.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {

View File

@@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"reflect"
"sync"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
@@ -45,46 +46,68 @@ type (
MutateFunc = ent.MutateFunc
)
type clientCtxKey struct{}
// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
c, _ := ctx.Value(clientCtxKey{}).(*Client)
return c
}
// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
return context.WithValue(parent, clientCtxKey{}, c)
}
type txCtxKey struct{}
// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
tx, _ := ctx.Value(txCtxKey{}).(*Tx)
return tx
}
// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
return context.WithValue(parent, txCtxKey{}, tx)
}
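FromContext/NewContext and the Tx variants moved here from the deleted context.go above; behaviour is identical. They are typically used to hand the client to request-scoped code, for example from HTTP middleware, so hooks and helpers can call ent.FromContext(ctx) instead of threading the client through every signature. A minimal sketch, assuming net/http and the generated ent package:

// withClient stores the *ent.Client on every request context.
func withClient(client *ent.Client, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := ent.NewContext(r.Context(), client)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

// Downstream, any code holding the request context can recover it:
//   if c := ent.FromContext(ctx); c != nil { /* use c */ }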
// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)
// columnChecker returns a function indicates if the column exists in the given column.
func columnChecker(table string) func(string) error {
checks := map[string]func(string) bool{
attachment.Table: attachment.ValidColumn,
authroles.Table: authroles.ValidColumn,
authtokens.Table: authtokens.ValidColumn,
document.Table: document.ValidColumn,
group.Table: group.ValidColumn,
groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
item.Table: item.ValidColumn,
itemfield.Table: itemfield.ValidColumn,
label.Table: label.ValidColumn,
location.Table: location.ValidColumn,
maintenanceentry.Table: maintenanceentry.ValidColumn,
notifier.Table: notifier.ValidColumn,
user.Table: user.ValidColumn,
}
check, ok := checks[table]
if !ok {
return func(string) error {
return fmt.Errorf("unknown table %q", table)
}
}
return func(column string) error {
if !check(column) {
return fmt.Errorf("unknown column %q for table %q", column, table)
}
return nil
}
var (
initCheck sync.Once
columnCheck sql.ColumnCheck
)
// columnChecker checks if the column exists in the given table.
func checkColumn(table, column string) error {
initCheck.Do(func() {
columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
attachment.Table: attachment.ValidColumn,
authroles.Table: authroles.ValidColumn,
authtokens.Table: authtokens.ValidColumn,
document.Table: document.ValidColumn,
group.Table: group.ValidColumn,
groupinvitationtoken.Table: groupinvitationtoken.ValidColumn,
item.Table: item.ValidColumn,
itemfield.Table: itemfield.ValidColumn,
label.Table: label.ValidColumn,
location.Table: location.ValidColumn,
maintenanceentry.Table: maintenanceentry.ValidColumn,
notifier.Table: notifier.ValidColumn,
user.Table: user.ValidColumn,
})
})
return columnCheck(table, column)
}
// Asc applies the given fields in ASC order.
func Asc(fields ...string) OrderFunc {
func Asc(fields ...string) func(*sql.Selector) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if err := check(f); err != nil {
if err := checkColumn(s.TableName(), f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Asc(s.C(f)))
@@ -93,11 +116,10 @@ func Asc(fields ...string) OrderFunc {
}
// Desc applies the given fields in DESC order.
func Desc(fields ...string) OrderFunc {
func Desc(fields ...string) func(*sql.Selector) {
return func(s *sql.Selector) {
check := columnChecker(s.TableName())
for _, f := range fields {
if err := check(f); err != nil {
if err := checkColumn(s.TableName(), f); err != nil {
s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
}
s.OrderBy(sql.Desc(s.C(f)))
@@ -129,8 +151,7 @@ func Count() AggregateFunc {
// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -141,8 +162,7 @@ func Max(field string) AggregateFunc {
// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -153,8 +173,7 @@ func Mean(field string) AggregateFunc {
// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -165,8 +184,7 @@ func Min(field string) AggregateFunc {
// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
return func(s *sql.Selector) string {
check := columnChecker(s.TableName())
if err := check(field); err != nil {
if err := checkColumn(s.TableName(), field); err != nil {
s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
return ""
}
@@ -503,7 +521,7 @@ func withHooks[V Value, M any, PM interface {
return exec(ctx)
}
var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
mutationT, ok := m.(PM)
mutationT, ok := any(m).(PM)
if !ok {
return nil, fmt.Errorf("unexpected mutation type %T", m)
}
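Earlier in this file, Asc and Desc changed their return type from the deprecated OrderFunc to a plain func(*sql.Selector), presumably so the result stays assignable to every per-package OrderOption alias; existing call sites keep compiling. A sketch of that legacy style, assuming the generated document package is imported:

func docsByTitleLegacy(ctx context.Context, client *ent.Client) ([]*ent.Document, error) {
	// ent.Asc returns an unnamed func(*sql.Selector), which satisfies document.OrderOption.
	return client.Document.Query().
		Order(ent.Asc(document.FieldTitle)).
		All(ctx)
}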

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -27,7 +28,8 @@ type Group struct {
Currency group.Currency `json:"currency,omitempty"`
// Edges holds the relations/edges for other nodes in the graph.
// The values are being populated by the GroupQuery when eager-loading is set.
Edges GroupEdges `json:"edges"`
Edges GroupEdges `json:"edges"`
selectValues sql.SelectValues
}
// GroupEdges holds the relations/edges for other nodes in the graph.
@@ -126,7 +128,7 @@ func (*Group) scanValues(columns []string) ([]any, error) {
case group.FieldID:
values[i] = new(uuid.UUID)
default:
return nil, fmt.Errorf("unexpected column %q for type Group", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -170,11 +172,19 @@ func (gr *Group) assignValues(columns []string, values []any) error {
} else if value.Valid {
gr.Currency = group.Currency(value.String)
}
default:
gr.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the Group.
// This includes values selected through modifiers, order, etc.
func (gr *Group) Value(name string) (ent.Value, error) {
return gr.selectValues.Get(name)
}
// QueryUsers queries the "users" edge of the Group entity.
func (gr *Group) QueryUsers() *UserQuery {
return NewGroupClient(gr.config).QueryUsers(gr)

View File

@@ -6,6 +6,8 @@ import (
"fmt"
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -129,22 +131,37 @@ const DefaultCurrency = CurrencyUsd
// Currency values.
const (
CurrencyUsd Currency = "usd"
CurrencyAed Currency = "aed"
CurrencyAud Currency = "aud"
CurrencyBgn Currency = "bgn"
CurrencyBrl Currency = "brl"
CurrencyCad Currency = "cad"
CurrencyChf Currency = "chf"
CurrencyCzk Currency = "czk"
CurrencyDkk Currency = "dkk"
CurrencyEur Currency = "eur"
CurrencyGbp Currency = "gbp"
CurrencyJpy Currency = "jpy"
CurrencyZar Currency = "zar"
CurrencyAud Currency = "aud"
CurrencyNok Currency = "nok"
CurrencySek Currency = "sek"
CurrencyDkk Currency = "dkk"
CurrencyHkd Currency = "hkd"
CurrencyIdr Currency = "idr"
CurrencyInr Currency = "inr"
CurrencyRmb Currency = "rmb"
CurrencyBgn Currency = "bgn"
CurrencyChf Currency = "chf"
CurrencyJpy Currency = "jpy"
CurrencyKrw Currency = "krw"
CurrencyMxn Currency = "mxn"
CurrencyNok Currency = "nok"
CurrencyNzd Currency = "nzd"
CurrencyPln Currency = "pln"
CurrencyTry Currency = "try"
CurrencyRmb Currency = "rmb"
CurrencyRon Currency = "ron"
CurrencyRub Currency = "rub"
CurrencySar Currency = "sar"
CurrencySek Currency = "sek"
CurrencySgd Currency = "sgd"
CurrencyThb Currency = "thb"
CurrencyTry Currency = "try"
CurrencyUsd Currency = "usd"
CurrencyXag Currency = "xag"
CurrencyXau Currency = "xau"
CurrencyZar Currency = "zar"
)
func (c Currency) String() string {
@@ -154,9 +171,184 @@ func (c Currency) String() string {
// CurrencyValidator is a validator for the "currency" field enum values. It is called by the builders before save.
func CurrencyValidator(c Currency) error {
switch c {
case CurrencyUsd, CurrencyEur, CurrencyGbp, CurrencyJpy, CurrencyZar, CurrencyAud, CurrencyNok, CurrencySek, CurrencyDkk, CurrencyInr, CurrencyRmb, CurrencyBgn, CurrencyChf, CurrencyPln, CurrencyTry, CurrencyRon:
case CurrencyAed, CurrencyAud, CurrencyBgn, CurrencyBrl, CurrencyCad, CurrencyChf, CurrencyCzk, CurrencyDkk, CurrencyEur, CurrencyGbp, CurrencyHkd, CurrencyIdr, CurrencyInr, CurrencyJpy, CurrencyKrw, CurrencyMxn, CurrencyNok, CurrencyNzd, CurrencyPln, CurrencyRmb, CurrencyRon, CurrencyRub, CurrencySar, CurrencySek, CurrencySgd, CurrencyThb, CurrencyTry, CurrencyUsd, CurrencyXag, CurrencyXau, CurrencyZar:
return nil
default:
return fmt.Errorf("group: invalid enum value for currency field: %q", c)
}
}
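The currency enum roughly doubles here (aed, brl, cad, czk, hkd, idr, krw, mxn, nzd, rub, sar, sgd, thb, xag, xau and others), and CurrencyValidator is called by the builders before save, so an unsupported code fails before any SQL runs. A sketch, assuming SetName and SetCurrency are the generated GroupCreate setters for this schema:

func newGroup(ctx context.Context, client *ent.Client) (*ent.Group, error) {
	// CurrencyCzk is one of the newly added codes and passes CurrencyValidator.
	return client.Group.Create().
		SetName("Home").
		SetCurrency(group.CurrencyCzk).
		Save(ctx)
}

// An unknown code such as group.Currency("doubloons") is rejected by the
// validator before the insert is issued, and Save returns the error.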
// OrderOption defines the ordering options for the Group queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldName, opts...).ToFunc()
}
// ByCurrency orders the results by the currency field.
func ByCurrency(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCurrency, opts...).ToFunc()
}
// ByUsersCount orders the results by users count.
func ByUsersCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newUsersStep(), opts...)
}
}
// ByUsers orders the results by users terms.
func ByUsers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newUsersStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByLocationsCount orders the results by locations count.
func ByLocationsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newLocationsStep(), opts...)
}
}
// ByLocations orders the results by locations terms.
func ByLocations(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newLocationsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByItemsCount orders the results by items count.
func ByItemsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newItemsStep(), opts...)
}
}
// ByItems orders the results by items terms.
func ByItems(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newItemsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByLabelsCount orders the results by labels count.
func ByLabelsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newLabelsStep(), opts...)
}
}
// ByLabels orders the results by labels terms.
func ByLabels(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newLabelsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByDocumentsCount orders the results by documents count.
func ByDocumentsCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newDocumentsStep(), opts...)
}
}
// ByDocuments orders the results by documents terms.
func ByDocuments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newDocumentsStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByInvitationTokensCount orders the results by invitation_tokens count.
func ByInvitationTokensCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newInvitationTokensStep(), opts...)
}
}
// ByInvitationTokens orders the results by invitation_tokens terms.
func ByInvitationTokens(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newInvitationTokensStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
// ByNotifiersCount orders the results by notifiers count.
func ByNotifiersCount(opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborsCount(s, newNotifiersStep(), opts...)
}
}
// ByNotifiers orders the results by notifiers terms.
func ByNotifiers(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newNotifiersStep(), append([]sql.OrderTerm{term}, terms...)...)
}
}
func newUsersStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(UsersInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
)
}
func newLocationsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(LocationsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
)
}
func newItemsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(ItemsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
)
}
func newLabelsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(LabelsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
)
}
func newDocumentsStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
)
}
func newInvitationTokensStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(InvitationTokensInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
)
}
func newNotifiersStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(NotifiersInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
)
}
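The group package gains the same typed ordering surface, including the By*Count helpers that lean on the step constructors just above. A sketch ordering groups by how many items they own, assuming entgo.io/ent/dialect/sql and this generated group package are imported:

func busiestGroups(ctx context.Context, client *ent.Client) ([]*ent.Group, error) {
	// Groups with the most items first; ByItemsCount adds the neighbor-count ordering for us.
	return client.Group.Query().
		Order(group.ByItemsCount(sql.OrderDesc())).
		All(ctx)
}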

View File

@@ -250,11 +250,7 @@ func HasUsers() predicate.Group {
// HasUsersWith applies the HasEdge predicate on the "users" edge with a given conditions (other predicates).
func HasUsersWith(preds ...predicate.User) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(UsersInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, UsersTable, UsersColumn),
)
step := newUsersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -277,11 +273,7 @@ func HasLocations() predicate.Group {
// HasLocationsWith applies the HasEdge predicate on the "locations" edge with a given conditions (other predicates).
func HasLocationsWith(preds ...predicate.Location) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(LocationsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LocationsTable, LocationsColumn),
)
step := newLocationsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -304,11 +296,7 @@ func HasItems() predicate.Group {
// HasItemsWith applies the HasEdge predicate on the "items" edge with a given conditions (other predicates).
func HasItemsWith(preds ...predicate.Item) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(ItemsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, ItemsTable, ItemsColumn),
)
step := newItemsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -331,11 +319,7 @@ func HasLabels() predicate.Group {
// HasLabelsWith applies the HasEdge predicate on the "labels" edge with a given conditions (other predicates).
func HasLabelsWith(preds ...predicate.Label) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(LabelsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, LabelsTable, LabelsColumn),
)
step := newLabelsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -358,11 +342,7 @@ func HasDocuments() predicate.Group {
// HasDocumentsWith applies the HasEdge predicate on the "documents" edge with a given conditions (other predicates).
func HasDocumentsWith(preds ...predicate.Document) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(DocumentsInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, DocumentsTable, DocumentsColumn),
)
step := newDocumentsStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -385,11 +365,7 @@ func HasInvitationTokens() predicate.Group {
// HasInvitationTokensWith applies the HasEdge predicate on the "invitation_tokens" edge with a given conditions (other predicates).
func HasInvitationTokensWith(preds ...predicate.GroupInvitationToken) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(InvitationTokensInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, InvitationTokensTable, InvitationTokensColumn),
)
step := newInvitationTokensStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -412,11 +388,7 @@ func HasNotifiers() predicate.Group {
// HasNotifiersWith applies the HasEdge predicate on the "notifiers" edge with a given conditions (other predicates).
func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(NotifiersInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.O2M, false, NotifiersTable, NotifiersColumn),
)
step := newNotifiersStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -427,32 +399,15 @@ func HasNotifiersWith(preds ...predicate.Notifier) predicate.Group {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.Group(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.Group(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.Group) predicate.Group {
return predicate.Group(func(s *sql.Selector) {
p(s.Not())
})
return predicate.Group(sql.NotPredicates(p))
}

View File

@@ -203,7 +203,7 @@ func (gc *GroupCreate) Mutation() *GroupMutation {
// Save creates the Group in the database.
func (gc *GroupCreate) Save(ctx context.Context) (*Group, error) {
gc.defaults()
return withHooks[*Group, GroupMutation](ctx, gc.sqlSave, gc.mutation, gc.hooks)
return withHooks(ctx, gc.sqlSave, gc.mutation, gc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -331,10 +331,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -350,10 +347,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -369,10 +363,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -388,10 +379,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -407,10 +395,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -426,10 +411,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -445,10 +427,7 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -462,11 +441,15 @@ func (gc *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) {
// GroupCreateBulk is the builder for creating many Group entities in bulk.
type GroupCreateBulk struct {
config
err error
builders []*GroupCreate
}
// Save creates the Group entities in the database.
func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
if gcb.err != nil {
return nil, gcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(gcb.builders))
nodes := make([]*Group, len(gcb.builders))
mutators := make([]Mutator, len(gcb.builders))
@@ -483,8 +466,8 @@ func (gcb *GroupCreateBulk) Save(ctx context.Context) ([]*Group, error) {
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, gcb.builders[i+1].mutation)
} else {

View File

@@ -27,7 +27,7 @@ func (gd *GroupDelete) Where(ps ...predicate.Group) *GroupDelete {
// Exec executes the deletion query and returns how many vertices were deleted.
func (gd *GroupDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, GroupMutation](ctx, gd.sqlExec, gd.mutation, gd.hooks)
return withHooks(ctx, gd.sqlExec, gd.mutation, gd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.

View File

@@ -27,7 +27,7 @@ import (
type GroupQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []group.OrderOption
inters []Interceptor
predicates []predicate.Group
withUsers *UserQuery
@@ -68,7 +68,7 @@ func (gq *GroupQuery) Unique(unique bool) *GroupQuery {
}
// Order specifies how the records should be ordered.
func (gq *GroupQuery) Order(o ...OrderFunc) *GroupQuery {
func (gq *GroupQuery) Order(o ...group.OrderOption) *GroupQuery {
gq.order = append(gq.order, o...)
return gq
}
@@ -416,7 +416,7 @@ func (gq *GroupQuery) Clone() *GroupQuery {
return &GroupQuery{
config: gq.config,
ctx: gq.ctx.Clone(),
order: append([]OrderFunc{}, gq.order...),
order: append([]group.OrderOption{}, gq.order...),
inters: append([]Interceptor{}, gq.inters...),
predicates: append([]predicate.Group{}, gq.predicates...),
withUsers: gq.withUsers.Clone(),
@@ -681,7 +681,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*
}
query.withFKs = true
query.Where(predicate.User(func(s *sql.Selector) {
s.Where(sql.InValues(group.UsersColumn, fks...))
s.Where(sql.InValues(s.C(group.UsersColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -694,7 +694,7 @@ func (gq *GroupQuery) loadUsers(ctx context.Context, query *UserQuery, nodes []*
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_users" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_users" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -712,7 +712,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n
}
query.withFKs = true
query.Where(predicate.Location(func(s *sql.Selector) {
s.Where(sql.InValues(group.LocationsColumn, fks...))
s.Where(sql.InValues(s.C(group.LocationsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -725,7 +725,7 @@ func (gq *GroupQuery) loadLocations(ctx context.Context, query *LocationQuery, n
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_locations" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_locations" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -743,7 +743,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
}
query.withFKs = true
query.Where(predicate.Item(func(s *sql.Selector) {
s.Where(sql.InValues(group.ItemsColumn, fks...))
s.Where(sql.InValues(s.C(group.ItemsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -756,7 +756,7 @@ func (gq *GroupQuery) loadItems(ctx context.Context, query *ItemQuery, nodes []*
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_items" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_items" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -774,7 +774,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
query.withFKs = true
query.Where(predicate.Label(func(s *sql.Selector) {
s.Where(sql.InValues(group.LabelsColumn, fks...))
s.Where(sql.InValues(s.C(group.LabelsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -787,7 +787,7 @@ func (gq *GroupQuery) loadLabels(ctx context.Context, query *LabelQuery, nodes [
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_labels" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_labels" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -805,7 +805,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n
}
query.withFKs = true
query.Where(predicate.Document(func(s *sql.Selector) {
s.Where(sql.InValues(group.DocumentsColumn, fks...))
s.Where(sql.InValues(s.C(group.DocumentsColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -818,7 +818,7 @@ func (gq *GroupQuery) loadDocuments(ctx context.Context, query *DocumentQuery, n
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_documents" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -836,7 +836,7 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
}
query.withFKs = true
query.Where(predicate.GroupInvitationToken(func(s *sql.Selector) {
s.Where(sql.InValues(group.InvitationTokensColumn, fks...))
s.Where(sql.InValues(s.C(group.InvitationTokensColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -849,7 +849,7 @@ func (gq *GroupQuery) loadInvitationTokens(ctx context.Context, query *GroupInvi
}
node, ok := nodeids[*fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_invitation_tokens" returned %v for node %v`, *fk, n.ID)
}
assign(node, n)
}
@@ -865,8 +865,11 @@ func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, n
init(nodes[i])
}
}
if len(query.ctx.Fields) > 0 {
query.ctx.AppendFieldOnce(notifier.FieldGroupID)
}
query.Where(predicate.Notifier(func(s *sql.Selector) {
s.Where(sql.InValues(group.NotifiersColumn, fks...))
s.Where(sql.InValues(s.C(group.NotifiersColumn), fks...))
}))
neighbors, err := query.All(ctx)
if err != nil {
@@ -876,7 +879,7 @@ func (gq *GroupQuery) loadNotifiers(ctx context.Context, query *NotifierQuery, n
fk := n.GroupID
node, ok := nodeids[fk]
if !ok {
return fmt.Errorf(`unexpected foreign-key "group_id" returned %v for node %v`, fk, n.ID)
return fmt.Errorf(`unexpected referenced foreign-key "group_id" returned %v for node %v`, fk, n.ID)
}
assign(node, n)
}
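Three related changes run through the query loaders above: `Order` and the `order` slice switch from the untyped `OrderFunc` to the generated `group.OrderOption`, the eager-load predicates qualify the foreign-key column through `s.C(...)` instead of passing the bare column name, and `loadNotifiers` appends the FK field via `AppendFieldOnce` when a custom field set was selected. A sketch of what the `s.C` qualification does in the ent SQL builder, with placeholder table and column names:

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	// s.C(col) prefixes the column with the selector's table, so the IN (...)
	// filter stays unambiguous once the query involves more than one table.
	s := sql.Select("*").From(sql.Table("users"))
	s.Where(sql.InValues(s.C("group_users"), "a", "b"))
	query, args := s.Query()
	fmt.Println(query) // WHERE clause references users.group_users, not the bare column
	fmt.Println(args)
}
```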

View File

@@ -48,6 +48,14 @@ func (gu *GroupUpdate) SetName(s string) *GroupUpdate {
return gu
}
// SetNillableName sets the "name" field if the given value is not nil.
func (gu *GroupUpdate) SetNillableName(s *string) *GroupUpdate {
if s != nil {
gu.SetName(*s)
}
return gu
}
// SetCurrency sets the "currency" field.
func (gu *GroupUpdate) SetCurrency(gr group.Currency) *GroupUpdate {
gu.mutation.SetCurrency(gr)
@@ -322,7 +330,7 @@ func (gu *GroupUpdate) RemoveNotifiers(n ...*Notifier) *GroupUpdate {
// Save executes the query and returns the number of nodes affected by the update operation.
func (gu *GroupUpdate) Save(ctx context.Context) (int, error) {
gu.defaults()
return withHooks[int, GroupMutation](ctx, gu.sqlSave, gu.mutation, gu.hooks)
return withHooks(ctx, gu.sqlSave, gu.mutation, gu.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -399,10 +407,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -415,10 +420,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -434,10 +436,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -453,10 +452,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -469,10 +465,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -488,10 +481,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -507,10 +497,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -523,10 +510,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -542,10 +526,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -561,10 +542,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -577,10 +555,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -596,10 +571,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -615,10 +587,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -631,10 +600,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -650,10 +616,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -669,10 +632,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -685,10 +645,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -704,10 +661,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -723,10 +677,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -739,10 +690,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -758,10 +706,7 @@ func (gu *GroupUpdate) sqlSave(ctx context.Context) (n int, err error) {
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -801,6 +746,14 @@ func (guo *GroupUpdateOne) SetName(s string) *GroupUpdateOne {
return guo
}
// SetNillableName sets the "name" field if the given value is not nil.
func (guo *GroupUpdateOne) SetNillableName(s *string) *GroupUpdateOne {
if s != nil {
guo.SetName(*s)
}
return guo
}
// SetCurrency sets the "currency" field.
func (guo *GroupUpdateOne) SetCurrency(gr group.Currency) *GroupUpdateOne {
guo.mutation.SetCurrency(gr)
@@ -1088,7 +1041,7 @@ func (guo *GroupUpdateOne) Select(field string, fields ...string) *GroupUpdateOn
// Save executes the query and returns the updated Group entity.
func (guo *GroupUpdateOne) Save(ctx context.Context) (*Group, error) {
guo.defaults()
return withHooks[*Group, GroupMutation](ctx, guo.sqlSave, guo.mutation, guo.hooks)
return withHooks(ctx, guo.sqlSave, guo.mutation, guo.hooks)
}
// SaveX is like Save, but panics if an error occurs.
@@ -1182,10 +1135,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1198,10 +1148,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1217,10 +1164,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.UsersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: user.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1236,10 +1180,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1252,10 +1193,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1271,10 +1209,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LocationsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: location.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(location.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1290,10 +1225,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1306,10 +1238,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1325,10 +1254,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.ItemsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: item.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(item.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1344,10 +1270,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1360,10 +1283,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1379,10 +1299,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.LabelsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: label.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(label.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1398,10 +1315,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1414,10 +1328,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1433,10 +1344,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.DocumentsColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: document.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(document.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1452,10 +1360,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1468,10 +1373,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1487,10 +1389,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.InvitationTokensColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: groupinvitationtoken.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(groupinvitationtoken.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1506,10 +1405,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
@@ -1522,10 +1418,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -1541,10 +1434,7 @@ func (guo *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error
Columns: []string{group.NotifiersColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: notifier.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(notifier.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
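Besides the same `NewFieldSpec` and `withHooks` cleanups, `GroupUpdate` and `GroupUpdateOne` gain `SetNillableName`, which only writes the field when the pointer is non-nil. A self-contained mimic of that pattern, handy for PATCH-style handlers (the types here are illustrative, not homebox's generated builders):

```go
package main

import "fmt"

// groupUpdate mimics the shape of the generated builder for illustration only.
type groupUpdate struct{ name *string }

func (gu *groupUpdate) SetName(s string) *groupUpdate { gu.name = &s; return gu }

// SetNillableName sets the name only when the pointer is non-nil, so callers
// can pass optional request fields straight through without nil checks.
func (gu *groupUpdate) SetNillableName(s *string) *groupUpdate {
	if s != nil {
		gu.SetName(*s)
	}
	return gu
}

func main() {
	var absent *string
	present := "Family"
	u := (&groupUpdate{}).SetNillableName(absent).SetNillableName(&present)
	fmt.Println(*u.name) // prints: Family
}
```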

View File

@@ -7,6 +7,7 @@ import (
"strings"
"time"
"entgo.io/ent"
"entgo.io/ent/dialect/sql"
"github.com/google/uuid"
"github.com/hay-kot/homebox/backend/internal/data/ent/group"
@@ -32,6 +33,7 @@ type GroupInvitationToken struct {
// The values are being populated by the GroupInvitationTokenQuery when eager-loading is set.
Edges GroupInvitationTokenEdges `json:"edges"`
group_invitation_tokens *uuid.UUID
selectValues sql.SelectValues
}
// GroupInvitationTokenEdges holds the relations/edges for other nodes in the graph.
@@ -72,7 +74,7 @@ func (*GroupInvitationToken) scanValues(columns []string) ([]any, error) {
case groupinvitationtoken.ForeignKeys[0]: // group_invitation_tokens
values[i] = &sql.NullScanner{S: new(uuid.UUID)}
default:
return nil, fmt.Errorf("unexpected column %q for type GroupInvitationToken", columns[i])
values[i] = new(sql.UnknownType)
}
}
return values, nil
@@ -129,11 +131,19 @@ func (git *GroupInvitationToken) assignValues(columns []string, values []any) er
git.group_invitation_tokens = new(uuid.UUID)
*git.group_invitation_tokens = *value.S.(*uuid.UUID)
}
default:
git.selectValues.Set(columns[i], values[i])
}
}
return nil
}
// Value returns the ent.Value that was dynamically selected and assigned to the GroupInvitationToken.
// This includes values selected through modifiers, order, etc.
func (git *GroupInvitationToken) Value(name string) (ent.Value, error) {
return git.selectValues.Get(name)
}
// QueryGroup queries the "group" edge of the GroupInvitationToken entity.
func (git *GroupInvitationToken) QueryGroup() *GroupQuery {
return NewGroupInvitationTokenClient(git.config).QueryGroup(git)
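The entity struct now carries a `selectValues sql.SelectValues` catch-all: columns that `scanValues` does not recognize are scanned into `sql.UnknownType` instead of aborting with an error, stored by `assignValues`, and exposed through the new `Value(name)` method. A quick standalone look at the container's `Set`/`Get` shape as used by the generated code above (the column name and value are placeholders):

```go
package main

import (
	"fmt"

	"entgo.io/ent/dialect/sql"
)

func main() {
	// Mirrors what assignValues and Value do with dynamically selected columns.
	var sv sql.SelectValues
	sv.Set("items_count", 42) // assignValues path for an unrecognized column

	v, err := sv.Get("items_count") // what entity.Value("items_count") returns
	fmt.Println(v, err)

	_, err = sv.Get("missing")
	fmt.Println(err) // asking for a column that was never selected reports an error
}
```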

View File

@@ -5,6 +5,8 @@ package groupinvitationtoken
import (
"time"
"entgo.io/ent/dialect/sql"
"entgo.io/ent/dialect/sql/sqlgraph"
"github.com/google/uuid"
)
@@ -81,3 +83,45 @@ var (
// DefaultID holds the default value on creation for the "id" field.
DefaultID func() uuid.UUID
)
// OrderOption defines the ordering options for the GroupInvitationToken queries.
type OrderOption func(*sql.Selector)
// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldID, opts...).ToFunc()
}
// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}
// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}
// ByExpiresAt orders the results by the expires_at field.
func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldExpiresAt, opts...).ToFunc()
}
// ByUses orders the results by the uses field.
func ByUses(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldUses, opts...).ToFunc()
}
// ByGroupField orders the results by group field.
func ByGroupField(field string, opts ...sql.OrderTermOption) OrderOption {
return func(s *sql.Selector) {
sqlgraph.OrderByNeighborTerms(s, newGroupStep(), sql.OrderByField(field, opts...))
}
}
func newGroupStep() *sqlgraph.Step {
return sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
}
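These generated `OrderOption` helpers are what the query builders' new `Order(o ...OrderOption)` signatures accept. A sketch of how a caller might use them, assuming the homebox ent client is available as `client`; the function and the `"created_at"` group field are illustrative assumptions, not code from the repo:

```go
package tokens

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"github.com/hay-kot/homebox/backend/internal/data/ent"
	"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
)

// listTokens orders invitation tokens with the typed helpers: latest expiry
// first, then by the uses counter, then by a field on the parent group.
func listTokens(ctx context.Context, client *ent.Client) ([]*ent.GroupInvitationToken, error) {
	return client.GroupInvitationToken.Query().
		Order(
			groupinvitationtoken.ByExpiresAt(sql.OrderDesc()),
			groupinvitationtoken.ByUses(),
			groupinvitationtoken.ByGroupField("created_at"),
		).
		All(ctx)
}
```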

View File

@@ -295,11 +295,7 @@ func HasGroup() predicate.GroupInvitationToken {
// HasGroupWith applies the HasEdge predicate on the "group" edge with a given conditions (other predicates).
func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken {
return predicate.GroupInvitationToken(func(s *sql.Selector) {
step := sqlgraph.NewStep(
sqlgraph.From(Table, FieldID),
sqlgraph.To(GroupInverseTable, FieldID),
sqlgraph.Edge(sqlgraph.M2O, true, GroupTable, GroupColumn),
)
step := newGroupStep()
sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) {
for _, p := range preds {
p(s)
@@ -310,32 +306,15 @@ func HasGroupWith(preds ...predicate.Group) predicate.GroupInvitationToken {
// And groups predicates with the AND operator between them.
func And(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken {
return predicate.GroupInvitationToken(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for _, p := range predicates {
p(s1)
}
s.Where(s1.P())
})
return predicate.GroupInvitationToken(sql.AndPredicates(predicates...))
}
// Or groups predicates with the OR operator between them.
func Or(predicates ...predicate.GroupInvitationToken) predicate.GroupInvitationToken {
return predicate.GroupInvitationToken(func(s *sql.Selector) {
s1 := s.Clone().SetP(nil)
for i, p := range predicates {
if i > 0 {
s1.Or()
}
p(s1)
}
s.Where(s1.P())
})
return predicate.GroupInvitationToken(sql.OrPredicates(predicates...))
}
// Not applies the not operator on the given predicate.
func Not(p predicate.GroupInvitationToken) predicate.GroupInvitationToken {
return predicate.GroupInvitationToken(func(s *sql.Selector) {
p(s.Not())
})
return predicate.GroupInvitationToken(sql.NotPredicates(p))
}
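The `And`/`Or`/`Not` combinators are rewritten onto `sql.AndPredicates`, `sql.OrPredicates`, and `sql.NotPredicates`, and `HasGroupWith` now reuses the shared `newGroupStep`, but predicates compose exactly as before. A sketch of that composition; `group.NameEQ` is the usual generated equality predicate and is assumed here rather than shown in this diff:

```go
package tokens

import (
	"github.com/hay-kot/homebox/backend/internal/data/ent/group"
	"github.com/hay-kot/homebox/backend/internal/data/ent/groupinvitationtoken"
	"github.com/hay-kot/homebox/backend/internal/data/ent/predicate"
)

// tokenFilter keeps only tokens whose parent group has the given name; the
// combinators behave the same before and after the sql.*Predicates rewrite.
func tokenFilter(name string) predicate.GroupInvitationToken {
	return groupinvitationtoken.And(
		groupinvitationtoken.HasGroup(),
		groupinvitationtoken.HasGroupWith(group.NameEQ(name)),
	)
}
```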

View File

@@ -125,7 +125,7 @@ func (gitc *GroupInvitationTokenCreate) Mutation() *GroupInvitationTokenMutation
// Save creates the GroupInvitationToken in the database.
func (gitc *GroupInvitationTokenCreate) Save(ctx context.Context) (*GroupInvitationToken, error) {
gitc.defaults()
return withHooks[*GroupInvitationToken, GroupInvitationTokenMutation](ctx, gitc.sqlSave, gitc.mutation, gitc.hooks)
return withHooks(ctx, gitc.sqlSave, gitc.mutation, gitc.hooks)
}
// SaveX calls Save and panics if Save returns an error.
@@ -254,10 +254,7 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq
Columns: []string{groupinvitationtoken.GroupColumn},
Bidi: false,
Target: &sqlgraph.EdgeTarget{
IDSpec: &sqlgraph.FieldSpec{
Type: field.TypeUUID,
Column: group.FieldID,
},
IDSpec: sqlgraph.NewFieldSpec(group.FieldID, field.TypeUUID),
},
}
for _, k := range nodes {
@@ -272,11 +269,15 @@ func (gitc *GroupInvitationTokenCreate) createSpec() (*GroupInvitationToken, *sq
// GroupInvitationTokenCreateBulk is the builder for creating many GroupInvitationToken entities in bulk.
type GroupInvitationTokenCreateBulk struct {
config
err error
builders []*GroupInvitationTokenCreate
}
// Save creates the GroupInvitationToken entities in the database.
func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*GroupInvitationToken, error) {
if gitcb.err != nil {
return nil, gitcb.err
}
specs := make([]*sqlgraph.CreateSpec, len(gitcb.builders))
nodes := make([]*GroupInvitationToken, len(gitcb.builders))
mutators := make([]Mutator, len(gitcb.builders))
@@ -293,8 +294,8 @@ func (gitcb *GroupInvitationTokenCreateBulk) Save(ctx context.Context) ([]*Group
return nil, err
}
builder.mutation = mutation
nodes[i], specs[i] = builder.createSpec()
var err error
nodes[i], specs[i] = builder.createSpec()
if i < len(mutators)-1 {
_, err = mutators[i+1].Mutate(root, gitcb.builders[i+1].mutation)
} else {
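As with `GroupCreateBulk` earlier, `GroupInvitationTokenCreateBulk` grows an `err` field that short-circuits `Save` before any database work happens. A standalone mimic of that control flow (the types are illustrative, not the generated builders):

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// createBulk mimics the new bulk-builder shape: an error recorded while the
// builder was being assembled is returned from Save before anything runs.
type createBulk struct {
	err      error
	builders []func(context.Context) error
}

func (b *createBulk) Save(ctx context.Context) error {
	if b.err != nil {
		return b.err // fail fast, matching the `if gitcb.err != nil` check above
	}
	for _, fn := range b.builders {
		if err := fn(ctx); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	b := &createBulk{err: errors.New("builder construction failed")}
	fmt.Println(b.Save(context.Background()))
}
```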

View File

@@ -27,7 +27,7 @@ func (gitd *GroupInvitationTokenDelete) Where(ps ...predicate.GroupInvitationTok
// Exec executes the deletion query and returns how many vertices were deleted.
func (gitd *GroupInvitationTokenDelete) Exec(ctx context.Context) (int, error) {
return withHooks[int, GroupInvitationTokenMutation](ctx, gitd.sqlExec, gitd.mutation, gitd.hooks)
return withHooks(ctx, gitd.sqlExec, gitd.mutation, gitd.hooks)
}
// ExecX is like Exec, but panics if an error occurs.

View File

@@ -20,7 +20,7 @@ import (
type GroupInvitationTokenQuery struct {
config
ctx *QueryContext
order []OrderFunc
order []groupinvitationtoken.OrderOption
inters []Interceptor
predicates []predicate.GroupInvitationToken
withGroup *GroupQuery
@@ -56,7 +56,7 @@ func (gitq *GroupInvitationTokenQuery) Unique(unique bool) *GroupInvitationToken
}
// Order specifies how the records should be ordered.
func (gitq *GroupInvitationTokenQuery) Order(o ...OrderFunc) *GroupInvitationTokenQuery {
func (gitq *GroupInvitationTokenQuery) Order(o ...groupinvitationtoken.OrderOption) *GroupInvitationTokenQuery {
gitq.order = append(gitq.order, o...)
return gitq
}
@@ -272,7 +272,7 @@ func (gitq *GroupInvitationTokenQuery) Clone() *GroupInvitationTokenQuery {
return &GroupInvitationTokenQuery{
config: gitq.config,
ctx: gitq.ctx.Clone(),
order: append([]OrderFunc{}, gitq.order...),
order: append([]groupinvitationtoken.OrderOption{}, gitq.order...),
inters: append([]Interceptor{}, gitq.inters...),
predicates: append([]predicate.GroupInvitationToken{}, gitq.predicates...),
withGroup: gitq.withGroup.Clone(),

Some files were not shown because too many files have changed in this diff.