mirror of
https://github.com/crazy-max/diun.git
synced 2025-12-30 17:47:20 +01:00
Merge pull request #1280 from crazy-max/dependabot/go_modules/k8s.io/client-go-0.32.0
chore(deps): bump k8s.io/client-go from 0.29.3 to 0.32.0
28
go.mod
@@ -47,9 +47,9 @@ require (
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
google.golang.org/protobuf v1.35.2
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.29.3
k8s.io/apimachinery v0.29.3
k8s.io/client-go v0.29.3
k8s.io/api v0.32.0
k8s.io/apimachinery v0.32.0
k8s.io/client-go v0.32.0
)

require (
@@ -74,6 +74,7 @@ require (
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/felixge/fgprof v0.9.3 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@@ -85,8 +86,9 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gorilla/css v1.0.1 // indirect
github.com/gorilla/mux v1.8.1 // indirect
@@ -127,27 +129,29 @@ require (
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 // indirect
github.com/vanng822/css v0.0.0-20190504095207-a21e860bcd04 // indirect
github.com/vanng822/go-premailer v0.0.0-20191214114701-be27abe028fe // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
golang.org/x/net v0.29.0 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/term v0.27.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.6.0 // indirect
golang.org/x/time v0.7.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.110.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
)

68
go.sum
@@ -98,12 +98,13 @@ github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g=
github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/go-gomail/gomail v0.0.0-20160411212932-81ebce5c23df h1:Bao6dhmbTA1KFVxmJ6nBoMuOJit2yjEgLJpIMYpop0E=
github.com/go-gomail/gomail v0.0.0-20160411212932-81ebce5c23df/go.mod h1:GJr+FCSXshIwgHBtLglIg9M2l2kQSi6QjVAngtzI08Y=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
@@ -122,8 +123,8 @@ github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJn
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.23.0 h1:/PwmTwZhS0dPkav3cdK9kV1FsAmrL8sThn8IHr/sO+o=
github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@@ -138,8 +139,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg=
github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -255,10 +256,10 @@ github.com/nlopes/slack v0.6.0 h1:jt0jxVQGhssx1Ib7naAOZEZcGdtIhTzkP0nopK0AsRA=
github.com/nlopes/slack v0.6.0/go.mod h1:JzQ9m3PMAqcpeCam7UaHSuBuupz7CmpjehYMayT6YOk=
github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88=
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
@@ -331,6 +332,8 @@ github.com/vanng822/go-premailer v0.0.0-20191214114701-be27abe028fe h1:9YnI5plmy
github.com/vanng822/go-premailer v0.0.0-20191214114701-be27abe028fe/go.mod h1:JTFJA/t820uFDoyPpErFQ3rb3amdZoPtxcKervG0OE4=
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -381,8 +384,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo=
golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -428,8 +431,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -459,12 +462,13 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE=
gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -473,21 +477,21 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw=
k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80=
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg=
k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780=
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=

12
vendor/github.com/fxamacker/cbor/v2/.gitignore
generated
vendored
Normal file
@@ -0,0 +1,12 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out
104
vendor/github.com/fxamacker/cbor/v2/.golangci.yml
generated
vendored
Normal file
@@ -0,0 +1,104 @@
# Do not delete linter settings. Linters like gocritic can be enabled on the command line.

linters-settings:
  depguard:
    rules:
      prevent_unmaintained_packages:
        list-mode: strict
        files:
          - $all
          - "!$test"
        allow:
          - $gostd
          - github.com/x448/float16
        deny:
          - pkg: io/ioutil
            desc: "replaced by io and os packages since Go 1.16: https://tip.golang.org/doc/go1.16#ioutil"
  dupl:
    threshold: 100
  funlen:
    lines: 100
    statements: 50
  goconst:
    ignore-tests: true
    min-len: 2
    min-occurrences: 3
  gocritic:
    enabled-tags:
      - diagnostic
      - experimental
      - opinionated
      - performance
      - style
    disabled-checks:
      - commentedOutCode
      - dupImport # https://github.com/go-critic/go-critic/issues/845
      - ifElseChain
      - octalLiteral
      - paramTypeCombine
      - whyNoLint
  gofmt:
    simplify: false
  goimports:
    local-prefixes: github.com/fxamacker/cbor
  golint:
    min-confidence: 0
  govet:
    check-shadowing: true
  lll:
    line-length: 140
  maligned:
    suggest-new: true
  misspell:
    locale: US
  staticcheck:
    checks: ["all"]

linters:
  disable-all: true
  enable:
    - asciicheck
    - bidichk
    - depguard
    - errcheck
    - exportloopref
    - goconst
    - gocritic
    - gocyclo
    - gofmt
    - goimports
    - goprintffuncname
    - gosec
    - gosimple
    - govet
    - ineffassign
    - misspell
    - nilerr
    - revive
    - staticcheck
    - stylecheck
    - typecheck
    - unconvert
    - unused

issues:
  # max-issues-per-linter default is 50. Set to 0 to disable limit.
  max-issues-per-linter: 0
  # max-same-issues default is 3. Set to 0 to disable limit.
  max-same-issues: 0

  exclude-rules:
    - path: decode.go
      text: "string ` overflows ` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string ` \\(range is \\[` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string `, ` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string ` overflows Go's int64` has (\\d+) occurrences, make it a constant"
    - path: decode.go
      text: "string `\\]\\)` has (\\d+) occurrences, make it a constant"
    - path: valid.go
      text: "string ` for type ` has (\\d+) occurrences, make it a constant"
    - path: valid.go
      text: "string `cbor: ` has (\\d+) occurrences, make it a constant"
133
vendor/github.com/fxamacker/cbor/v2/CODE_OF_CONDUCT.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@

# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, caste, color, religion, or sexual
identity and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the overall
community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or advances of
any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email address,
without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
faye.github@gmail.com.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series of
actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within the
community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.1, available at
[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].

Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder][Mozilla CoC].

For answers to common questions about this code of conduct, see the FAQ at
[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at
[https://www.contributor-covenant.org/translations][translations].

[homepage]: https://www.contributor-covenant.org
[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
[Mozilla CoC]: https://github.com/mozilla/diversity
[FAQ]: https://www.contributor-covenant.org/faq
[translations]: https://www.contributor-covenant.org/translations
41
vendor/github.com/fxamacker/cbor/v2/CONTRIBUTING.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
# How to contribute

You can contribute by using the library, opening issues, or opening pull requests.

## Bug reports and security vulnerabilities

Most issues are tracked publicly on [GitHub](https://github.com/fxamacker/cbor/issues).

To report security vulnerabilities, please email faye.github@gmail.com and allow time for the problem to be resolved before disclosing it to the public. For more info, see [Security Policy](https://github.com/fxamacker/cbor#security-policy).

Please do not send data that might contain personally identifiable information, even if you think you have permission. That type of support requires payment and a signed contract where I'm indemnified, held harmless, and defended by you for any data you send to me.

## Pull requests

Please [create an issue](https://github.com/fxamacker/cbor/issues/new/choose) before you begin work on a PR. The improvement may have already been considered, etc.

Pull requests have signing requirements and must not be anonymous. Exceptions are usually made for docs and CI scripts.

See the [Pull Request Template](https://github.com/fxamacker/cbor/blob/master/.github/pull_request_template.md) for details.

Pull requests have a greater chance of being approved if:
- it does not reduce speed, increase memory use, reduce security, etc. for people not using the new option or feature.
- it has > 97% code coverage.

## Describe your issue

Clearly describe the issue:
* If it's a bug, please provide: **version of this library** and **Go** (`go version`), **unmodified error message**, and describe **how to reproduce it**. Also state **what you expected to happen** instead of the error.
* If you propose a change or addition, try to give an example of how the improved code could look or how it would be used.
* If you found a compilation error, please confirm you're using a supported version of Go. If you are, then provide the output of `go version` first, followed by the complete error message.

## Please don't

Please don't send data containing personally identifiable information, even if you think you have permission. That type of support requires payment and a contract where I'm indemnified, held harmless, and defended for any data you send to me.

Please don't send CBOR data larger than 1024 bytes by email. If you want to send crash-producing CBOR data > 1024 bytes by email, please get my permission before sending it to me.

## Credits

- This guide used nlohmann/json contribution guidelines for inspiration as suggested in issue #22.
- Special thanks to @lukseven for pointing out the contribution guidelines didn't mention signing requirements.
21
vendor/github.com/fxamacker/cbor/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2019-present Faye Amacker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
691
vendor/github.com/fxamacker/cbor/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,691 @@
# CBOR Codec in Go

<!-- [](#cbor-library-in-go) -->

[fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).

CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name-comparison-of-other-binary-) to JSON, MessagePack, Protocol Buffers, etc. CBOR is an Internet Standard defined by [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94) and is designed to be relevant for decades.

`fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).

See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accept a user-specified buffer.

## fxamacker/cbor

[](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
[](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
[](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
[](#fuzzing-and-code-coverage)
[](https://goreportcard.com/report/github.com/fxamacker/cbor)

`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).

Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.

Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.

<details><summary>Highlights</summary><p/>

__🚀 Speed__

Encoding and decoding is fast without using Go's `unsafe` package. Slower settings are opt-in. Default limits allow very fast and memory efficient rejection of malformed CBOR data.

__🔒 Security__

Decoder has configurable limits that defend against malicious inputs. Duplicate map key detection is supported. By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).

Codec passed multiple confidential security assessments in 2022. No vulnerabilities found in subset of codec in a [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) prepared by NCC Group for Microsoft Corporation.

__🗜️ Data Size__

Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit.

__:jigsaw: Usability__

API is mostly same as `encoding/json` plus interfaces that simplify concurrency for CBOR options. Encoding and decoding modes can be created at startup and reused by any goroutines.

Presets include Core Deterministic Encoding, Preferred Serialization, CTAP2 Canonical CBOR, etc.

__📆 Extensibility__

Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949.html#section-7.1) (e.g. CBOR tags) and extensive settings. API has interfaces that allow users to create custom encoding and decoding without modifying this library.

<hr/>

</details>

### Secure Decoding with Configurable Settings

`fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data.

By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).

<details><summary>Example decoding with encoding/gob 💥 fatal error (out of memory)</summary><p/>

```Go
// Example of encoding/gob having "fatal error: runtime: out of memory"
// while decoding 181 bytes.
package main

import (
	"bytes"
	"encoding/gob"
	"encoding/hex"
	"fmt"
)

// Example data is from https://github.com/golang/go/issues/24446
// (shortened to 181 bytes).
const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
	"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
	"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
	"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
	"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
	"303030303030303030303030303030303030303030303030303030303030" +
	"30"

type X struct {
	J *X
	K map[string]int
}

func main() {
	raw, _ := hex.DecodeString(data)
	decoder := gob.NewDecoder(bytes.NewReader(raw))

	var x X
	decoder.Decode(&x) // fatal error: runtime: out of memory
	fmt.Println("Decoding finished.")
}
```

<hr/>

</details>

`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to
decode 10 bytes of malicious CBOR data to `[]byte` (with default settings):

| Codec | Speed (ns/op) | Memory | Allocs |
| :---- | ------------: | -----: | -----: |
| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op |
| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op |

<details><summary>Benchmark details</summary><p/>

Latest comparison used:
- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933)
- go test -bench=. -benchmem -count=20

#### Prior comparisons

| Codec | Speed (ns/op) | Memory | Allocs |
| :---- | ------------: | -----: | -----: |
| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |

- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
- go1.19.6, linux/amd64, i5-13600K (DDR4)
- go test -bench=. -benchmem -count=20

<hr/>

</details>

### Smaller Encodings with Struct Tags

Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.

<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>

https://go.dev/play/p/YxwvfPdFQG2

```Go
// Example encoding nested struct (with omitempty tag)
// - encoding/json: 18 byte JSON
// - fxamacker/cbor: 1 byte CBOR
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type GrandChild struct {
	Quux int `json:",omitempty"`
}

type Child struct {
	Baz int `json:",omitempty"`
	Qux GrandChild `json:",omitempty"`
}

type Parent struct {
	Foo Child `json:",omitempty"`
	Bar int `json:",omitempty"`
}

func cb() {
	results, _ := cbor.Marshal(Parent{})
	fmt.Println("hex(CBOR): " + hex.EncodeToString(results))

	text, _ := cbor.Diagnose(results) // Diagnostic Notation
	fmt.Println("DN: " + text)
}

func js() {
	results, _ := json.Marshal(Parent{})
	fmt.Println("hex(JSON): " + hex.EncodeToString(results))

	text := string(results) // JSON
	fmt.Println("JSON: " + text)
}

func main() {
	cb()
	fmt.Println("-------------")
	js()
}
```

Output (DN is Diagnostic Notation):
```
hex(CBOR): a0
DN: {}
-------------
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
JSON: {"Foo":{"Qux":{}}}
```

<hr/>

</details>

Example using different struct tags together:

![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.

## Quick Start

__Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.

### Key Points

This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742).

- __CBOR data item__ is a single piece of CBOR data and its structure may contain 0 or more nested data items.
- __CBOR sequence__ is a concatenation of 0 or more encoded CBOR data items.

Configurable limits and options can be used to balance trade-offs.

- Encoding and decoding modes are created from options (settings).
- Modes can be created at startup and reused.
- Modes are safe for concurrent use.

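To make the "CBOR sequence" point above concrete, here is a minimal hedged sketch (not from the upstream README; the sample bytes and variable names are illustrative) that drains a sequence with `UnmarshalFirst`, the same function shown in the Default Mode snippet below:

```go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// A CBOR Sequence is just concatenated data items: here 1, "a", true.
	seq := []byte{0x01, 0x61, 0x61, 0xf5}

	rest := seq
	for len(rest) > 0 {
		var v interface{}
		var err error
		rest, err = cbor.UnmarshalFirst(rest, &v) // decode one item, keep the remainder
		if err != nil {
			panic(err)
		}
		fmt.Println(v)
	}
}
```
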
### Default Mode

Package level functions only use this library's default settings.
They provide the "default mode" of encoding and decoding.

```go
// API matches encoding/json for Marshal, Unmarshal, Encode, Decode, etc.
b, err = cbor.Marshal(v) // encode v to []byte b
err = cbor.Unmarshal(b, &v) // decode []byte b to v
decoder = cbor.NewDecoder(r) // create decoder with io.Reader r
err = decoder.Decode(&v) // decode a CBOR data item to v

// v2.7.0 added MarshalToBuffer() and UserBufferEncMode interface.
err = cbor.MarshalToBuffer(v, b) // encode v to b instead of using built-in buf pool.

// v2.5.0 added new functions that return remaining bytes.

// UnmarshalFirst decodes first CBOR data item and returns remaining bytes.
rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v

// DiagnoseFirst translates first CBOR data item to text and returns remaining bytes.
text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text

// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes,
// but new funcs UnmarshalFirst and DiagnoseFirst do not.
```

__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc.

- Different CBOR libraries may use different default settings.
- CBOR-based formats or protocols usually require specific settings.

For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset.

### Presets

Presets can be used as-is or as a starting point for custom settings.

```go
// EncOptions is a struct of encoder settings.
func CoreDetEncOptions() EncOptions // RFC 8949 Core Deterministic Encoding
func PreferredUnsortedEncOptions() EncOptions // RFC 8949 Preferred Serialization
func CanonicalEncOptions() EncOptions // RFC 7049 Canonical CBOR
func CTAP2EncOptions() EncOptions // FIDO2 CTAP2 Canonical CBOR
```

Presets are used to create custom modes.

### Custom Modes

Modes are created from settings. Once created, modes have immutable settings.

💡 Create the mode at startup and reuse it. It is safe for concurrent use.

```Go
// Create encoding mode.
opts := cbor.CoreDetEncOptions() // use preset options as a starting point
opts.Time = cbor.TimeUnix // change any settings if needed
em, err := opts.EncMode() // create an immutable encoding mode

// Reuse the encoding mode. It is safe for concurrent use.

// API matches encoding/json.
b, err := em.Marshal(v) // encode v to []byte b
encoder := em.NewEncoder(w) // create encoder with io.Writer w
err := encoder.Encode(v) // encode v to io.Writer w
```

Default mode and custom modes automatically apply struct tags.

### User Specified Buffer for Encoding (v2.7.0)

`UserBufferEncMode` interface extends `EncMode` interface to add `MarshalToBuffer()`. It accepts a user-specified buffer instead of using built-in buffer pool.

```Go
em, err := myEncOptions.UserBufferEncMode() // create UserBufferEncMode mode

var buf bytes.Buffer
err = em.MarshalToBuffer(v, &buf) // encode v to provided buf
```

### Struct Tags

Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.

<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary><p/>

https://go.dev/play/p/YxwvfPdFQG2

```Go
// Example encoding nested struct (with omitempty tag)
// - encoding/json: 18 byte JSON
// - fxamacker/cbor: 1 byte CBOR
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type GrandChild struct {
	Quux int `json:",omitempty"`
}

type Child struct {
	Baz int `json:",omitempty"`
	Qux GrandChild `json:",omitempty"`
}

type Parent struct {
	Foo Child `json:",omitempty"`
	Bar int `json:",omitempty"`
}

func cb() {
	results, _ := cbor.Marshal(Parent{})
	fmt.Println("hex(CBOR): " + hex.EncodeToString(results))

	text, _ := cbor.Diagnose(results) // Diagnostic Notation
	fmt.Println("DN: " + text)
}

func js() {
	results, _ := json.Marshal(Parent{})
	fmt.Println("hex(JSON): " + hex.EncodeToString(results))

	text := string(results) // JSON
	fmt.Println("JSON: " + text)
}

func main() {
	cb()
	fmt.Println("-------------")
	js()
}
```

Output (DN is Diagnostic Notation):
```
hex(CBOR): a0
DN: {}
-------------
hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
JSON: {"Foo":{"Qux":{}}}
```

<hr/>

</details>

<details><summary>Example using several struct tags</summary><p/>

![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

</details>

Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys.

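The image above is not reproduced here, so as a hedged sketch of the same idea (the type and field names are illustrative, not from this library), the `cbor` struct tags named in this section look like this:

```go
// Hedged sketch of the toarray, keyasint, and omitempty struct tags.
type CoordToArray struct {
	_ struct{} `cbor:",toarray"` // encode this struct as a CBOR array, not a map
	X int
	Y int
}

type ClaimsKeyAsInt struct {
	Issuer  string `cbor:"1,keyasint,omitempty"` // integer map key 1, dropped when empty
	Subject string `cbor:"2,keyasint,omitempty"` // integer map key 2, dropped when empty
}
```
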
### CBOR Tags

CBOR tags are specified in a `TagSet`.

Custom modes can be created with a `TagSet` to handle CBOR tags.

```go
em, err := opts.EncMode() // no CBOR tags
em, err := opts.EncModeWithTags(ts) // immutable CBOR tags
em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags
```

`TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`.

<details><summary>Example using TagSet and TagOptions</summary><p/>

```go
// Use signedCWT struct defined in "Decoding CWT" example.

// Create TagSet (safe for concurrency).
tags := cbor.NewTagSet()
// Register tag COSE_Sign1 18 with signedCWT type.
tags.Add(
	cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
	reflect.TypeOf(signedCWT{}),
	18)

// Create DecMode with immutable tags.
dm, _ := cbor.DecOptions{}.DecModeWithTags(tags)

// Unmarshal to signedCWT with tag support.
var v signedCWT
if err := dm.Unmarshal(data, &v); err != nil {
	return err
}

// Create EncMode with immutable tags.
em, _ := cbor.EncOptions{}.EncModeWithTags(tags)

// Marshal signedCWT with tag number.
if data, err := cbor.Marshal(v); err != nil {
	return err
}
```

</details>

### Functions and Interfaces

<details><summary>Functions and interfaces at a glance</summary><p/>

Common functions with same API as `encoding/json`:
- `Marshal`, `Unmarshal`
- `NewEncoder`, `(*Encoder).Encode`
- `NewDecoder`, `(*Decoder).Decode`

NOTE: `Unmarshal` will return `ExtraneousDataError` if there are remaining bytes
because RFC 8949 treats CBOR data item with remaining bytes as malformed.
- 💡 Use `UnmarshalFirst` to decode first CBOR data item and return any remaining bytes.

Other useful functions:
- `Diagnose`, `DiagnoseFirst` produce human-readable [Extended Diagnostic Notation](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G) from CBOR data.
- `UnmarshalFirst` decodes first CBOR data item and returns any remaining bytes.
- `Wellformed` returns true if the CBOR data item is well-formed.

Interfaces identical or comparable to Go `encoding` packages include:
`Marshaler`, `Unmarshaler`, `BinaryMarshaler`, and `BinaryUnmarshaler`.

The `RawMessage` type can be used to delay CBOR decoding or precompute CBOR encoding.

</details>

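As a companion to the `RawMessage` note in the list above, here is a minimal hedged sketch of delayed decoding (the envelope type, field names, and imports are hypothetical, not part of this library):

```go
// Hypothetical envelope: decode the header now, decode the payload later.
type Envelope struct {
	Type    string          `cbor:"type"`
	Payload cbor.RawMessage `cbor:"payload"` // kept as raw CBOR bytes on the first pass
}

func decodePayload(b []byte) (interface{}, error) {
	var env Envelope
	if err := cbor.Unmarshal(b, &env); err != nil {
		return nil, err
	}
	switch env.Type {
	case "point":
		var p struct{ X, Y int }
		err := cbor.Unmarshal(env.Payload, &p) // second pass, now that Type is known
		return p, err
	default:
		return nil, fmt.Errorf("unknown type %q", env.Type)
	}
}
```
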
### Security Tips

🔒 Use Go's `io.LimitReader` to limit size when decoding very large or indefinite size data.

Default limits may need to be increased for systems handling very large data (e.g. blockchains).

`DecOptions` can be used to modify default limits for `MaxArrayElements`, `MaxMapPairs`, and `MaxNestedLevels`.

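A hedged sketch tying the two tips above together (the limit values and function name are arbitrary illustrations, not recommendations):

```go
// Cap input size and tighten decoder limits before decoding untrusted data.
func decodeUntrusted(r io.Reader) (interface{}, error) {
	opts := cbor.DecOptions{
		MaxArrayElements: 65536,
		MaxMapPairs:      65536,
		MaxNestedLevels:  16,
	}
	dm, err := opts.DecMode() // in real code, create once at startup; safe for concurrent use
	if err != nil {
		return nil, err
	}

	var v interface{}
	err = dm.NewDecoder(io.LimitReader(r, 1<<20)).Decode(&v) // read at most 1 MiB
	return v, err
}
```
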
## Status
|
||||
|
||||
v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality.
|
||||
|
||||
For more details, see [release notes](https://github.com/fxamacker/cbor/releases).
|
||||
|
||||
### Prior Release
|
||||
|
||||
[v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings.
|
||||
|
||||
v2.5.0 was released on Sunday, August 13, 2023 with new features and important bug fixes. It is fuzz tested and production quality after extended beta [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).
|
||||
|
||||
__IMPORTANT__: 👉 Before upgrading from v2.4 or older release, please read the notable changes highlighted in the release notes. v2.5.0 is a large release with bug fixes to error handling for extraneous data in `Unmarshal`, etc. that should be reviewed before upgrading.
|
||||
|
||||
See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) for list of new features, improvements, and bug fixes.
|
||||
|
||||
See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc.
|
||||
|
||||
<!--
|
||||
<details><summary>👉 Benchmark Comparison: v2.4.0 vs v2.5.0</summary><p/>
|
||||
|
||||
TODO: Update to v2.4.0 vs 2.5.0 (not beta2).
|
||||
|
||||
Comparison of v2.4.0 vs v2.5.0-beta2 provided by @448 (edited to fit width).
|
||||
|
||||
PR [#382](https://github.com/fxamacker/cbor/pull/382) returns buffer to pool in `Encode()`. It adds a bit of overhead to `Encode()` but `NewEncoder().Encode()` is a lot faster and uses less memory as shown here:
|
||||
|
||||
```
|
||||
$ benchstat bench-v2.4.0.log bench-f9e6291.log
|
||||
goos: linux
|
||||
goarch: amd64
|
||||
pkg: github.com/fxamacker/cbor/v2
|
||||
cpu: 12th Gen Intel(R) Core(TM) i7-12700H
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ sec/op │ sec/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 236.70n ± 2% 58.04n ± 1% -75.48% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 238.00n ± 2% 63.93n ± 1% -73.14% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 238.65n ± 2% 64.88n ± 1% -72.81% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 242.00n ± 2% 63.00n ± 1% -73.97% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 245.60n ± 1% 68.55n ± 1% -72.09% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 243.20n ± 3% 68.39n ± 1% -71.88% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 563.0n ± 2% 378.3n ± 0% -32.81% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 2.043µ ± 2% 1.906µ ± 2% -6.75% (p=0.000 n=10)
|
||||
geomean 349.7n 122.7n -64.92%
|
||||
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ B/op │ B/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 128.0 ± 0% 0.0 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 544.0 ± 0% 416.0 ± 0% -23.53% (p=0.000 n=10)
|
||||
geomean 153.4 ? ¹ ²
|
||||
¹ summaries must be >0 to compute geomean
|
||||
² ratios must be >0 to compute geomean
|
||||
|
||||
│ bench-v2.4.0.log │ bench-f9e6291.log │
|
||||
│ allocs/op │ allocs/op vs base │
|
||||
NewEncoderEncode/Go_bool_to_CBOR_bool-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_uint64_to_CBOR_positive_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_int64_to_CBOR_negative_int-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_float64_to_CBOR_float-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]uint8_to_CBOR_bytes-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_string_to_CBOR_text-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_[]int_to_CBOR_array-20 2.000 ± 0% 0.000 ± 0% -100.00% (p=0.000 n=10)
|
||||
NewEncoderEncode/Go_map[string]string_to_CBOR_map-20 28.00 ± 0% 26.00 ± 0% -7.14% (p=0.000 n=10)
|
||||
geomean 2.782 ? ¹ ²
|
||||
¹ summaries must be >0 to compute geomean
|
||||
² ratios must be >0 to compute geomean
|
||||
```
|
||||
|
||||
</details>
|
||||
-->
|
||||
|
||||
## Who uses fxamacker/cbor
|
||||
|
||||
`fxamacker/cbor` is used in projects by Arm Ltd., Berlin Institute of Health at Charité, Chainlink, Cisco, Confidential Computing Consortium, ConsenSys, Dapper Labs, EdgeX Foundry, F5, FIDO Alliance, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Matrix.org, Microsoft, Mozilla, National Cybersecurity Agency of France (govt), Netherlands (govt), Oasis Protocol, Smallstep, Tailscale, Taurus SA, Teleport, TIBCO, and others.
|
||||
|
||||
`fxamacker/cbor` passed multiple confidential security assessments. A [nonconfidential security assessment](https://github.com/veraison/go-cose/blob/v1.0.0-rc.1/reports/NCC_Microsoft-go-cose-Report_2022-05-26_v1.0.pdf) (prepared by NCC Group for Microsoft Corporation) includes a subset of fxamacker/cbor v2.4.0 in its scope.
|
||||
|
||||
## Standards
|
||||
|
||||
`fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).

Notable CBOR features include:

| CBOR Feature | Description |
| :--- | :--- |
| CBOR tags | API supports built-in and user-defined tags. |
| Preferred serialization | Integers encode to fewest bytes. Optional float64 → float32 → float16. |
| Map key sorting | Unsorted, length-first (Canonical CBOR), and bytewise-lexicographic (CTAP2). |
| Duplicate map keys | Always forbidden when encoding; option to allow or forbid when decoding. |
| Indefinite length data | Option to allow/forbid for encoding and decoding. |
| Well-formedness | Always checked and enforced. |
| Basic validity checks | Optionally check UTF-8 validity and duplicate map keys. |
| Security considerations | Prevent integer overflow and resource exhaustion (RFC 8949 Section 10). |
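
Most of these features are selected through encoding and decoding options. The sketch below assumes the preset helper `CTAP2EncOptions` and the `ShortestFloat`/`ShortestFloat16` option names from the current API; treat the exact identifiers as illustrative:

```go
// Start from the CTAP2 canonical preset (bytewise-lexicographic map key
// sorting; indefinite-length items and tags forbidden).
opts := cbor.CTAP2EncOptions()

// Preferred serialization for floats: allow float64 -> float32 -> float16
// when the value survives the conversion exactly.
opts.ShortestFloat = cbor.ShortestFloat16

em, err := opts.EncMode() // immutable mode, safe for concurrent use
if err != nil {
	panic(err)
}

b, err := em.Marshal(map[string]float64{"x": 1.5})
```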

Known limitations are noted in the [Limitations section](#limitations).

Go nil values for slices, maps, pointers, etc. are encoded as CBOR null. Empty slices, maps, etc. are encoded as empty CBOR arrays and maps.
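
For example, with the default encoding mode a nil slice and an empty slice produce different CBOR (a small sketch):

```go
var nilSlice []int    // nil
emptySlice := []int{} // empty but non-nil

b1, _ := cbor.Marshal(nilSlice)   // 0xf6 -> CBOR null
b2, _ := cbor.Marshal(emptySlice) // 0x80 -> empty CBOR array
fmt.Printf("% x | % x\n", b1, b2) // prints: f6 | 80
```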

Decoder checks for all required well-formedness errors, including all "subkinds" of syntax errors and too little data.

After well-formedness is verified, basic validity errors are handled as follows:

* Invalid UTF-8 string: Decoder has an option to check and return an invalid UTF-8 string error. This check is enabled by default.
* Duplicate keys in a map: Decoder has options to ignore or enforce rejection of duplicate map keys.

When decoding well-formed CBOR arrays and maps, the decoder saves the first error it encounters and continues with the next item. Options to handle this differently may be added in the future.

By default, the decoder treats time values of floating-point NaN and Infinity as if they are CBOR Null or CBOR Undefined.

__Click to expand topic:__

<details>
<summary>Duplicate Map Keys</summary><p>

This library provides options for fast detection and rejection of duplicate map keys based on applying a Go-specific data model to CBOR's extended generic data model in order to determine duplicate vs distinct map keys. Detection relies on whether the CBOR map key would be a duplicate "key" when decoded and applied to the user-provided Go map or struct.

`DupMapKeyQuiet` turns off detection of duplicate map keys. It tries to use a "keep fastest" method by choosing either "keep first" or "keep last" depending on the Go data type.

`DupMapKeyEnforcedAPF` enforces detection and rejection of duplicate map keys. Decoding stops immediately and returns `DupMapKeyError` when the first duplicate key is detected. The error includes the duplicate map key and the index number.

APF suffix means "Allow Partial Fill" so the destination map or struct can contain some decoded values at the time of error. It is the caller's responsibility to respond to the `DupMapKeyError` by discarding the partially filled result if that's required by their protocol.
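
A minimal sketch of rejecting duplicate map keys with `DupMapKeyEnforcedAPF` (assuming `DupMapKey` is the relevant `DecOptions` field name):

```go
dm, err := cbor.DecOptions{DupMapKey: cbor.DupMapKeyEnforcedAPF}.DecMode()
if err != nil {
	panic(err)
}

// 0xa2 0x01 0x01 0x01 0x02 is a CBOR map with two entries that both use the key 1.
var m map[int]int
err = dm.Unmarshal([]byte{0xa2, 0x01, 0x01, 0x01, 0x02}, &m)
fmt.Println(err) // a DupMapKeyError naming key 1; m may be partially filled (APF)
```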

</details>

<details>
<summary>Tag Validity</summary><p>

This library checks tag validity for built-in tags (currently tag numbers 0, 1, 2, 3, and 55799):

* Inadmissible type for tag content
* Inadmissible value for tag content

Unknown tag data items (not tag number 0, 1, 2, 3, or 55799) are handled in two ways:

* When decoding into an empty interface, an unknown tag data item is decoded into the `cbor.Tag` data type, which contains the tag number and tag content. The tag content is decoded into the default Go data type for the CBOR data type.
* When decoding into other Go types, an unknown tag data item is decoded into the specified Go type. If the Go type is registered with a tag number, the tag number can optionally be verified.

Decoder also has an option to forbid tag data items (treat any tag data item as an error), which is specified by protocols such as CTAP2 Canonical CBOR.
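
A short sketch of the first case, decoding an unregistered tag into an empty interface with default decoding options (the byte values are illustrative):

```go
// Tag number 100 (not a built-in tag) wrapping the integer 1: 0xd8 0x64 0x01.
var v interface{}
if err := cbor.Unmarshal([]byte{0xd8, 0x64, 0x01}, &v); err != nil {
	panic(err)
}
if t, ok := v.(cbor.Tag); ok {
	fmt.Println(t.Number, t.Content) // prints: 100 1
}
```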

For more information, see [decoding options](#decoding-options-1) and [tag options](#tag-options).

</details>

## Limitations

If any of these limitations prevent you from using this library, please open an issue along with a link to your project.

* CBOR `Undefined` (0xf7) value decodes to Go's `nil` value. CBOR `Null` (0xf6) more closely matches Go's `nil`.
* CBOR map keys with data types not supported by Go for map keys are ignored, and an error is returned after the remaining items are decoded.
* When decoding registered CBOR tag data to an interface type, the decoder creates a pointer to the registered Go type matching the CBOR tag number. Requiring a pointer for this is a Go limitation.

## Fuzzing and Code Coverage

__Code coverage__ is always 95% or higher (with `go test -cover`) when tagging a release.

__Coverage-guided fuzzing__ must pass billions of execs before tagging a release. Fuzzing is done using nonpublic code which may eventually get merged into this project. Until then, reports like OpenSSF Scorecard can't detect fuzz tests being used by this project.

<hr>

## Versions and API Changes

This project uses [Semantic Versioning](https://semver.org), so the API is always backwards compatible unless the major version number changes.

These functions have signatures identical to encoding/json and their API will continue to match `encoding/json` even after major new releases:
`Marshal`, `Unmarshal`, `NewEncoder`, `NewDecoder`, `(*Encoder).Encode`, and `(*Decoder).Decode`.
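
A minimal sketch showing the drop-in shape of the streaming API; these few lines would compile the same way against `encoding/json` if the import were swapped:

```go
var buf bytes.Buffer

enc := cbor.NewEncoder(&buf)
if err := enc.Encode(map[string]int{"a": 1}); err != nil {
	panic(err)
}

var out map[string]int
dec := cbor.NewDecoder(&buf)
if err := dec.Decode(&out); err != nil {
	panic(err)
}
```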

Exclusions from SemVer:

- Newly added API documented as "subject to change".
- Newly added API in the master branch that has never been tagged in a non-beta release.
- If function parameters are unchanged, bug fixes that change behavior (e.g. returning an error for an edge case that was missed in a prior version). We try to highlight these in the release notes and add an extended beta period. E.g. [v2.5.0-beta](https://github.com/fxamacker/cbor/releases/tag/v2.5.0-beta) (Dec 2022) -> [v2.5.0](https://github.com/fxamacker/cbor/releases/tag/v2.5.0) (Aug 2023).

This project avoids breaking changes to the behavior of encoding and decoding functions unless required to improve conformance with supported RFCs (e.g. RFC 8949, RFC 8742, etc.). Visible changes that don't improve conformance to standards are typically made available as new opt-in settings or new functions.

## Code of Conduct

This project has adopted the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). Contact [faye.github@gmail.com](mailto:faye.github@gmail.com) with any questions or comments.

## Contributing

Please open an issue before beginning work on a PR. The improvement may have already been considered, etc.

For more info, see [How to Contribute](CONTRIBUTING.md).

## Security Policy

Security fixes are provided for the latest released version of fxamacker/cbor.

For the full text of the Security Policy, see [SECURITY.md](SECURITY.md).

## Acknowledgements

Many thanks to all the contributors on this project!

I'm especially grateful to Bastian Müller and Dieter Shirley for suggesting and collaborating on CBOR stream mode, and much more.

I'm very grateful to Stefan Tatschner, Yawning Angel, Jernej Kos, x448, ZenGround0, and Jakob Borg for their contributions or support in the very early days.

Big thanks to Ben Luddy for his contributions in v2.6.0 and v2.7.0.

This library clearly wouldn't be possible without Carsten Bormann authoring the CBOR RFCs.

Special thanks to Laurence Lundblade and Jeffrey Yasskin for their help on the IETF mailing list or at [7049bis](https://github.com/cbor-wg/CBORbis).

Huge thanks to The Go Authors for creating a fun and practical programming language with batteries included!

This library uses `x448/float16`, which was previously part of this library. As a standalone package, `x448/float16` is useful to other projects as well.

## License

Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker).

fxamacker/cbor is licensed under the MIT License. See [LICENSE](LICENSE) for the full license text.

<hr>
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
7
vendor/github.com/fxamacker/cbor/v2/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,7 @@
# Security Policy

Security fixes are provided for the latest released version of fxamacker/cbor.

If the security vulnerability is already known to the public, then you can open an issue as a bug report.

To report security vulnerabilities not yet known to the public, please email faye.github@gmail.com and allow time for the problem to be resolved before reporting it to the public.
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
63
vendor/github.com/fxamacker/cbor/v2/bytestring.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

package cbor

import (
	"errors"
)

// ByteString represents CBOR byte string (major type 2). ByteString can be used
// when using a Go []byte is not possible or convenient. For example, Go doesn't
// allow []byte as map key, so ByteString can be used to support data formats
// having CBOR map with byte string keys. ByteString can also be used to
// encode invalid UTF-8 string as CBOR byte string.
// See DecOption.MapKeyByteStringMode for more details.
type ByteString string

// Bytes returns bytes representing ByteString.
func (bs ByteString) Bytes() []byte {
	return []byte(bs)
}

// MarshalCBOR encodes ByteString as CBOR byte string (major type 2).
func (bs ByteString) MarshalCBOR() ([]byte, error) {
	e := getEncodeBuffer()
	defer putEncodeBuffer(e)

	// Encode length
	encodeHead(e, byte(cborTypeByteString), uint64(len(bs)))

	// Encode data
	buf := make([]byte, e.Len()+len(bs))
	n := copy(buf, e.Bytes())
	copy(buf[n:], bs)

	return buf, nil
}

// UnmarshalCBOR decodes CBOR byte string (major type 2) to ByteString.
// Decoding CBOR null and CBOR undefined sets ByteString to be empty.
func (bs *ByteString) UnmarshalCBOR(data []byte) error {
	if bs == nil {
		return errors.New("cbor.ByteString: UnmarshalCBOR on nil pointer")
	}

	// Decoding CBOR null and CBOR undefined to ByteString resets data.
	// This behavior is similar to decoding CBOR null and CBOR undefined to []byte.
	if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
		*bs = ""
		return nil
	}

	d := decoder{data: data, dm: defaultDecMode}

	// Check if CBOR data type is byte string
	if typ := d.nextCBORType(); typ != cborTypeByteString {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeByteString.String()}
	}

	b, _ := d.parseByteString()
	*bs = ByteString(b)
	return nil
}
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
363
vendor/github.com/fxamacker/cbor/v2/cache.go
generated
vendored
Normal file
@@ -0,0 +1,363 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type encodeFuncs struct {
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
}
|
||||
|
||||
var (
|
||||
decodingStructTypeCache sync.Map // map[reflect.Type]*decodingStructType
|
||||
encodingStructTypeCache sync.Map // map[reflect.Type]*encodingStructType
|
||||
encodeFuncCache sync.Map // map[reflect.Type]encodeFuncs
|
||||
typeInfoCache sync.Map // map[reflect.Type]*typeInfo
|
||||
)
|
||||
|
||||
type specialType int
|
||||
|
||||
const (
|
||||
specialTypeNone specialType = iota
|
||||
specialTypeUnmarshalerIface
|
||||
specialTypeEmptyIface
|
||||
specialTypeIface
|
||||
specialTypeTag
|
||||
specialTypeTime
|
||||
)
|
||||
|
||||
type typeInfo struct {
|
||||
elemTypeInfo *typeInfo
|
||||
keyTypeInfo *typeInfo
|
||||
typ reflect.Type
|
||||
kind reflect.Kind
|
||||
nonPtrType reflect.Type
|
||||
nonPtrKind reflect.Kind
|
||||
spclType specialType
|
||||
}
|
||||
|
||||
func newTypeInfo(t reflect.Type) *typeInfo {
|
||||
tInfo := typeInfo{typ: t, kind: t.Kind()}
|
||||
|
||||
for t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
|
||||
k := t.Kind()
|
||||
|
||||
tInfo.nonPtrType = t
|
||||
tInfo.nonPtrKind = k
|
||||
|
||||
if k == reflect.Interface {
|
||||
if t.NumMethod() == 0 {
|
||||
tInfo.spclType = specialTypeEmptyIface
|
||||
} else {
|
||||
tInfo.spclType = specialTypeIface
|
||||
}
|
||||
} else if t == typeTag {
|
||||
tInfo.spclType = specialTypeTag
|
||||
} else if t == typeTime {
|
||||
tInfo.spclType = specialTypeTime
|
||||
} else if reflect.PtrTo(t).Implements(typeUnmarshaler) {
|
||||
tInfo.spclType = specialTypeUnmarshalerIface
|
||||
}
|
||||
|
||||
switch k {
|
||||
case reflect.Array, reflect.Slice:
|
||||
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||
case reflect.Map:
|
||||
tInfo.keyTypeInfo = getTypeInfo(t.Key())
|
||||
tInfo.elemTypeInfo = getTypeInfo(t.Elem())
|
||||
}
|
||||
|
||||
return &tInfo
|
||||
}
|
||||
|
||||
type decodingStructType struct {
|
||||
fields fields
|
||||
fieldIndicesByName map[string]int
|
||||
err error
|
||||
toArray bool
|
||||
}
|
||||
|
||||
// The stdlib errors.Join was introduced in Go 1.20, and we still support Go 1.17, so instead,
|
||||
// here's a very basic implementation of an aggregated error.
|
||||
type multierror []error
|
||||
|
||||
func (m multierror) Error() string {
|
||||
var sb strings.Builder
|
||||
for i, err := range m {
|
||||
sb.WriteString(err.Error())
|
||||
if i < len(m)-1 {
|
||||
sb.WriteString(", ")
|
||||
}
|
||||
}
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
func getDecodingStructType(t reflect.Type) *decodingStructType {
|
||||
if v, _ := decodingStructTypeCache.Load(t); v != nil {
|
||||
return v.(*decodingStructType)
|
||||
}
|
||||
|
||||
flds, structOptions := getFields(t)
|
||||
|
||||
toArray := hasToArrayOption(structOptions)
|
||||
|
||||
var errs []error
|
||||
for i := 0; i < len(flds); i++ {
|
||||
if flds[i].keyAsInt {
|
||||
nameAsInt, numErr := strconv.Atoi(flds[i].name)
|
||||
if numErr != nil {
|
||||
errs = append(errs, errors.New("cbor: failed to parse field name \""+flds[i].name+"\" to int ("+numErr.Error()+")"))
|
||||
break
|
||||
}
|
||||
flds[i].nameAsInt = int64(nameAsInt)
|
||||
}
|
||||
|
||||
flds[i].typInfo = getTypeInfo(flds[i].typ)
|
||||
}
|
||||
|
||||
fieldIndicesByName := make(map[string]int, len(flds))
|
||||
for i, fld := range flds {
|
||||
if _, ok := fieldIndicesByName[fld.name]; ok {
|
||||
errs = append(errs, fmt.Errorf("cbor: two or more fields of %v have the same name %q", t, fld.name))
|
||||
continue
|
||||
}
|
||||
fieldIndicesByName[fld.name] = i
|
||||
}
|
||||
|
||||
var err error
|
||||
{
|
||||
var multi multierror
|
||||
for _, each := range errs {
|
||||
if each != nil {
|
||||
multi = append(multi, each)
|
||||
}
|
||||
}
|
||||
if len(multi) == 1 {
|
||||
err = multi[0]
|
||||
} else if len(multi) > 1 {
|
||||
err = multi
|
||||
}
|
||||
}
|
||||
|
||||
structType := &decodingStructType{
|
||||
fields: flds,
|
||||
fieldIndicesByName: fieldIndicesByName,
|
||||
err: err,
|
||||
toArray: toArray,
|
||||
}
|
||||
decodingStructTypeCache.Store(t, structType)
|
||||
return structType
|
||||
}
|
||||
|
||||
type encodingStructType struct {
|
||||
fields fields
|
||||
bytewiseFields fields
|
||||
lengthFirstFields fields
|
||||
omitEmptyFieldsIdx []int
|
||||
err error
|
||||
toArray bool
|
||||
}
|
||||
|
||||
func (st *encodingStructType) getFields(em *encMode) fields {
|
||||
switch em.sort {
|
||||
case SortNone, SortFastShuffle:
|
||||
return st.fields
|
||||
case SortLengthFirst:
|
||||
return st.lengthFirstFields
|
||||
default:
|
||||
return st.bytewiseFields
|
||||
}
|
||||
}
|
||||
|
||||
type bytewiseFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *bytewiseFieldSorter) Less(i, j int) bool {
|
||||
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||
}
|
||||
|
||||
type lengthFirstFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *lengthFirstFieldSorter) Less(i, j int) bool {
|
||||
if len(x.fields[i].cborName) != len(x.fields[j].cborName) {
|
||||
return len(x.fields[i].cborName) < len(x.fields[j].cborName)
|
||||
}
|
||||
return bytes.Compare(x.fields[i].cborName, x.fields[j].cborName) <= 0
|
||||
}
|
||||
|
||||
func getEncodingStructType(t reflect.Type) (*encodingStructType, error) {
|
||||
if v, _ := encodingStructTypeCache.Load(t); v != nil {
|
||||
structType := v.(*encodingStructType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
flds, structOptions := getFields(t)
|
||||
|
||||
if hasToArrayOption(structOptions) {
|
||||
return getEncodingStructToArrayType(t, flds)
|
||||
}
|
||||
|
||||
var err error
|
||||
var hasKeyAsInt bool
|
||||
var hasKeyAsStr bool
|
||||
var omitEmptyIdx []int
|
||||
e := getEncodeBuffer()
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
err = &UnsupportedTypeError{t}
|
||||
break
|
||||
}
|
||||
|
||||
// Encode field name
|
||||
if flds[i].keyAsInt {
|
||||
nameAsInt, numErr := strconv.Atoi(flds[i].name)
|
||||
if numErr != nil {
|
||||
err = errors.New("cbor: failed to parse field name \"" + flds[i].name + "\" to int (" + numErr.Error() + ")")
|
||||
break
|
||||
}
|
||||
flds[i].nameAsInt = int64(nameAsInt)
|
||||
if nameAsInt >= 0 {
|
||||
encodeHead(e, byte(cborTypePositiveInt), uint64(nameAsInt))
|
||||
} else {
|
||||
n := nameAsInt*(-1) - 1
|
||||
encodeHead(e, byte(cborTypeNegativeInt), uint64(n))
|
||||
}
|
||||
flds[i].cborName = make([]byte, e.Len())
|
||||
copy(flds[i].cborName, e.Bytes())
|
||||
e.Reset()
|
||||
|
||||
hasKeyAsInt = true
|
||||
} else {
|
||||
encodeHead(e, byte(cborTypeTextString), uint64(len(flds[i].name)))
|
||||
flds[i].cborName = make([]byte, e.Len()+len(flds[i].name))
|
||||
n := copy(flds[i].cborName, e.Bytes())
|
||||
copy(flds[i].cborName[n:], flds[i].name)
|
||||
e.Reset()
|
||||
|
||||
// If cborName contains a text string, then cborNameByteString contains a
|
||||
// string that has the byte string major type but is otherwise identical to
|
||||
// cborName.
|
||||
flds[i].cborNameByteString = make([]byte, len(flds[i].cborName))
|
||||
copy(flds[i].cborNameByteString, flds[i].cborName)
|
||||
// Reset encoded CBOR type to byte string, preserving the "additional
|
||||
// information" bits:
|
||||
flds[i].cborNameByteString[0] = byte(cborTypeByteString) |
|
||||
getAdditionalInformation(flds[i].cborNameByteString[0])
|
||||
|
||||
hasKeyAsStr = true
|
||||
}
|
||||
|
||||
// Check if field can be omitted when empty
|
||||
if flds[i].omitEmpty {
|
||||
omitEmptyIdx = append(omitEmptyIdx, i)
|
||||
}
|
||||
}
|
||||
putEncodeBuffer(e)
|
||||
|
||||
if err != nil {
|
||||
structType := &encodingStructType{err: err}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
// Sort fields by canonical order
|
||||
bytewiseFields := make(fields, len(flds))
|
||||
copy(bytewiseFields, flds)
|
||||
sort.Sort(&bytewiseFieldSorter{bytewiseFields})
|
||||
|
||||
lengthFirstFields := bytewiseFields
|
||||
if hasKeyAsInt && hasKeyAsStr {
|
||||
lengthFirstFields = make(fields, len(flds))
|
||||
copy(lengthFirstFields, flds)
|
||||
sort.Sort(&lengthFirstFieldSorter{lengthFirstFields})
|
||||
}
|
||||
|
||||
structType := &encodingStructType{
|
||||
fields: flds,
|
||||
bytewiseFields: bytewiseFields,
|
||||
lengthFirstFields: lengthFirstFields,
|
||||
omitEmptyFieldsIdx: omitEmptyIdx,
|
||||
}
|
||||
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodingStructToArrayType(t reflect.Type, flds fields) (*encodingStructType, error) {
|
||||
for i := 0; i < len(flds); i++ {
|
||||
// Get field's encodeFunc
|
||||
flds[i].ef, flds[i].ief = getEncodeFunc(flds[i].typ)
|
||||
if flds[i].ef == nil {
|
||||
structType := &encodingStructType{err: &UnsupportedTypeError{t}}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
}
|
||||
|
||||
structType := &encodingStructType{
|
||||
fields: flds,
|
||||
toArray: true,
|
||||
}
|
||||
encodingStructTypeCache.Store(t, structType)
|
||||
return structType, structType.err
|
||||
}
|
||||
|
||||
func getEncodeFunc(t reflect.Type) (encodeFunc, isEmptyFunc) {
|
||||
if v, _ := encodeFuncCache.Load(t); v != nil {
|
||||
fs := v.(encodeFuncs)
|
||||
return fs.ef, fs.ief
|
||||
}
|
||||
ef, ief := getEncodeFuncInternal(t)
|
||||
encodeFuncCache.Store(t, encodeFuncs{ef, ief})
|
||||
return ef, ief
|
||||
}
|
||||
|
||||
func getTypeInfo(t reflect.Type) *typeInfo {
|
||||
if v, _ := typeInfoCache.Load(t); v != nil {
|
||||
return v.(*typeInfo)
|
||||
}
|
||||
tInfo := newTypeInfo(t)
|
||||
typeInfoCache.Store(t, tInfo)
|
||||
return tInfo
|
||||
}
|
||||
|
||||
func hasToArrayOption(tag string) bool {
|
||||
s := ",toarray"
|
||||
idx := strings.Index(tag, s)
|
||||
return idx >= 0 && (len(tag) == idx+len(s) || tag[idx+len(s)] == ',')
|
||||
}
|
||||
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
182
vendor/github.com/fxamacker/cbor/v2/common.go
generated
vendored
Normal file
@@ -0,0 +1,182 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type cborType uint8
|
||||
|
||||
const (
|
||||
cborTypePositiveInt cborType = 0x00
|
||||
cborTypeNegativeInt cborType = 0x20
|
||||
cborTypeByteString cborType = 0x40
|
||||
cborTypeTextString cborType = 0x60
|
||||
cborTypeArray cborType = 0x80
|
||||
cborTypeMap cborType = 0xa0
|
||||
cborTypeTag cborType = 0xc0
|
||||
cborTypePrimitives cborType = 0xe0
|
||||
)
|
||||
|
||||
func (t cborType) String() string {
|
||||
switch t {
|
||||
case cborTypePositiveInt:
|
||||
return "positive integer"
|
||||
case cborTypeNegativeInt:
|
||||
return "negative integer"
|
||||
case cborTypeByteString:
|
||||
return "byte string"
|
||||
case cborTypeTextString:
|
||||
return "UTF-8 text string"
|
||||
case cborTypeArray:
|
||||
return "array"
|
||||
case cborTypeMap:
|
||||
return "map"
|
||||
case cborTypeTag:
|
||||
return "tag"
|
||||
case cborTypePrimitives:
|
||||
return "primitives"
|
||||
default:
|
||||
return "Invalid type " + strconv.Itoa(int(t))
|
||||
}
|
||||
}
|
||||
|
||||
type additionalInformation uint8
|
||||
|
||||
const (
|
||||
maxAdditionalInformationWithoutArgument = 23
|
||||
additionalInformationWith1ByteArgument = 24
|
||||
additionalInformationWith2ByteArgument = 25
|
||||
additionalInformationWith4ByteArgument = 26
|
||||
additionalInformationWith8ByteArgument = 27
|
||||
|
||||
// For major type 7.
|
||||
additionalInformationAsFalse = 20
|
||||
additionalInformationAsTrue = 21
|
||||
additionalInformationAsNull = 22
|
||||
additionalInformationAsUndefined = 23
|
||||
additionalInformationAsFloat16 = 25
|
||||
additionalInformationAsFloat32 = 26
|
||||
additionalInformationAsFloat64 = 27
|
||||
|
||||
// For major type 2, 3, 4, 5.
|
||||
additionalInformationAsIndefiniteLengthFlag = 31
|
||||
)
|
||||
|
||||
const (
|
||||
maxSimpleValueInAdditionalInformation = 23
|
||||
minSimpleValueIn1ByteArgument = 32
|
||||
)
|
||||
|
||||
func (ai additionalInformation) isIndefiniteLength() bool {
|
||||
return ai == additionalInformationAsIndefiniteLengthFlag
|
||||
}
|
||||
|
||||
const (
|
||||
// From RFC 8949 Section 3:
|
||||
// "The initial byte of each encoded data item contains both information about the major type
|
||||
// (the high-order 3 bits, described in Section 3.1) and additional information
|
||||
// (the low-order 5 bits)."
|
||||
|
||||
// typeMask is used to extract major type in initial byte of encoded data item.
|
||||
typeMask = 0xe0
|
||||
|
||||
// additionalInformationMask is used to extract additional information in initial byte of encoded data item.
|
||||
additionalInformationMask = 0x1f
|
||||
)
|
||||
|
||||
func getType(raw byte) cborType {
|
||||
return cborType(raw & typeMask)
|
||||
}
|
||||
|
||||
func getAdditionalInformation(raw byte) byte {
|
||||
return raw & additionalInformationMask
|
||||
}
|
||||
|
||||
func isBreakFlag(raw byte) bool {
|
||||
return raw == cborBreakFlag
|
||||
}
|
||||
|
||||
func parseInitialByte(b byte) (t cborType, ai byte) {
|
||||
return getType(b), getAdditionalInformation(b)
|
||||
}
|
||||
|
||||
const (
|
||||
tagNumRFC3339Time = 0
|
||||
tagNumEpochTime = 1
|
||||
tagNumUnsignedBignum = 2
|
||||
tagNumNegativeBignum = 3
|
||||
tagNumExpectedLaterEncodingBase64URL = 21
|
||||
tagNumExpectedLaterEncodingBase64 = 22
|
||||
tagNumExpectedLaterEncodingBase16 = 23
|
||||
tagNumSelfDescribedCBOR = 55799
|
||||
)
|
||||
|
||||
const (
|
||||
cborBreakFlag = byte(0xff)
|
||||
cborByteStringWithIndefiniteLengthHead = byte(0x5f)
|
||||
cborTextStringWithIndefiniteLengthHead = byte(0x7f)
|
||||
cborArrayWithIndefiniteLengthHead = byte(0x9f)
|
||||
cborMapWithIndefiniteLengthHead = byte(0xbf)
|
||||
)
|
||||
|
||||
var (
|
||||
cborFalse = []byte{0xf4}
|
||||
cborTrue = []byte{0xf5}
|
||||
cborNil = []byte{0xf6}
|
||||
cborNaN = []byte{0xf9, 0x7e, 0x00}
|
||||
cborPositiveInfinity = []byte{0xf9, 0x7c, 0x00}
|
||||
cborNegativeInfinity = []byte{0xf9, 0xfc, 0x00}
|
||||
)
|
||||
|
||||
// validBuiltinTag checks that supported built-in tag numbers are followed by expected content types.
|
||||
func validBuiltinTag(tagNum uint64, contentHead byte) error {
|
||||
t := getType(contentHead)
|
||||
switch tagNum {
|
||||
case tagNumRFC3339Time:
|
||||
// Tag content (date/time text string in RFC 3339 format) must be string type.
|
||||
if t != cborTypeTextString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumRFC3339Time,
|
||||
"text string",
|
||||
t.String())
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumEpochTime:
|
||||
// Tag content (epoch date/time) must be uint, int, or float type.
|
||||
if t != cborTypePositiveInt && t != cborTypeNegativeInt && (contentHead < 0xf9 || contentHead > 0xfb) {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumEpochTime,
|
||||
"integer or floating-point number",
|
||||
t.String())
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumUnsignedBignum, tagNumNegativeBignum:
|
||||
// Tag content (bignum) must be byte type.
|
||||
if t != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeErrorf(
|
||||
fmt.Sprintf(
|
||||
"tag number %d or %d must be followed by byte string, got %s",
|
||||
tagNumUnsignedBignum,
|
||||
tagNumNegativeBignum,
|
||||
t.String(),
|
||||
))
|
||||
}
|
||||
return nil
|
||||
|
||||
case tagNumExpectedLaterEncodingBase64URL, tagNumExpectedLaterEncodingBase64, tagNumExpectedLaterEncodingBase16:
|
||||
// From RFC 8949 3.4.5.2:
|
||||
// The data item tagged can be a byte string or any other data item. In the latter
|
||||
// case, the tag applies to all of the byte string data items contained in the data
|
||||
// item, except for those contained in a nested data item tagged with an expected
|
||||
// conversion.
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
3187
vendor/github.com/fxamacker/cbor/v2/decode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
724
vendor/github.com/fxamacker/cbor/v2/diagnose.go
generated
vendored
Normal file
@@ -0,0 +1,724 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base32"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"math/big"
|
||||
"strconv"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/x448/float16"
|
||||
)
|
||||
|
||||
// DiagMode is the main interface for CBOR diagnostic notation.
|
||||
type DiagMode interface {
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using this DiagMode.
|
||||
Diagnose([]byte) (string, error)
|
||||
|
||||
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
DiagnoseFirst([]byte) (string, []byte, error)
|
||||
|
||||
// DiagOptions returns user specified options used to create this DiagMode.
|
||||
DiagOptions() DiagOptions
|
||||
}
|
||||
|
||||
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||
type ByteStringEncoding uint8
|
||||
|
||||
const (
|
||||
// ByteStringBase16Encoding encodes byte strings in base16, without padding.
|
||||
ByteStringBase16Encoding ByteStringEncoding = iota
|
||||
|
||||
// ByteStringBase32Encoding encodes byte strings in base32, without padding.
|
||||
ByteStringBase32Encoding
|
||||
|
||||
// ByteStringBase32HexEncoding encodes byte strings in base32hex, without padding.
|
||||
ByteStringBase32HexEncoding
|
||||
|
||||
// ByteStringBase64Encoding encodes byte strings in base64url, without padding.
|
||||
ByteStringBase64Encoding
|
||||
|
||||
maxByteStringEncoding
|
||||
)
|
||||
|
||||
func (bse ByteStringEncoding) valid() error {
|
||||
if bse >= maxByteStringEncoding {
|
||||
return errors.New("cbor: invalid ByteStringEncoding " + strconv.Itoa(int(bse)))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DiagOptions specifies Diag options.
|
||||
type DiagOptions struct {
|
||||
// ByteStringEncoding specifies the base encoding that byte strings are notated.
|
||||
// Default is ByteStringBase16Encoding.
|
||||
ByteStringEncoding ByteStringEncoding
|
||||
|
||||
// ByteStringHexWhitespace specifies notating with whitespace in byte string
|
||||
// when ByteStringEncoding is ByteStringBase16Encoding.
|
||||
ByteStringHexWhitespace bool
|
||||
|
||||
// ByteStringText specifies notating with text in byte string
|
||||
// if it is a valid UTF-8 text.
|
||||
ByteStringText bool
|
||||
|
||||
// ByteStringEmbeddedCBOR specifies notating embedded CBOR in byte string
|
||||
// if it is a valid CBOR bytes.
|
||||
ByteStringEmbeddedCBOR bool
|
||||
|
||||
// CBORSequence specifies notating CBOR sequences.
|
||||
// otherwise, it returns an error if there are more bytes after the first CBOR.
|
||||
CBORSequence bool
|
||||
|
||||
// FloatPrecisionIndicator specifies appending a suffix to indicate float precision.
|
||||
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-encoding-indicators.
|
||||
FloatPrecisionIndicator bool
|
||||
|
||||
// MaxNestedLevels specifies the max nested levels allowed for any combination of CBOR array, maps, and tags.
|
||||
// Default is 32 levels and it can be set to [4, 65535]. Note that higher maximum levels of nesting can
|
||||
// require larger amounts of stack to deserialize. Don't increase this higher than you require.
|
||||
MaxNestedLevels int
|
||||
|
||||
// MaxArrayElements specifies the max number of elements for CBOR arrays.
|
||||
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||
MaxArrayElements int
|
||||
|
||||
// MaxMapPairs specifies the max number of key-value pairs for CBOR maps.
|
||||
// Default is 128*1024=131072 and it can be set to [16, 2147483647]
|
||||
MaxMapPairs int
|
||||
}
|
||||
|
||||
// DiagMode returns a DiagMode with immutable options.
|
||||
func (opts DiagOptions) DiagMode() (DiagMode, error) {
|
||||
return opts.diagMode()
|
||||
}
|
||||
|
||||
func (opts DiagOptions) diagMode() (*diagMode, error) {
|
||||
if err := opts.ByteStringEncoding.valid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
decMode, err := DecOptions{
|
||||
MaxNestedLevels: opts.MaxNestedLevels,
|
||||
MaxArrayElements: opts.MaxArrayElements,
|
||||
MaxMapPairs: opts.MaxMapPairs,
|
||||
}.decMode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &diagMode{
|
||||
byteStringEncoding: opts.ByteStringEncoding,
|
||||
byteStringHexWhitespace: opts.ByteStringHexWhitespace,
|
||||
byteStringText: opts.ByteStringText,
|
||||
byteStringEmbeddedCBOR: opts.ByteStringEmbeddedCBOR,
|
||||
cborSequence: opts.CBORSequence,
|
||||
floatPrecisionIndicator: opts.FloatPrecisionIndicator,
|
||||
decMode: decMode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type diagMode struct {
|
||||
byteStringEncoding ByteStringEncoding
|
||||
byteStringHexWhitespace bool
|
||||
byteStringText bool
|
||||
byteStringEmbeddedCBOR bool
|
||||
cborSequence bool
|
||||
floatPrecisionIndicator bool
|
||||
decMode *decMode
|
||||
}
|
||||
|
||||
// DiagOptions returns user specified options used to create this DiagMode.
|
||||
func (dm *diagMode) DiagOptions() DiagOptions {
|
||||
return DiagOptions{
|
||||
ByteStringEncoding: dm.byteStringEncoding,
|
||||
ByteStringHexWhitespace: dm.byteStringHexWhitespace,
|
||||
ByteStringText: dm.byteStringText,
|
||||
ByteStringEmbeddedCBOR: dm.byteStringEmbeddedCBOR,
|
||||
CBORSequence: dm.cborSequence,
|
||||
FloatPrecisionIndicator: dm.floatPrecisionIndicator,
|
||||
MaxNestedLevels: dm.decMode.maxNestedLevels,
|
||||
MaxArrayElements: dm.decMode.maxArrayElements,
|
||||
MaxMapPairs: dm.decMode.maxMapPairs,
|
||||
}
|
||||
}
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items using the DiagMode.
|
||||
func (dm *diagMode) Diagnose(data []byte) (string, error) {
|
||||
return newDiagnose(data, dm.decMode, dm).diag(dm.cborSequence)
|
||||
}
|
||||
|
||||
// DiagnoseFirst returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
func (dm *diagMode) DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||
return newDiagnose(data, dm.decMode, dm).diagFirst()
|
||||
}
|
||||
|
||||
var defaultDiagMode, _ = DiagOptions{}.diagMode()
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of CBOR data items
|
||||
// using the default diagnostic mode.
|
||||
//
|
||||
// Refer to https://www.rfc-editor.org/rfc/rfc8949.html#name-diagnostic-notation.
|
||||
func Diagnose(data []byte) (string, error) {
|
||||
return defaultDiagMode.Diagnose(data)
|
||||
}
|
||||
|
||||
// Diagnose returns extended diagnostic notation (EDN) of the first CBOR data item using the DiagMode. Any remaining bytes are returned in rest.
|
||||
func DiagnoseFirst(data []byte) (diagNotation string, rest []byte, err error) {
|
||||
return defaultDiagMode.DiagnoseFirst(data)
|
||||
}
|
||||
|
||||
type diagnose struct {
|
||||
dm *diagMode
|
||||
d *decoder
|
||||
w *bytes.Buffer
|
||||
}
|
||||
|
||||
func newDiagnose(data []byte, decm *decMode, diagm *diagMode) *diagnose {
|
||||
return &diagnose{
|
||||
dm: diagm,
|
||||
d: &decoder{data: data, dm: decm},
|
||||
w: &bytes.Buffer{},
|
||||
}
|
||||
}
|
||||
|
||||
func (di *diagnose) diag(cborSequence bool) (string, error) {
|
||||
// CBOR Sequence
|
||||
firstItem := true
|
||||
for {
|
||||
switch err := di.wellformed(cborSequence); err {
|
||||
case nil:
|
||||
if !firstItem {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
firstItem = false
|
||||
if itemErr := di.item(); itemErr != nil {
|
||||
return di.w.String(), itemErr
|
||||
}
|
||||
|
||||
case io.EOF:
|
||||
if firstItem {
|
||||
return di.w.String(), err
|
||||
}
|
||||
return di.w.String(), nil
|
||||
|
||||
default:
|
||||
return di.w.String(), err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (di *diagnose) diagFirst() (diagNotation string, rest []byte, err error) {
|
||||
err = di.wellformed(true)
|
||||
if err == nil {
|
||||
err = di.item()
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
// Return EDN and the rest of the data slice (which might be len 0)
|
||||
return di.w.String(), di.d.data[di.d.off:], nil
|
||||
}
|
||||
|
||||
return di.w.String(), nil, err
|
||||
}
|
||||
|
||||
func (di *diagnose) wellformed(allowExtraData bool) error {
|
||||
off := di.d.off
|
||||
err := di.d.wellformed(allowExtraData, false)
|
||||
di.d.off = off
|
||||
return err
|
||||
}
|
||||
|
||||
func (di *diagnose) item() error { //nolint:gocyclo
|
||||
initialByte := di.d.data[di.d.off]
|
||||
switch initialByte {
|
||||
case cborByteStringWithIndefiniteLengthHead,
|
||||
cborTextStringWithIndefiniteLengthHead: // indefinite-length byte/text string
|
||||
di.d.off++
|
||||
if isBreakFlag(di.d.data[di.d.off]) {
|
||||
di.d.off++
|
||||
switch initialByte {
|
||||
case cborByteStringWithIndefiniteLengthHead:
|
||||
// indefinite-length bytes with no chunks.
|
||||
di.w.WriteString(`''_`)
|
||||
return nil
|
||||
case cborTextStringWithIndefiniteLengthHead:
|
||||
// indefinite-length text with no chunks.
|
||||
di.w.WriteString(`""_`)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteString("(_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
// wellformedIndefiniteString() already checked that the next item is a byte/text string.
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
|
||||
case cborArrayWithIndefiniteLengthHead: // indefinite-length array
|
||||
di.d.off++
|
||||
di.w.WriteString("[_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte(']')
|
||||
return nil
|
||||
|
||||
case cborMapWithIndefiniteLengthHead: // indefinite-length map
|
||||
di.d.off++
|
||||
di.w.WriteString("{_ ")
|
||||
|
||||
i := 0
|
||||
for !di.d.foundBreak() {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
|
||||
i++
|
||||
// key
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
di.w.WriteString(": ")
|
||||
|
||||
// value
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteByte('}')
|
||||
return nil
|
||||
}
|
||||
|
||||
t := di.d.nextCBORType()
|
||||
switch t {
|
||||
case cborTypePositiveInt:
|
||||
_, _, val := di.d.getHead()
|
||||
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||
return nil
|
||||
|
||||
case cborTypeNegativeInt:
|
||||
_, _, val := di.d.getHead()
|
||||
if val > math.MaxInt64 {
|
||||
// CBOR negative integer overflows int64, use big.Int to store value.
|
||||
bi := new(big.Int)
|
||||
bi.SetUint64(val)
|
||||
bi.Add(bi, big.NewInt(1))
|
||||
bi.Neg(bi)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
nValue := int64(-1) ^ int64(val)
|
||||
di.w.WriteString(strconv.FormatInt(nValue, 10))
|
||||
return nil
|
||||
|
||||
case cborTypeByteString:
|
||||
b, _ := di.d.parseByteString()
|
||||
return di.encodeByteString(b)
|
||||
|
||||
case cborTypeTextString:
|
||||
b, err := di.d.parseTextString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return di.encodeTextString(string(b), '"')
|
||||
|
||||
case cborTypeArray:
|
||||
_, _, val := di.d.getHead()
|
||||
count := int(val)
|
||||
di.w.WriteByte('[')
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
di.w.WriteByte(']')
|
||||
return nil
|
||||
|
||||
case cborTypeMap:
|
||||
_, _, val := di.d.getHead()
|
||||
count := int(val)
|
||||
di.w.WriteByte('{')
|
||||
|
||||
for i := 0; i < count; i++ {
|
||||
if i > 0 {
|
||||
di.w.WriteString(", ")
|
||||
}
|
||||
// key
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
di.w.WriteString(": ")
|
||||
// value
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
di.w.WriteByte('}')
|
||||
return nil
|
||||
|
||||
case cborTypeTag:
|
||||
_, _, tagNum := di.d.getHead()
|
||||
switch tagNum {
|
||||
case tagNumUnsignedBignum:
|
||||
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumUnsignedBignum,
|
||||
"byte string",
|
||||
nt.String())
|
||||
}
|
||||
|
||||
b, _ := di.d.parseByteString()
|
||||
bi := new(big.Int).SetBytes(b)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
|
||||
case tagNumNegativeBignum:
|
||||
if nt := di.d.nextCBORType(); nt != cborTypeByteString {
|
||||
return newInadmissibleTagContentTypeError(
|
||||
tagNumNegativeBignum,
|
||||
"byte string",
|
||||
nt.String(),
|
||||
)
|
||||
}
|
||||
|
||||
b, _ := di.d.parseByteString()
|
||||
bi := new(big.Int).SetBytes(b)
|
||||
bi.Add(bi, big.NewInt(1))
|
||||
bi.Neg(bi)
|
||||
di.w.WriteString(bi.String())
|
||||
return nil
|
||||
|
||||
default:
|
||||
di.w.WriteString(strconv.FormatUint(tagNum, 10))
|
||||
di.w.WriteByte('(')
|
||||
if err := di.item(); err != nil {
|
||||
return err
|
||||
}
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
}
|
||||
|
||||
case cborTypePrimitives:
|
||||
_, ai, val := di.d.getHead()
|
||||
switch ai {
|
||||
case additionalInformationAsFalse:
|
||||
di.w.WriteString("false")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsTrue:
|
||||
di.w.WriteString("true")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsNull:
|
||||
di.w.WriteString("null")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsUndefined:
|
||||
di.w.WriteString("undefined")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat16,
|
||||
additionalInformationAsFloat32,
|
||||
additionalInformationAsFloat64:
|
||||
return di.encodeFloat(ai, val)
|
||||
|
||||
default:
|
||||
di.w.WriteString("simple(")
|
||||
di.w.WriteString(strconv.FormatUint(val, 10))
|
||||
di.w.WriteByte(')')
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeU16 format a rune as "\uxxxx"
|
||||
func (di *diagnose) writeU16(val rune) {
|
||||
di.w.WriteString("\\u")
|
||||
var in [2]byte
|
||||
in[0] = byte(val >> 8)
|
||||
in[1] = byte(val)
|
||||
sz := hex.EncodedLen(len(in))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
hex.Encode(dst, in[:])
|
||||
di.w.Write(dst)
|
||||
}
|
||||
|
||||
var rawBase32Encoding = base32.StdEncoding.WithPadding(base32.NoPadding)
|
||||
var rawBase32HexEncoding = base32.HexEncoding.WithPadding(base32.NoPadding)
|
||||
|
||||
func (di *diagnose) encodeByteString(val []byte) error {
|
||||
if len(val) > 0 {
|
||||
if di.dm.byteStringText && utf8.Valid(val) {
|
||||
return di.encodeTextString(string(val), '\'')
|
||||
}
|
||||
|
||||
if di.dm.byteStringEmbeddedCBOR {
|
||||
di2 := newDiagnose(val, di.dm.decMode, di.dm)
|
||||
// should always notating embedded CBOR sequence.
|
||||
if str, err := di2.diag(true); err == nil {
|
||||
di.w.WriteString("<<")
|
||||
di.w.WriteString(str)
|
||||
di.w.WriteString(">>")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch di.dm.byteStringEncoding {
|
||||
case ByteStringBase16Encoding:
|
||||
di.w.WriteString("h'")
|
||||
if di.dm.byteStringHexWhitespace {
|
||||
sz := hex.EncodedLen(len(val))
|
||||
if len(val) > 0 {
|
||||
sz += len(val) - 1
|
||||
}
|
||||
di.w.Grow(sz)
|
||||
|
||||
dst := di.w.Bytes()[di.w.Len():]
|
||||
for i := range val {
|
||||
if i > 0 {
|
||||
dst = append(dst, ' ')
|
||||
}
|
||||
hex.Encode(dst[len(dst):len(dst)+2], val[i:i+1])
|
||||
dst = dst[:len(dst)+2]
|
||||
}
|
||||
di.w.Write(dst)
|
||||
} else {
|
||||
sz := hex.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
hex.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
}
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase32Encoding:
|
||||
di.w.WriteString("b32'")
|
||||
sz := rawBase32Encoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
rawBase32Encoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase32HexEncoding:
|
||||
di.w.WriteString("h32'")
|
||||
sz := rawBase32HexEncoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
rawBase32HexEncoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
case ByteStringBase64Encoding:
|
||||
di.w.WriteString("b64'")
|
||||
sz := base64.RawURLEncoding.EncodedLen(len(val))
|
||||
di.w.Grow(sz)
|
||||
dst := di.w.Bytes()[di.w.Len() : di.w.Len()+sz]
|
||||
base64.RawURLEncoding.Encode(dst, val)
|
||||
di.w.Write(dst)
|
||||
di.w.WriteByte('\'')
|
||||
return nil
|
||||
|
||||
default:
|
||||
// It should not be possible for users to construct a *diagMode with an invalid byte
|
||||
// string encoding.
|
||||
panic(fmt.Sprintf("diagmode has invalid ByteStringEncoding %v", di.dm.byteStringEncoding))
|
||||
}
|
||||
}
|
||||
|
||||
const utf16SurrSelf = rune(0x10000)
|
||||
|
||||
// quote should be either `'` or `"`
|
||||
func (di *diagnose) encodeTextString(val string, quote byte) error {
|
||||
di.w.WriteByte(quote)
|
||||
|
||||
for i := 0; i < len(val); {
|
||||
if b := val[i]; b < utf8.RuneSelf {
|
||||
switch {
|
||||
case b == '\t', b == '\n', b == '\r', b == '\\', b == quote:
|
||||
di.w.WriteByte('\\')
|
||||
|
||||
switch b {
|
||||
case '\t':
|
||||
b = 't'
|
||||
case '\n':
|
||||
b = 'n'
|
||||
case '\r':
|
||||
b = 'r'
|
||||
}
|
||||
di.w.WriteByte(b)
|
||||
|
||||
case b >= ' ' && b <= '~':
|
||||
di.w.WriteByte(b)
|
||||
|
||||
default:
|
||||
di.writeU16(rune(b))
|
||||
}
|
||||
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
c, size := utf8.DecodeRuneInString(val[i:])
|
||||
switch {
|
||||
case c == utf8.RuneError:
|
||||
return &SemanticError{"cbor: invalid UTF-8 string"}
|
||||
|
||||
case c < utf16SurrSelf:
|
||||
di.writeU16(c)
|
||||
|
||||
default:
|
||||
c1, c2 := utf16.EncodeRune(c)
|
||||
di.writeU16(c1)
|
||||
di.writeU16(c2)
|
||||
}
|
||||
|
||||
i += size
|
||||
}
|
||||
|
||||
di.w.WriteByte(quote)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (di *diagnose) encodeFloat(ai byte, val uint64) error {
|
||||
f64 := float64(0)
|
||||
switch ai {
|
||||
case additionalInformationAsFloat16:
|
||||
f16 := float16.Frombits(uint16(val))
|
||||
switch {
|
||||
case f16.IsNaN():
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f16.IsInf(1):
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f16.IsInf(-1):
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
default:
|
||||
f64 = float64(f16.Float32())
|
||||
}
|
||||
|
||||
case additionalInformationAsFloat32:
|
||||
f32 := math.Float32frombits(uint32(val))
|
||||
switch {
|
||||
case f32 != f32:
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f32 > math.MaxFloat32:
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f32 < -math.MaxFloat32:
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
default:
|
||||
f64 = float64(f32)
|
||||
}
|
||||
|
||||
case additionalInformationAsFloat64:
|
||||
f64 = math.Float64frombits(val)
|
||||
switch {
|
||||
case f64 != f64:
|
||||
di.w.WriteString("NaN")
|
||||
return nil
|
||||
case f64 > math.MaxFloat64:
|
||||
di.w.WriteString("Infinity")
|
||||
return nil
|
||||
case f64 < -math.MaxFloat64:
|
||||
di.w.WriteString("-Infinity")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Use ES6 number to string conversion which should match most JSON generators.
|
||||
// Inspired by https://github.com/golang/go/blob/4df10fba1687a6d4f51d7238a403f8f2298f6a16/src/encoding/json/encode.go#L585
|
||||
const bitSize = 64
|
||||
b := make([]byte, 0, 32)
|
||||
if abs := math.Abs(f64); abs != 0 && (abs < 1e-6 || abs >= 1e21) {
|
||||
b = strconv.AppendFloat(b, f64, 'e', -1, bitSize)
|
||||
// clean up e-09 to e-9
|
||||
n := len(b)
|
||||
if n >= 4 && string(b[n-4:n-1]) == "e-0" {
|
||||
b = append(b[:n-2], b[n-1])
|
||||
}
|
||||
} else {
|
||||
b = strconv.AppendFloat(b, f64, 'f', -1, bitSize)
|
||||
}
|
||||
|
||||
// add decimal point and trailing zero if needed
|
||||
if bytes.IndexByte(b, '.') < 0 {
|
||||
if i := bytes.IndexByte(b, 'e'); i < 0 {
|
||||
b = append(b, '.', '0')
|
||||
} else {
|
||||
b = append(b[:i+2], b[i:]...)
|
||||
b[i] = '.'
|
||||
b[i+1] = '0'
|
||||
}
|
||||
}
|
||||
|
||||
di.w.WriteString(string(b))
|
||||
|
||||
if di.dm.floatPrecisionIndicator {
|
||||
switch ai {
|
||||
case additionalInformationAsFloat16:
|
||||
di.w.WriteString("_1")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat32:
|
||||
di.w.WriteString("_2")
|
||||
return nil
|
||||
|
||||
case additionalInformationAsFloat64:
|
||||
di.w.WriteString("_3")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
129
vendor/github.com/fxamacker/cbor/v2/doc.go
generated
vendored
Normal file
@@ -0,0 +1,129 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

/*
Package cbor is a modern CBOR codec (RFC 8949 & RFC 7049) with CBOR tags,
Go struct tags (toarray/keyasint/omitempty), Core Deterministic Encoding,
CTAP2, Canonical CBOR, float64->32->16, and duplicate map key detection.

Encoding options allow "preferred serialization" by encoding integers and floats
to their smallest forms (e.g. float16) when values fit.

Struct tags like "keyasint", "toarray" and "omitempty" make CBOR data smaller
and easier to use with structs.

For example, "toarray" tag makes struct fields encode to CBOR array elements. And
"keyasint" makes a field encode to an element of CBOR map with specified int key.

Latest docs can be viewed at https://github.com/fxamacker/cbor#cbor-library-in-go

# Basics

The Quick Start guide is at https://github.com/fxamacker/cbor#quick-start

Function signatures identical to encoding/json include:

	Marshal, Unmarshal, NewEncoder, NewDecoder, (*Encoder).Encode, (*Decoder).Decode.

Standard interfaces include:

	BinaryMarshaler, BinaryUnmarshaler, Marshaler, and Unmarshaler.

Custom encoding and decoding is possible by implementing standard interfaces for
user-defined Go types.

Codec functions are available at package-level (using defaults options) or by
creating modes from options at runtime.

"Mode" in this API means definite way of encoding (EncMode) or decoding (DecMode).

EncMode and DecMode interfaces are created from EncOptions or DecOptions structs.

	em, err := cbor.EncOptions{...}.EncMode()
	em, err := cbor.CanonicalEncOptions().EncMode()
	em, err := cbor.CTAP2EncOptions().EncMode()

Modes use immutable options to avoid side-effects and simplify concurrency. Behavior of
modes won't accidentally change at runtime after they're created.

Modes are intended to be reused and are safe for concurrent use.

EncMode and DecMode Interfaces

	// EncMode interface uses immutable options and is safe for concurrent use.
	type EncMode interface {
		Marshal(v interface{}) ([]byte, error)
		NewEncoder(w io.Writer) *Encoder
		EncOptions() EncOptions // returns copy of options
	}

	// DecMode interface uses immutable options and is safe for concurrent use.
	type DecMode interface {
		Unmarshal(data []byte, v interface{}) error
		NewDecoder(r io.Reader) *Decoder
		DecOptions() DecOptions // returns copy of options
	}

Using Default Encoding Mode

	b, err := cbor.Marshal(v)

	encoder := cbor.NewEncoder(w)
	err = encoder.Encode(v)

Using Default Decoding Mode

	err := cbor.Unmarshal(b, &v)

	decoder := cbor.NewDecoder(r)
	err = decoder.Decode(&v)

Creating and Using Encoding Modes

	// Create EncOptions using either struct literal or a function.
	opts := cbor.CanonicalEncOptions()

	// If needed, modify encoding options
	opts.Time = cbor.TimeUnix

	// Create reusable EncMode interface with immutable options, safe for concurrent use.
	em, err := opts.EncMode()

	// Use EncMode like encoding/json, with same function signatures.
	b, err := em.Marshal(v)
	// or
	encoder := em.NewEncoder(w)
	err := encoder.Encode(v)

	// NOTE: Both em.Marshal(v) and encoder.Encode(v) use encoding options
	// specified during creation of em (encoding mode).

# CBOR Options

Predefined Encoding Options: https://github.com/fxamacker/cbor#predefined-encoding-options

Encoding Options: https://github.com/fxamacker/cbor#encoding-options

Decoding Options: https://github.com/fxamacker/cbor#decoding-options

# Struct Tags

Struct tags like `cbor:"name,omitempty"` and `json:"name,omitempty"` work as expected.
If both struct tags are specified then `cbor` is used.

Struct tags like "keyasint", "toarray", and "omitempty" make it easy to use
very compact formats like COSE and CWT (CBOR Web Tokens) with structs.

For example, "toarray" makes struct fields encode to array elements. And "keyasint"
makes struct fields encode to elements of CBOR map with int keys.

https://raw.githubusercontent.com/fxamacker/images/master/cbor/v2.0.0/cbor_easy_api.png

Struct tags are listed at https://github.com/fxamacker/cbor#struct-tags-1

# Tests and Fuzzing

Over 375 tests are included in this package. Cover-guided fuzzing is handled by
a private fuzzer that replaced fxamacker/cbor-fuzz years ago.
*/
package cbor
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
1989
vendor/github.com/fxamacker/cbor/v2/encode.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
94
vendor/github.com/fxamacker/cbor/v2/encode_map.go
generated
vendored
Normal file
@@ -0,0 +1,94 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build go1.20

package cbor

import (
	"bytes"
	"reflect"
	"sync"
)

type mapKeyValueEncodeFunc struct {
	kf, ef       encodeFunc
	kpool, vpool sync.Pool
}

func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	iterk := me.kpool.Get().(*reflect.Value)
	defer func() {
		iterk.SetZero()
		me.kpool.Put(iterk)
	}()
	iterv := me.vpool.Get().(*reflect.Value)
	defer func() {
		iterv.SetZero()
		me.vpool.Put(iterv)
	}()

	if kvs == nil {
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			iterk.SetIterKey(iter)
			iterv.SetIterValue(iter)

			if err := me.kf(e, em, *iterk); err != nil {
				return err
			}
			if err := me.ef(e, em, *iterv); err != nil {
				return err
			}
		}
		return nil
	}

	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		iterk.SetIterKey(iter)
		iterv.SetIterValue(iter)

		offset := e.Len()
		if err := me.kf(e, em, *iterk); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, *iterv); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}

func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{
		kf: kf,
		ef: ef,
		kpool: sync.Pool{
			New: func() interface{} {
				rk := reflect.New(t.Key()).Elem()
				return &rk
			},
		},
		vpool: sync.Pool{
			New: func() interface{} {
				rv := reflect.New(t.Elem()).Elem()
				return &rv
			},
		},
	}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
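The go1.20 variant above avoids per-entry allocations by pooling one addressable reflect.Value per key and value and filling it with SetIterKey/SetIterValue instead of calling iter.Key()/iter.Value(). A standalone sketch of that pattern (not part of the vendored file; the go1.20 build tag appears to track newer reflect helpers such as Value.SetZero):

package main

import (
	"fmt"
	"reflect"
	"sync"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	v := reflect.ValueOf(m)

	// Pool one addressable reflect.Value for map keys, as encode_map.go does.
	kpool := sync.Pool{New: func() interface{} {
		rk := reflect.New(v.Type().Key()).Elem()
		return &rk
	}}

	k := kpool.Get().(*reflect.Value)
	defer func() {
		k.SetZero() // drop the held reference before returning it to the pool
		kpool.Put(k)
	}()

	for iter := v.MapRange(); iter.Next(); {
		k.SetIterKey(iter) // reuse k instead of allocating a new Value via iter.Key()
		fmt.Println(k.String(), iter.Value().Int())
	}
}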
60 vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go (generated, vendored, new file)
@@ -0,0 +1,60 @@
// Copyright (c) Faye Amacker. All rights reserved.
// Licensed under the MIT License. See LICENSE in the project root for license information.

//go:build !go1.20

package cbor

import (
	"bytes"
	"reflect"
)

type mapKeyValueEncodeFunc struct {
	kf, ef encodeFunc
}

func (me *mapKeyValueEncodeFunc) encodeKeyValues(e *bytes.Buffer, em *encMode, v reflect.Value, kvs []keyValue) error {
	if kvs == nil {
		for i, iter := 0, v.MapRange(); iter.Next(); i++ {
			if err := me.kf(e, em, iter.Key()); err != nil {
				return err
			}
			if err := me.ef(e, em, iter.Value()); err != nil {
				return err
			}
		}
		return nil
	}

	initial := e.Len()
	for i, iter := 0, v.MapRange(); iter.Next(); i++ {
		offset := e.Len()
		if err := me.kf(e, em, iter.Key()); err != nil {
			return err
		}
		valueOffset := e.Len()
		if err := me.ef(e, em, iter.Value()); err != nil {
			return err
		}
		kvs[i] = keyValue{
			offset:      offset - initial,
			valueOffset: valueOffset - initial,
			nextOffset:  e.Len() - initial,
		}
	}

	return nil
}

func getEncodeMapFunc(t reflect.Type) encodeFunc {
	kf, _ := getEncodeFunc(t.Key())
	ef, _ := getEncodeFunc(t.Elem())
	if kf == nil || ef == nil {
		return nil
	}
	mkv := &mapKeyValueEncodeFunc{kf: kf, ef: ef}
	return mapEncodeFunc{
		e: mkv.encodeKeyValues,
	}.encode
}
69 vendor/github.com/fxamacker/cbor/v2/simplevalue.go (generated, vendored, new file)
@@ -0,0 +1,69 @@
package cbor

import (
	"errors"
	"fmt"
	"reflect"
)

// SimpleValue represents CBOR simple value.
// CBOR simple value is:
//   - an extension point like CBOR tag.
//   - a subset of CBOR major type 7 that isn't floating-point.
//   - "identified by a number between 0 and 255, but distinct from that number itself".
//     For example, "a simple value 2 is not equivalent to an integer 2" as a CBOR map key.
//
// CBOR simple values identified by 20..23 are: "false", "true" , "null", and "undefined".
// Other CBOR simple values are currently unassigned/reserved by IANA.
type SimpleValue uint8

var (
	typeSimpleValue = reflect.TypeOf(SimpleValue(0))
)

// MarshalCBOR encodes SimpleValue as CBOR simple value (major type 7).
func (sv SimpleValue) MarshalCBOR() ([]byte, error) {
	// RFC 8949 3.3. Floating-Point Numbers and Values with No Content says:
	// "An encoder MUST NOT issue two-byte sequences that start with 0xf8
	// (major type 7, additional information 24) and continue with a byte
	// less than 0x20 (32 decimal). Such sequences are not well-formed.
	// (This implies that an encoder cannot encode false, true, null, or
	// undefined in two-byte sequences and that only the one-byte variants
	// of these are well-formed; more generally speaking, each simple value
	// only has a single representation variant)."

	switch {
	case sv <= maxSimpleValueInAdditionalInformation:
		return []byte{byte(cborTypePrimitives) | byte(sv)}, nil

	case sv >= minSimpleValueIn1ByteArgument:
		return []byte{byte(cborTypePrimitives) | additionalInformationWith1ByteArgument, byte(sv)}, nil

	default:
		return nil, &UnsupportedValueError{msg: fmt.Sprintf("SimpleValue(%d)", sv)}
	}
}

// UnmarshalCBOR decodes CBOR simple value (major type 7) to SimpleValue.
func (sv *SimpleValue) UnmarshalCBOR(data []byte) error {
	if sv == nil {
		return errors.New("cbor.SimpleValue: UnmarshalCBOR on nil pointer")
	}

	d := decoder{data: data, dm: defaultDecMode}

	typ, ai, val := d.getHead()

	if typ != cborTypePrimitives {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue"}
	}
	if ai > additionalInformationWith1ByteArgument {
		return &UnmarshalTypeError{CBORType: typ.String(), GoType: "SimpleValue", errorMsg: "not simple values"}
	}

	// It is safe to cast val to uint8 here because
	// - data is already verified to be well-formed CBOR simple value and
	// - val is <= math.MaxUint8.
	*sv = SimpleValue(val)
	return nil
}
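A hedged usage sketch of the SimpleValue type introduced above; it assumes cbor.Marshal/Unmarshal dispatch to the MarshalCBOR/UnmarshalCBOR methods shown in the diff (encode.go is suppressed on this page). The 0xf0 byte is simply major type 7 with additional information 16.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Encode the (currently unassigned) simple value 16.
	b, err := cbor.Marshal(cbor.SimpleValue(16))
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", b) // f0

	// Decode it back through UnmarshalCBOR.
	var sv cbor.SimpleValue
	if err := cbor.Unmarshal(b, &sv); err != nil {
		panic(err)
	}
	fmt.Println(uint8(sv)) // 16
}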
277 vendor/github.com/fxamacker/cbor/v2/stream.go (generated, vendored, new file)
@@ -0,0 +1,277 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// Decoder reads and decodes CBOR values from io.Reader.
|
||||
type Decoder struct {
|
||||
r io.Reader
|
||||
d decoder
|
||||
buf []byte
|
||||
off int // next read offset in buf
|
||||
bytesRead int
|
||||
}
|
||||
|
||||
// NewDecoder returns a new decoder that reads and decodes from r using
|
||||
// the default decoding options.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return defaultDecMode.NewDecoder(r)
|
||||
}
|
||||
|
||||
// Decode reads CBOR value and decodes it into the value pointed to by v.
|
||||
func (dec *Decoder) Decode(v interface{}) error {
|
||||
_, err := dec.readNext()
|
||||
if err != nil {
|
||||
// Return validation error or read error.
|
||||
return err
|
||||
}
|
||||
|
||||
dec.d.reset(dec.buf[dec.off:])
|
||||
err = dec.d.value(v)
|
||||
|
||||
// Increment dec.off even if decoding err is not nil because
|
||||
// dec.d.off points to the next CBOR data item if current
|
||||
// CBOR data item is valid but failed to be decoded into v.
|
||||
// This allows next CBOR data item to be decoded in next
|
||||
// call to this function.
|
||||
dec.off += dec.d.off
|
||||
dec.bytesRead += dec.d.off
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip skips to the next CBOR data item (if there is any),
|
||||
// otherwise it returns error such as io.EOF, io.UnexpectedEOF, etc.
|
||||
func (dec *Decoder) Skip() error {
|
||||
n, err := dec.readNext()
|
||||
if err != nil {
|
||||
// Return validation error or read error.
|
||||
return err
|
||||
}
|
||||
|
||||
dec.off += n
|
||||
dec.bytesRead += n
|
||||
return nil
|
||||
}
|
||||
|
||||
// NumBytesRead returns the number of bytes read.
|
||||
func (dec *Decoder) NumBytesRead() int {
|
||||
return dec.bytesRead
|
||||
}
|
||||
|
||||
// Buffered returns a reader for data remaining in Decoder's buffer.
|
||||
// Returned reader is valid until the next call to Decode or Skip.
|
||||
func (dec *Decoder) Buffered() io.Reader {
|
||||
return bytes.NewReader(dec.buf[dec.off:])
|
||||
}
|
||||
|
||||
// readNext() reads next CBOR data item from Reader to buffer.
|
||||
// It returns the size of next CBOR data item.
|
||||
// It also returns validation error or read error if any.
|
||||
func (dec *Decoder) readNext() (int, error) {
|
||||
var readErr error
|
||||
var validErr error
|
||||
|
||||
for {
|
||||
// Process any unread data in dec.buf.
|
||||
if dec.off < len(dec.buf) {
|
||||
dec.d.reset(dec.buf[dec.off:])
|
||||
off := dec.off // Save offset before data validation
|
||||
validErr = dec.d.wellformed(true, false)
|
||||
dec.off = off // Restore offset
|
||||
|
||||
if validErr == nil {
|
||||
return dec.d.off, nil
|
||||
}
|
||||
|
||||
if validErr != io.ErrUnexpectedEOF {
|
||||
return 0, validErr
|
||||
}
|
||||
|
||||
// Process last read error on io.ErrUnexpectedEOF.
|
||||
if readErr != nil {
|
||||
if readErr == io.EOF {
|
||||
// current CBOR data item is incomplete.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, readErr
|
||||
}
|
||||
}
|
||||
|
||||
// More data is needed and there was no read error.
|
||||
var n int
|
||||
for n == 0 {
|
||||
n, readErr = dec.read()
|
||||
if n == 0 && readErr != nil {
|
||||
// No more data can be read and read error is encountered.
|
||||
// At this point, validErr is either nil or io.ErrUnexpectedEOF.
|
||||
if readErr == io.EOF {
|
||||
if validErr == io.ErrUnexpectedEOF {
|
||||
// current CBOR data item is incomplete.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
return 0, readErr
|
||||
}
|
||||
}
|
||||
|
||||
// At this point, dec.buf contains new data from last read (n > 0).
|
||||
}
|
||||
}
|
||||
|
||||
// read() reads data from Reader to buffer.
|
||||
// It returns number of bytes read and any read error encountered.
|
||||
// Postconditions:
|
||||
// - dec.buf contains previously unread data and new data.
|
||||
// - dec.off is 0.
|
||||
func (dec *Decoder) read() (int, error) {
|
||||
// Grow buf if needed.
|
||||
const minRead = 512
|
||||
if cap(dec.buf)-len(dec.buf)+dec.off < minRead {
|
||||
oldUnreadBuf := dec.buf[dec.off:]
|
||||
dec.buf = make([]byte, len(dec.buf)-dec.off, 2*cap(dec.buf)+minRead)
|
||||
dec.overwriteBuf(oldUnreadBuf)
|
||||
}
|
||||
|
||||
// Copy unread data over read data and reset off to 0.
|
||||
if dec.off > 0 {
|
||||
dec.overwriteBuf(dec.buf[dec.off:])
|
||||
}
|
||||
|
||||
// Read from reader and reslice buf.
|
||||
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
|
||||
dec.buf = dec.buf[0 : len(dec.buf)+n]
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (dec *Decoder) overwriteBuf(newBuf []byte) {
|
||||
n := copy(dec.buf, newBuf)
|
||||
dec.buf = dec.buf[:n]
|
||||
dec.off = 0
|
||||
}
|
||||
|
||||
// Encoder writes CBOR values to io.Writer.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
em *encMode
|
||||
indefTypes []cborType
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w using the default encoding options.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return defaultEncMode.NewEncoder(w)
|
||||
}
|
||||
|
||||
// Encode writes the CBOR encoding of v.
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
if len(enc.indefTypes) > 0 && v != nil {
|
||||
indefType := enc.indefTypes[len(enc.indefTypes)-1]
|
||||
if indefType == cborTypeTextString {
|
||||
k := reflect.TypeOf(v).Kind()
|
||||
if k != reflect.String {
|
||||
return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length text string")
|
||||
}
|
||||
} else if indefType == cborTypeByteString {
|
||||
t := reflect.TypeOf(v)
|
||||
k := t.Kind()
|
||||
if (k != reflect.Array && k != reflect.Slice) || t.Elem().Kind() != reflect.Uint8 {
|
||||
return errors.New("cbor: cannot encode item type " + k.String() + " for indefinite-length byte string")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buf := getEncodeBuffer()
|
||||
|
||||
err := encode(buf, enc.em, reflect.ValueOf(v))
|
||||
if err == nil {
|
||||
_, err = enc.w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
putEncodeBuffer(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// StartIndefiniteByteString starts byte string encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes definite length byte strings
|
||||
// ("chunks") as one contiguous string until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteByteString() error {
|
||||
return enc.startIndefinite(cborTypeByteString)
|
||||
}
|
||||
|
||||
// StartIndefiniteTextString starts text string encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes definite length text strings
|
||||
// ("chunks") as one contiguous string until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteTextString() error {
|
||||
return enc.startIndefinite(cborTypeTextString)
|
||||
}
|
||||
|
||||
// StartIndefiniteArray starts array encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes elements of the array
|
||||
// until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteArray() error {
|
||||
return enc.startIndefinite(cborTypeArray)
|
||||
}
|
||||
|
||||
// StartIndefiniteMap starts array encoding of indefinite length.
|
||||
// Subsequent calls of (*Encoder).Encode() encodes elements of the map
|
||||
// until EndIndefinite is called.
|
||||
func (enc *Encoder) StartIndefiniteMap() error {
|
||||
return enc.startIndefinite(cborTypeMap)
|
||||
}
|
||||
|
||||
// EndIndefinite closes last opened indefinite length value.
|
||||
func (enc *Encoder) EndIndefinite() error {
|
||||
if len(enc.indefTypes) == 0 {
|
||||
return errors.New("cbor: cannot encode \"break\" code outside indefinite length values")
|
||||
}
|
||||
_, err := enc.w.Write([]byte{cborBreakFlag})
|
||||
if err == nil {
|
||||
enc.indefTypes = enc.indefTypes[:len(enc.indefTypes)-1]
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var cborIndefHeader = map[cborType][]byte{
|
||||
cborTypeByteString: {cborByteStringWithIndefiniteLengthHead},
|
||||
cborTypeTextString: {cborTextStringWithIndefiniteLengthHead},
|
||||
cborTypeArray: {cborArrayWithIndefiniteLengthHead},
|
||||
cborTypeMap: {cborMapWithIndefiniteLengthHead},
|
||||
}
|
||||
|
||||
func (enc *Encoder) startIndefinite(typ cborType) error {
|
||||
if enc.em.indefLength == IndefLengthForbidden {
|
||||
return &IndefiniteLengthError{typ}
|
||||
}
|
||||
_, err := enc.w.Write(cborIndefHeader[typ])
|
||||
if err == nil {
|
||||
enc.indefTypes = append(enc.indefTypes, typ)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// RawMessage is a raw encoded CBOR value.
|
||||
type RawMessage []byte
|
||||
|
||||
// MarshalCBOR returns m or CBOR nil if m is nil.
|
||||
func (m RawMessage) MarshalCBOR() ([]byte, error) {
|
||||
if len(m) == 0 {
|
||||
return cborNil, nil
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// UnmarshalCBOR creates a copy of data and saves to *m.
|
||||
func (m *RawMessage) UnmarshalCBOR(data []byte) error {
|
||||
if m == nil {
|
||||
return errors.New("cbor.RawMessage: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
*m = append((*m)[0:0], data...)
|
||||
return nil
|
||||
}
|
||||
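A hedged sketch of the streaming Encoder/Decoder API added in stream.go above: values written between StartIndefiniteArray and EndIndefinite form one indefinite-length CBOR array, which the Decoder then reads back from the same buffer.

package main

import (
	"bytes"
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	var buf bytes.Buffer

	enc := cbor.NewEncoder(&buf)
	if err := enc.StartIndefiniteArray(); err != nil {
		panic(err)
	}
	for _, n := range []int{1, 2, 3} {
		if err := enc.Encode(n); err != nil {
			panic(err)
		}
	}
	if err := enc.EndIndefinite(); err != nil { // writes the "break" code
		panic(err)
	}

	dec := cbor.NewDecoder(&buf)
	var got []int
	if err := dec.Decode(&got); err != nil {
		panic(err)
	}
	fmt.Println(got, dec.NumBytesRead()) // [1 2 3] 5
}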
260 vendor/github.com/fxamacker/cbor/v2/structfields.go (generated, vendored, new file)
@@ -0,0 +1,260 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type field struct {
|
||||
name string
|
||||
nameAsInt int64 // used to decoder to match field name with CBOR int
|
||||
cborName []byte
|
||||
cborNameByteString []byte // major type 2 name encoding iff cborName has major type 3
|
||||
idx []int
|
||||
typ reflect.Type
|
||||
ef encodeFunc
|
||||
ief isEmptyFunc
|
||||
typInfo *typeInfo // used to decoder to reuse type info
|
||||
tagged bool // used to choose dominant field (at the same level tagged fields dominate untagged fields)
|
||||
omitEmpty bool // used to skip empty field
|
||||
keyAsInt bool // used to encode/decode field name as int
|
||||
}
|
||||
|
||||
type fields []*field
|
||||
|
||||
// indexFieldSorter sorts fields by field idx at each level, breaking ties with idx depth.
|
||||
type indexFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *indexFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *indexFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *indexFieldSorter) Less(i, j int) bool {
|
||||
iIdx, jIdx := x.fields[i].idx, x.fields[j].idx
|
||||
for k := 0; k < len(iIdx) && k < len(jIdx); k++ {
|
||||
if iIdx[k] != jIdx[k] {
|
||||
return iIdx[k] < jIdx[k]
|
||||
}
|
||||
}
|
||||
return len(iIdx) <= len(jIdx)
|
||||
}
|
||||
|
||||
// nameLevelAndTagFieldSorter sorts fields by field name, idx depth, and presence of tag.
|
||||
type nameLevelAndTagFieldSorter struct {
|
||||
fields fields
|
||||
}
|
||||
|
||||
func (x *nameLevelAndTagFieldSorter) Len() int {
|
||||
return len(x.fields)
|
||||
}
|
||||
|
||||
func (x *nameLevelAndTagFieldSorter) Swap(i, j int) {
|
||||
x.fields[i], x.fields[j] = x.fields[j], x.fields[i]
|
||||
}
|
||||
|
||||
func (x *nameLevelAndTagFieldSorter) Less(i, j int) bool {
|
||||
fi, fj := x.fields[i], x.fields[j]
|
||||
if fi.name != fj.name {
|
||||
return fi.name < fj.name
|
||||
}
|
||||
if len(fi.idx) != len(fj.idx) {
|
||||
return len(fi.idx) < len(fj.idx)
|
||||
}
|
||||
if fi.tagged != fj.tagged {
|
||||
return fi.tagged
|
||||
}
|
||||
return i < j // Field i and j have the same name, depth, and tagged status. Nothing else matters.
|
||||
}
|
||||
|
||||
// getFields returns visible fields of struct type t following visibility rules for JSON encoding.
|
||||
func getFields(t reflect.Type) (flds fields, structOptions string) {
|
||||
// Get special field "_" tag options
|
||||
if f, ok := t.FieldByName("_"); ok {
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag != "-" {
|
||||
structOptions = tag
|
||||
}
|
||||
}
|
||||
|
||||
// nTypes contains next level anonymous fields' types and indexes
|
||||
// (there can be multiple fields of the same type at the same level)
|
||||
flds, nTypes := appendFields(t, nil, nil, nil)
|
||||
|
||||
if len(nTypes) > 0 {
|
||||
|
||||
var cTypes map[reflect.Type][][]int // current level anonymous fields' types and indexes
|
||||
vTypes := map[reflect.Type]bool{t: true} // visited field types at less nested levels
|
||||
|
||||
for len(nTypes) > 0 {
|
||||
cTypes, nTypes = nTypes, nil
|
||||
|
||||
for t, idx := range cTypes {
|
||||
// If there are multiple anonymous fields of the same struct type at the same level, all are ignored.
|
||||
if len(idx) > 1 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Anonymous field of the same type at deeper nested level is ignored.
|
||||
if vTypes[t] {
|
||||
continue
|
||||
}
|
||||
vTypes[t] = true
|
||||
|
||||
flds, nTypes = appendFields(t, idx[0], flds, nTypes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sort.Sort(&nameLevelAndTagFieldSorter{flds})
|
||||
|
||||
// Keep visible fields.
|
||||
j := 0 // index of next unique field
|
||||
for i := 0; i < len(flds); {
|
||||
name := flds[i].name
|
||||
if i == len(flds)-1 || // last field
|
||||
name != flds[i+1].name || // field i has unique field name
|
||||
len(flds[i].idx) < len(flds[i+1].idx) || // field i is at a less nested level than field i+1
|
||||
(flds[i].tagged && !flds[i+1].tagged) { // field i is tagged while field i+1 is not
|
||||
flds[j] = flds[i]
|
||||
j++
|
||||
}
|
||||
|
||||
// Skip fields with the same field name.
|
||||
for i++; i < len(flds) && name == flds[i].name; i++ { //nolint:revive
|
||||
}
|
||||
}
|
||||
if j != len(flds) {
|
||||
flds = flds[:j]
|
||||
}
|
||||
|
||||
// Sort fields by field index
|
||||
sort.Sort(&indexFieldSorter{flds})
|
||||
|
||||
return flds, structOptions
|
||||
}
|
||||
|
||||
// appendFields appends type t's exportable fields to flds and anonymous struct fields to nTypes .
|
||||
func appendFields(
|
||||
t reflect.Type,
|
||||
idx []int,
|
||||
flds fields,
|
||||
nTypes map[reflect.Type][][]int,
|
||||
) (
|
||||
_flds fields,
|
||||
_nTypes map[reflect.Type][][]int,
|
||||
) {
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
|
||||
ft := f.Type
|
||||
for ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
|
||||
if !isFieldExportable(f, ft.Kind()) {
|
||||
continue
|
||||
}
|
||||
|
||||
tag := f.Tag.Get("cbor")
|
||||
if tag == "" {
|
||||
tag = f.Tag.Get("json")
|
||||
}
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
tagged := tag != ""
|
||||
|
||||
// Parse field tag options
|
||||
var tagFieldName string
|
||||
var omitempty, keyasint bool
|
||||
for j := 0; tag != ""; j++ {
|
||||
var token string
|
||||
idx := strings.IndexByte(tag, ',')
|
||||
if idx == -1 {
|
||||
token, tag = tag, ""
|
||||
} else {
|
||||
token, tag = tag[:idx], tag[idx+1:]
|
||||
}
|
||||
if j == 0 {
|
||||
tagFieldName = token
|
||||
} else {
|
||||
switch token {
|
||||
case "omitempty":
|
||||
omitempty = true
|
||||
case "keyasint":
|
||||
keyasint = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fieldName := tagFieldName
|
||||
if tagFieldName == "" {
|
||||
fieldName = f.Name
|
||||
}
|
||||
|
||||
fIdx := make([]int, len(idx)+1)
|
||||
copy(fIdx, idx)
|
||||
fIdx[len(fIdx)-1] = i
|
||||
|
||||
if !f.Anonymous || ft.Kind() != reflect.Struct || tagFieldName != "" {
|
||||
flds = append(flds, &field{
|
||||
name: fieldName,
|
||||
idx: fIdx,
|
||||
typ: f.Type,
|
||||
omitEmpty: omitempty,
|
||||
keyAsInt: keyasint,
|
||||
tagged: tagged})
|
||||
} else {
|
||||
if nTypes == nil {
|
||||
nTypes = make(map[reflect.Type][][]int)
|
||||
}
|
||||
nTypes[ft] = append(nTypes[ft], fIdx)
|
||||
}
|
||||
}
|
||||
|
||||
return flds, nTypes
|
||||
}
|
||||
|
||||
// isFieldExportable returns true if f is an exportable (regular or anonymous) field or
|
||||
// a nonexportable anonymous field of struct type.
|
||||
// Nonexportable anonymous field of struct type can contain exportable fields.
|
||||
func isFieldExportable(f reflect.StructField, fk reflect.Kind) bool { //nolint:gocritic // ignore hugeParam
|
||||
exportable := f.PkgPath == ""
|
||||
return exportable || (f.Anonymous && fk == reflect.Struct)
|
||||
}
|
||||
|
||||
type embeddedFieldNullPtrFunc func(reflect.Value) (reflect.Value, error)
|
||||
|
||||
// getFieldValue returns field value of struct v by index. When encountering null pointer
|
||||
// to anonymous (embedded) struct field, f is called with the last traversed field value.
|
||||
func getFieldValue(v reflect.Value, idx []int, f embeddedFieldNullPtrFunc) (fv reflect.Value, err error) {
|
||||
fv = v
|
||||
for i, n := range idx {
|
||||
fv = fv.Field(n)
|
||||
|
||||
if i < len(idx)-1 {
|
||||
if fv.Kind() == reflect.Ptr && fv.Type().Elem().Kind() == reflect.Struct {
|
||||
if fv.IsNil() {
|
||||
// Null pointer to embedded struct field
|
||||
fv, err = f(fv)
|
||||
if err != nil || !fv.IsValid() {
|
||||
return fv, err
|
||||
}
|
||||
}
|
||||
fv = fv.Elem()
|
||||
}
|
||||
}
|
||||
}
|
||||
return fv, nil
|
||||
}
|
||||
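A hedged sketch of the field-selection rules implemented in structfields.go above: a `cbor` tag takes precedence over a `json` tag, and a field at a shallower embedding level hides a same-named field from an embedded struct.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type inner struct {
	Name string // hidden by outer.Name, which sits at a shallower level
}

type outer struct {
	inner
	Name string
	ID   int `cbor:"id" json:"identifier"` // encodes as "id": cbor tags beat json tags
}

func main() {
	b, err := cbor.Marshal(outer{inner: inner{Name: "deep"}, Name: "shallow", ID: 7})
	if err != nil {
		panic(err)
	}
	var m map[string]interface{}
	if err := cbor.Unmarshal(b, &m); err != nil {
		panic(err)
	}
	fmt.Println(m) // map[Name:shallow id:7]
}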
299 vendor/github.com/fxamacker/cbor/v2/tag.go (generated, vendored, new file)
@@ -0,0 +1,299 @@
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Tag represents CBOR tag data, including tag number and unmarshaled tag content. Marshaling and
|
||||
// unmarshaling of tag content is subject to any encode and decode options that would apply to
|
||||
// enclosed data item if it were to appear outside of a tag.
|
||||
type Tag struct {
|
||||
Number uint64
|
||||
Content interface{}
|
||||
}
|
||||
|
||||
// RawTag represents CBOR tag data, including tag number and raw tag content.
|
||||
// RawTag implements Unmarshaler and Marshaler interfaces.
|
||||
type RawTag struct {
|
||||
Number uint64
|
||||
Content RawMessage
|
||||
}
|
||||
|
||||
// UnmarshalCBOR sets *t with tag number and raw tag content copied from data.
|
||||
func (t *RawTag) UnmarshalCBOR(data []byte) error {
|
||||
if t == nil {
|
||||
return errors.New("cbor.RawTag: UnmarshalCBOR on nil pointer")
|
||||
}
|
||||
|
||||
// Decoding CBOR null and undefined to cbor.RawTag is no-op.
|
||||
if len(data) == 1 && (data[0] == 0xf6 || data[0] == 0xf7) {
|
||||
return nil
|
||||
}
|
||||
|
||||
d := decoder{data: data, dm: defaultDecMode}
|
||||
|
||||
// Unmarshal tag number.
|
||||
typ, _, num := d.getHead()
|
||||
if typ != cborTypeTag {
|
||||
return &UnmarshalTypeError{CBORType: typ.String(), GoType: typeRawTag.String()}
|
||||
}
|
||||
t.Number = num
|
||||
|
||||
// Unmarshal tag content.
|
||||
c := d.data[d.off:]
|
||||
t.Content = make([]byte, len(c))
|
||||
copy(t.Content, c)
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalCBOR returns CBOR encoding of t.
|
||||
func (t RawTag) MarshalCBOR() ([]byte, error) {
|
||||
if t.Number == 0 && len(t.Content) == 0 {
|
||||
// Marshal uninitialized cbor.RawTag
|
||||
b := make([]byte, len(cborNil))
|
||||
copy(b, cborNil)
|
||||
return b, nil
|
||||
}
|
||||
|
||||
e := getEncodeBuffer()
|
||||
|
||||
encodeHead(e, byte(cborTypeTag), t.Number)
|
||||
|
||||
content := t.Content
|
||||
if len(content) == 0 {
|
||||
content = cborNil
|
||||
}
|
||||
|
||||
buf := make([]byte, len(e.Bytes())+len(content))
|
||||
n := copy(buf, e.Bytes())
|
||||
copy(buf[n:], content)
|
||||
|
||||
putEncodeBuffer(e)
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// DecTagMode specifies how decoder handles tag number.
|
||||
type DecTagMode int
|
||||
|
||||
const (
|
||||
// DecTagIgnored makes decoder ignore tag number (skips if present).
|
||||
DecTagIgnored DecTagMode = iota
|
||||
|
||||
// DecTagOptional makes decoder verify tag number if it's present.
|
||||
DecTagOptional
|
||||
|
||||
// DecTagRequired makes decoder verify tag number and tag number must be present.
|
||||
DecTagRequired
|
||||
|
||||
maxDecTagMode
|
||||
)
|
||||
|
||||
func (dtm DecTagMode) valid() bool {
|
||||
return dtm >= 0 && dtm < maxDecTagMode
|
||||
}
|
||||
|
||||
// EncTagMode specifies how encoder handles tag number.
|
||||
type EncTagMode int
|
||||
|
||||
const (
|
||||
// EncTagNone makes encoder not encode tag number.
|
||||
EncTagNone EncTagMode = iota
|
||||
|
||||
// EncTagRequired makes encoder encode tag number.
|
||||
EncTagRequired
|
||||
|
||||
maxEncTagMode
|
||||
)
|
||||
|
||||
func (etm EncTagMode) valid() bool {
|
||||
return etm >= 0 && etm < maxEncTagMode
|
||||
}
|
||||
|
||||
// TagOptions specifies how encoder and decoder handle tag number.
|
||||
type TagOptions struct {
|
||||
DecTag DecTagMode
|
||||
EncTag EncTagMode
|
||||
}
|
||||
|
||||
// TagSet is an interface to add and remove tag info. It is used by EncMode and DecMode
|
||||
// to provide CBOR tag support.
|
||||
type TagSet interface {
|
||||
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||
Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error
|
||||
|
||||
// Remove removes given tag content type from TagSet.
|
||||
Remove(contentType reflect.Type)
|
||||
|
||||
tagProvider
|
||||
}
|
||||
|
||||
type tagProvider interface {
|
||||
getTagItemFromType(t reflect.Type) *tagItem
|
||||
getTypeFromTagNum(num []uint64) reflect.Type
|
||||
}
|
||||
|
||||
type tagItem struct {
|
||||
num []uint64
|
||||
cborTagNum []byte
|
||||
contentType reflect.Type
|
||||
opts TagOptions
|
||||
}
|
||||
|
||||
func (t *tagItem) equalTagNum(num []uint64) bool {
|
||||
// Fast path to compare 1 tag number
|
||||
if len(t.num) == 1 && len(num) == 1 && t.num[0] == num[0] {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(t.num) != len(num) {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := 0; i < len(t.num); i++ {
|
||||
if t.num[i] != num[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
type (
|
||||
tagSet map[reflect.Type]*tagItem
|
||||
|
||||
syncTagSet struct {
|
||||
sync.RWMutex
|
||||
t tagSet
|
||||
}
|
||||
)
|
||||
|
||||
func (t tagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||
return t[typ]
|
||||
}
|
||||
|
||||
func (t tagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||
for typ, tag := range t {
|
||||
if tag.equalTagNum(num) {
|
||||
return typ
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewTagSet returns TagSet (safe for concurrency).
|
||||
func NewTagSet() TagSet {
|
||||
return &syncTagSet{t: make(map[reflect.Type]*tagItem)}
|
||||
}
|
||||
|
||||
// Add adds given tag number(s), content type, and tag options to TagSet.
|
||||
func (t *syncTagSet) Add(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) error {
|
||||
if contentType == nil {
|
||||
return errors.New("cbor: cannot add nil content type to TagSet")
|
||||
}
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
tag, err := newTagItem(opts, contentType, num, nestedNum...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.Lock()
|
||||
defer t.Unlock()
|
||||
for typ, ti := range t.t {
|
||||
if typ == contentType {
|
||||
return errors.New("cbor: content type " + contentType.String() + " already exists in TagSet")
|
||||
}
|
||||
if ti.equalTagNum(tag.num) {
|
||||
return fmt.Errorf("cbor: tag number %v already exists in TagSet", tag.num)
|
||||
}
|
||||
}
|
||||
t.t[contentType] = tag
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove removes given tag content type from TagSet.
|
||||
func (t *syncTagSet) Remove(contentType reflect.Type) {
|
||||
for contentType.Kind() == reflect.Ptr {
|
||||
contentType = contentType.Elem()
|
||||
}
|
||||
t.Lock()
|
||||
delete(t.t, contentType)
|
||||
t.Unlock()
|
||||
}
|
||||
|
||||
func (t *syncTagSet) getTagItemFromType(typ reflect.Type) *tagItem {
|
||||
t.RLock()
|
||||
ti := t.t[typ]
|
||||
t.RUnlock()
|
||||
return ti
|
||||
}
|
||||
|
||||
func (t *syncTagSet) getTypeFromTagNum(num []uint64) reflect.Type {
|
||||
t.RLock()
|
||||
rt := t.t.getTypeFromTagNum(num)
|
||||
t.RUnlock()
|
||||
return rt
|
||||
}
|
||||
|
||||
func newTagItem(opts TagOptions, contentType reflect.Type, num uint64, nestedNum ...uint64) (*tagItem, error) {
|
||||
if opts.DecTag == DecTagIgnored && opts.EncTag == EncTagNone {
|
||||
return nil, errors.New("cbor: cannot add tag with DecTagIgnored and EncTagNone options to TagSet")
|
||||
}
|
||||
if contentType.PkgPath() == "" || contentType.Kind() == reflect.Interface {
|
||||
return nil, errors.New("cbor: can only add named types to TagSet, got " + contentType.String())
|
||||
}
|
||||
if contentType == typeTime {
|
||||
return nil, errors.New("cbor: cannot add time.Time to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||
}
|
||||
if contentType == typeBigInt {
|
||||
return nil, errors.New("cbor: cannot add big.Int to TagSet, it's built-in and supported automatically")
|
||||
}
|
||||
if contentType == typeTag {
|
||||
return nil, errors.New("cbor: cannot add cbor.Tag to TagSet")
|
||||
}
|
||||
if contentType == typeRawTag {
|
||||
return nil, errors.New("cbor: cannot add cbor.RawTag to TagSet")
|
||||
}
|
||||
if num == 0 || num == 1 {
|
||||
return nil, errors.New("cbor: cannot add tag number 0 or 1 to TagSet, use EncOptions.TimeTag and DecOptions.TimeTag instead")
|
||||
}
|
||||
if num == 2 || num == 3 {
|
||||
return nil, errors.New("cbor: cannot add tag number 2 or 3 to TagSet, it's built-in and supported automatically")
|
||||
}
|
||||
if num == tagNumSelfDescribedCBOR {
|
||||
return nil, errors.New("cbor: cannot add tag number 55799 to TagSet, it's built-in and ignored automatically")
|
||||
}
|
||||
|
||||
te := tagItem{num: []uint64{num}, opts: opts, contentType: contentType}
|
||||
te.num = append(te.num, nestedNum...)
|
||||
|
||||
// Cache encoded tag numbers
|
||||
e := getEncodeBuffer()
|
||||
for _, n := range te.num {
|
||||
encodeHead(e, byte(cborTypeTag), n)
|
||||
}
|
||||
te.cborTagNum = make([]byte, e.Len())
|
||||
copy(te.cborTagNum, e.Bytes())
|
||||
putEncodeBuffer(e)
|
||||
|
||||
return &te, nil
|
||||
}
|
||||
|
||||
var (
|
||||
typeTag = reflect.TypeOf(Tag{})
|
||||
typeRawTag = reflect.TypeOf(RawTag{})
|
||||
)
|
||||
|
||||
// WrongTagError describes mismatch between CBOR tag and registered tag.
|
||||
type WrongTagError struct {
|
||||
RegisteredType reflect.Type
|
||||
RegisteredTagNum []uint64
|
||||
TagNum []uint64
|
||||
}
|
||||
|
||||
func (e *WrongTagError) Error() string {
|
||||
return fmt.Sprintf("cbor: wrong tag number for %s, got %v, expected %v", e.RegisteredType.String(), e.TagNum, e.RegisteredTagNum)
|
||||
}
|
||||
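A hedged sketch of the TagSet API from tag.go above: register CBOR tag number 1000 (an arbitrary example number, not from this diff) for a named type and round-trip it. EncModeWithTags/DecModeWithTags are assumed to be the upstream EncOptions/DecOptions constructors that accept a TagSet; they live in encode.go/decode.go, which this page truncates.

package main

import (
	"fmt"
	"reflect"

	"github.com/fxamacker/cbor/v2"
)

type temperature float64

func main() {
	tags := cbor.NewTagSet()
	err := tags.Add(
		cbor.TagOptions{EncTag: cbor.EncTagRequired, DecTag: cbor.DecTagRequired},
		reflect.TypeOf(temperature(0)),
		1000,
	)
	if err != nil {
		panic(err)
	}

	em, err := cbor.EncOptions{}.EncModeWithTags(tags)
	if err != nil {
		panic(err)
	}
	dm, err := cbor.DecOptions{}.DecModeWithTags(tags)
	if err != nil {
		panic(err)
	}

	b, err := em.Marshal(temperature(21.5)) // encoded as tag 1000 wrapping a float
	if err != nil {
		panic(err)
	}
	var t temperature
	if err := dm.Unmarshal(b, &t); err != nil {
		panic(err)
	}
	fmt.Println(t) // 21.5
}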
394 vendor/github.com/fxamacker/cbor/v2/valid.go (generated, vendored, new file)
@@ -0,0 +1,394 @@
|
||||
// Copyright (c) Faye Amacker. All rights reserved.
|
||||
// Licensed under the MIT License. See LICENSE in the project root for license information.
|
||||
|
||||
package cbor
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"github.com/x448/float16"
|
||||
)
|
||||
|
||||
// SyntaxError is a description of a CBOR syntax error.
|
||||
type SyntaxError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *SyntaxError) Error() string { return e.msg }
|
||||
|
||||
// SemanticError is a description of a CBOR semantic error.
|
||||
type SemanticError struct {
|
||||
msg string
|
||||
}
|
||||
|
||||
func (e *SemanticError) Error() string { return e.msg }
|
||||
|
||||
// MaxNestedLevelError indicates exceeded max nested level of any combination of CBOR arrays/maps/tags.
|
||||
type MaxNestedLevelError struct {
|
||||
maxNestedLevels int
|
||||
}
|
||||
|
||||
func (e *MaxNestedLevelError) Error() string {
|
||||
return "cbor: exceeded max nested level " + strconv.Itoa(e.maxNestedLevels)
|
||||
}
|
||||
|
||||
// MaxArrayElementsError indicates exceeded max number of elements for CBOR arrays.
|
||||
type MaxArrayElementsError struct {
|
||||
maxArrayElements int
|
||||
}
|
||||
|
||||
func (e *MaxArrayElementsError) Error() string {
|
||||
return "cbor: exceeded max number of elements " + strconv.Itoa(e.maxArrayElements) + " for CBOR array"
|
||||
}
|
||||
|
||||
// MaxMapPairsError indicates exceeded max number of key-value pairs for CBOR maps.
|
||||
type MaxMapPairsError struct {
|
||||
maxMapPairs int
|
||||
}
|
||||
|
||||
func (e *MaxMapPairsError) Error() string {
|
||||
return "cbor: exceeded max number of key-value pairs " + strconv.Itoa(e.maxMapPairs) + " for CBOR map"
|
||||
}
|
||||
|
||||
// IndefiniteLengthError indicates found disallowed indefinite length items.
|
||||
type IndefiniteLengthError struct {
|
||||
t cborType
|
||||
}
|
||||
|
||||
func (e *IndefiniteLengthError) Error() string {
|
||||
return "cbor: indefinite-length " + e.t.String() + " isn't allowed"
|
||||
}
|
||||
|
||||
// TagsMdError indicates found disallowed CBOR tags.
|
||||
type TagsMdError struct {
|
||||
}
|
||||
|
||||
func (e *TagsMdError) Error() string {
|
||||
return "cbor: CBOR tag isn't allowed"
|
||||
}
|
||||
|
||||
// ExtraneousDataError indicates found extraneous data following well-formed CBOR data item.
|
||||
type ExtraneousDataError struct {
|
||||
numOfBytes int // number of bytes of extraneous data
|
||||
index int // location of extraneous data
|
||||
}
|
||||
|
||||
func (e *ExtraneousDataError) Error() string {
|
||||
return "cbor: " + strconv.Itoa(e.numOfBytes) + " bytes of extraneous data starting at index " + strconv.Itoa(e.index)
|
||||
}
|
||||
|
||||
// wellformed checks whether the CBOR data item is well-formed.
|
||||
// allowExtraData indicates if extraneous data is allowed after the CBOR data item.
|
||||
// - use allowExtraData = true when using Decoder.Decode()
|
||||
// - use allowExtraData = false when using Unmarshal()
|
||||
func (d *decoder) wellformed(allowExtraData bool, checkBuiltinTags bool) error {
|
||||
if len(d.data) == d.off {
|
||||
return io.EOF
|
||||
}
|
||||
_, err := d.wellformedInternal(0, checkBuiltinTags)
|
||||
if err == nil {
|
||||
if !allowExtraData && d.off != len(d.data) {
|
||||
err = &ExtraneousDataError{len(d.data) - d.off, d.off}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// wellformedInternal checks data's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedInternal(depth int, checkBuiltinTags bool) (int, error) { //nolint:gocyclo
|
||||
t, _, val, indefiniteLength, err := d.wellformedHeadWithIndefiniteLengthFlag()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
switch t {
|
||||
case cborTypeByteString, cborTypeTextString:
|
||||
if indefiniteLength {
|
||||
if d.dm.indefLength == IndefLengthForbidden {
|
||||
return 0, &IndefiniteLengthError{t}
|
||||
}
|
||||
return d.wellformedIndefiniteString(t, depth, checkBuiltinTags)
|
||||
}
|
||||
valInt := int(val)
|
||||
if valInt < 0 {
|
||||
// Detect integer overflow
|
||||
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, causing integer overflow")
|
||||
}
|
||||
if len(d.data)-d.off < valInt { // valInt+off may overflow integer
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
d.off += valInt
|
||||
|
||||
case cborTypeArray, cborTypeMap:
|
||||
depth++
|
||||
if depth > d.dm.maxNestedLevels {
|
||||
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||
}
|
||||
|
||||
if indefiniteLength {
|
||||
if d.dm.indefLength == IndefLengthForbidden {
|
||||
return 0, &IndefiniteLengthError{t}
|
||||
}
|
||||
return d.wellformedIndefiniteArrayOrMap(t, depth, checkBuiltinTags)
|
||||
}
|
||||
|
||||
valInt := int(val)
|
||||
if valInt < 0 {
|
||||
// Detect integer overflow
|
||||
return 0, errors.New("cbor: " + t.String() + " length " + strconv.FormatUint(val, 10) + " is too large, it would cause integer overflow")
|
||||
}
|
||||
|
||||
if t == cborTypeArray {
|
||||
if valInt > d.dm.maxArrayElements {
|
||||
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||
}
|
||||
} else {
|
||||
if valInt > d.dm.maxMapPairs {
|
||||
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||
}
|
||||
}
|
||||
|
||||
count := 1
|
||||
if t == cborTypeMap {
|
||||
count = 2
|
||||
}
|
||||
maxDepth := depth
|
||||
for j := 0; j < count; j++ {
|
||||
for i := 0; i < valInt; i++ {
|
||||
var dpt int
|
||||
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if dpt > maxDepth {
|
||||
maxDepth = dpt // Save max depth
|
||||
}
|
||||
}
|
||||
}
|
||||
depth = maxDepth
|
||||
|
||||
case cborTypeTag:
|
||||
if d.dm.tagsMd == TagsForbidden {
|
||||
return 0, &TagsMdError{}
|
||||
}
|
||||
|
||||
tagNum := val
|
||||
|
||||
// Scan nested tag numbers to avoid recursion.
|
||||
for {
|
||||
if len(d.data) == d.off { // Tag number must be followed by tag content.
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if checkBuiltinTags {
|
||||
err = validBuiltinTag(tagNum, d.data[d.off])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
if d.dm.bignumTag == BignumTagForbidden && (tagNum == 2 || tagNum == 3) {
|
||||
return 0, &UnacceptableDataItemError{
|
||||
CBORType: cborTypeTag.String(),
|
||||
Message: "bignum",
|
||||
}
|
||||
}
|
||||
if getType(d.data[d.off]) != cborTypeTag {
|
||||
break
|
||||
}
|
||||
if _, _, tagNum, err = d.wellformedHead(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
depth++
|
||||
if depth > d.dm.maxNestedLevels {
|
||||
return 0, &MaxNestedLevelError{d.dm.maxNestedLevels}
|
||||
}
|
||||
}
|
||||
// Check tag content.
|
||||
return d.wellformedInternal(depth, checkBuiltinTags)
|
||||
}
|
||||
|
||||
return depth, nil
|
||||
}
|
||||
|
||||
// wellformedIndefiniteString checks indefinite length byte/text string's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedIndefiniteString(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||
var err error
|
||||
for {
|
||||
if len(d.data) == d.off {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if isBreakFlag(d.data[d.off]) {
|
||||
d.off++
|
||||
break
|
||||
}
|
||||
// Peek ahead to get next type and indefinite length status.
|
||||
nt, ai := parseInitialByte(d.data[d.off])
|
||||
if t != nt {
|
||||
return 0, &SyntaxError{"cbor: wrong element type " + nt.String() + " for indefinite-length " + t.String()}
|
||||
}
|
||||
if additionalInformation(ai).isIndefiniteLength() {
|
||||
return 0, &SyntaxError{"cbor: indefinite-length " + t.String() + " chunk is not definite-length"}
|
||||
}
|
||||
if depth, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return depth, nil
|
||||
}
|
||||
|
||||
// wellformedIndefiniteArrayOrMap checks indefinite length array/map's well-formedness and returns max depth and error.
|
||||
func (d *decoder) wellformedIndefiniteArrayOrMap(t cborType, depth int, checkBuiltinTags bool) (int, error) {
|
||||
var err error
|
||||
maxDepth := depth
|
||||
i := 0
|
||||
for {
|
||||
if len(d.data) == d.off {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
if isBreakFlag(d.data[d.off]) {
|
||||
d.off++
|
||||
break
|
||||
}
|
||||
var dpt int
|
||||
if dpt, err = d.wellformedInternal(depth, checkBuiltinTags); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if dpt > maxDepth {
|
||||
maxDepth = dpt
|
||||
}
|
||||
i++
|
||||
if t == cborTypeArray {
|
||||
if i > d.dm.maxArrayElements {
|
||||
return 0, &MaxArrayElementsError{d.dm.maxArrayElements}
|
||||
}
|
||||
} else {
|
||||
if i%2 == 0 && i/2 > d.dm.maxMapPairs {
|
||||
return 0, &MaxMapPairsError{d.dm.maxMapPairs}
|
||||
}
|
||||
}
|
||||
}
|
||||
if t == cborTypeMap && i%2 == 1 {
|
||||
return 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||
}
|
||||
return maxDepth, nil
|
||||
}
|
||||
|
||||
func (d *decoder) wellformedHeadWithIndefiniteLengthFlag() (
|
||||
t cborType,
|
||||
ai byte,
|
||||
val uint64,
|
||||
indefiniteLength bool,
|
||||
err error,
|
||||
) {
|
||||
t, ai, val, err = d.wellformedHead()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
indefiniteLength = additionalInformation(ai).isIndefiniteLength()
|
||||
return
|
||||
}
|
||||
|
||||
func (d *decoder) wellformedHead() (t cborType, ai byte, val uint64, err error) {
|
||||
dataLen := len(d.data) - d.off
|
||||
if dataLen == 0 {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
t, ai = parseInitialByte(d.data[d.off])
|
||||
val = uint64(ai)
|
||||
d.off++
|
||||
dataLen--
|
||||
|
||||
if ai <= maxAdditionalInformationWithoutArgument {
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith1ByteArgument {
|
||||
const argumentSize = 1
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(d.data[d.off])
|
||||
d.off++
|
||||
if t == cborTypePrimitives && val < 32 {
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid simple value " + strconv.Itoa(int(val)) + " for type " + t.String()}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith2ByteArgument {
|
||||
const argumentSize = 2
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(binary.BigEndian.Uint16(d.data[d.off : d.off+argumentSize]))
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(float64(float16.Frombits(uint16(val)).Float32())); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith4ByteArgument {
|
||||
const argumentSize = 4
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = uint64(binary.BigEndian.Uint32(d.data[d.off : d.off+argumentSize]))
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(float64(math.Float32frombits(uint32(val)))); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if ai == additionalInformationWith8ByteArgument {
|
||||
const argumentSize = 8
|
||||
if dataLen < argumentSize {
|
||||
return 0, 0, 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
val = binary.BigEndian.Uint64(d.data[d.off : d.off+argumentSize])
|
||||
d.off += argumentSize
|
||||
if t == cborTypePrimitives {
|
||||
if err := d.acceptableFloat(math.Float64frombits(val)); err != nil {
|
||||
return 0, 0, 0, err
|
||||
}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
if additionalInformation(ai).isIndefiniteLength() {
|
||||
switch t {
|
||||
case cborTypePositiveInt, cborTypeNegativeInt, cborTypeTag:
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||
case cborTypePrimitives: // 0xff (break code) should not be outside wellformedIndefinite().
|
||||
return 0, 0, 0, &SyntaxError{"cbor: unexpected \"break\" code"}
|
||||
}
|
||||
return t, ai, val, nil
|
||||
}
|
||||
|
||||
// ai == 28, 29, 30
|
||||
return 0, 0, 0, &SyntaxError{"cbor: invalid additional information " + strconv.Itoa(int(ai)) + " for type " + t.String()}
|
||||
}
|
||||
|
||||
func (d *decoder) acceptableFloat(f float64) error {
|
||||
switch {
|
||||
case d.dm.nanDec == NaNDecodeForbidden && math.IsNaN(f):
|
||||
return &UnacceptableDataItemError{
|
||||
CBORType: cborTypePrimitives.String(),
|
||||
Message: "floating-point NaN",
|
||||
}
|
||||
case d.dm.infDec == InfDecodeForbidden && math.IsInf(f, 0):
|
||||
return &UnacceptableDataItemError{
|
||||
CBORType: cborTypePrimitives.String(),
|
||||
Message: "floating-point infinity",
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
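A hedged sketch of the well-formedness limits enforced in valid.go above. DecOptions.MaxNestedLevels is assumed to be the public knob behind the maxNestedLevels check (it is declared in decode.go, which is not part of this page); the error string matches MaxNestedLevelError.Error in the diff.

package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	dm, err := cbor.DecOptions{MaxNestedLevels: 4}.DecMode()
	if err != nil {
		panic(err)
	}

	// 0x81 starts a one-element array; six nested arrays around 0x00 exceed the limit of 4.
	data := []byte{0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x00}

	var v interface{}
	err = dm.Unmarshal(data, &v)
	fmt.Println(err) // cbor: exceeded max nested level 4
}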
61 vendor/github.com/go-logr/logr/slogr/slogr.go (generated, vendored)
@@ -1,61 +0,0 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package slogr enables usage of a slog.Handler with logr.Logger as front-end
// API and of a logr.LogSink through the slog.Handler and thus slog.Logger
// APIs.
//
// See the README in the top-level [./logr] package for a discussion of
// interoperability.
//
// Deprecated: use the main logr package instead.
package slogr

import (
	"log/slog"

	"github.com/go-logr/logr"
)

// NewLogr returns a logr.Logger which writes to the slog.Handler.
//
// Deprecated: use [logr.FromSlogHandler] instead.
func NewLogr(handler slog.Handler) logr.Logger {
	return logr.FromSlogHandler(handler)
}

// NewSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// Deprecated: use [logr.ToSlogHandler] instead.
func NewSlogHandler(logger logr.Logger) slog.Handler {
	return logr.ToSlogHandler(logger)
}

// ToSlogHandler returns a slog.Handler which writes to the same sink as the logr.Logger.
//
// Deprecated: use [logr.ToSlogHandler] instead.
func ToSlogHandler(logger logr.Logger) slog.Handler {
	return logr.ToSlogHandler(logger)
}

// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better.
//
// Deprecated: use [logr.SlogSink] instead.
type SlogSink = logr.SlogSink
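The deleted slogr package above names its replacements in the main logr package; a minimal sketch of the same slog interop using those replacements (requires Go 1.21 for log/slog and go-logr/logr v1.4+):

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	handler := slog.NewJSONHandler(os.Stdout, nil)

	// slogr.NewLogr(handler) becomes:
	logger := logr.FromSlogHandler(handler)
	logger.Info("hello", "key", "value")

	// slogr.NewSlogHandler(logger) / slogr.ToSlogHandler(logger) become:
	slogLogger := slog.New(logr.ToSlogHandler(logger))
	slogLogger.Info("hello again")
}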
27 vendor/github.com/google/go-cmp/LICENSE (generated, vendored, new file)
@@ -0,0 +1,27 @@
Copyright (c) 2017 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
671 vendor/github.com/google/go-cmp/cmp/compare.go (generated, vendored, new file)
@@ -0,0 +1,671 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cmp determines equality of values.
|
||||
//
|
||||
// This package is intended to be a more powerful and safer alternative to
|
||||
// [reflect.DeepEqual] for comparing whether two values are semantically equal.
|
||||
// It is intended to only be used in tests, as performance is not a goal and
|
||||
// it may panic if it cannot compare the values. Its propensity towards
|
||||
// panicking means that its unsuitable for production environments where a
|
||||
// spurious panic may be fatal.
|
||||
//
|
||||
// The primary features of cmp are:
|
||||
//
|
||||
// - When the default behavior of equality does not suit the test's needs,
|
||||
// custom equality functions can override the equality operation.
|
||||
// For example, an equality function may report floats as equal so long as
|
||||
// they are within some tolerance of each other.
|
||||
//
|
||||
// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
|
||||
// to determine equality. This allows package authors to determine
|
||||
// the equality operation for the types that they define.
|
||||
//
|
||||
// - If no custom equality functions are used and no Equal method is defined,
|
||||
// equality is determined by recursively comparing the primitive kinds on
|
||||
// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
|
||||
// unexported fields are not compared by default; they result in panics
|
||||
// unless suppressed by using an [Ignore] option
|
||||
// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
|
||||
// or explicitly compared using the [Exporter] option.
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// TODO(≥go1.18): Use any instead of interface{}.
|
||||
|
||||
// Equal reports whether x and y are equal by recursively applying the
|
||||
// following rules in the given order to x and y and all of their sub-values:
|
||||
//
|
||||
// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
|
||||
// remain after applying all path filters, value filters, and type filters.
|
||||
// If at least one [Ignore] exists in S, then the comparison is ignored.
|
||||
// If the number of [Transformer] and [Comparer] options in S is non-zero,
|
||||
// then Equal panics because it is ambiguous which option to use.
|
||||
// If S contains a single [Transformer], then use that to transform
|
||||
// the current values and recursively call Equal on the output values.
|
||||
// If S contains a single [Comparer], then use that to compare the current values.
|
||||
// Otherwise, evaluation proceeds to the next rule.
|
||||
//
|
||||
// - If the values have an Equal method of the form "(T) Equal(T) bool" or
|
||||
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
|
||||
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
|
||||
// evaluation proceeds to the next rule.
|
||||
//
|
||||
// - Lastly, try to compare x and y based on their basic kinds.
|
||||
// Simple kinds like booleans, integers, floats, complex numbers, strings,
|
||||
// and channels are compared using the equivalent of the == operator in Go.
|
||||
// Functions are only equal if they are both nil, otherwise they are unequal.
|
||||
//
|
||||
// Structs are equal if recursively calling Equal on all fields report equal.
|
||||
// If a struct contains unexported fields, Equal panics unless an [Ignore] option
|
||||
// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
|
||||
// or the [Exporter] option explicitly permits comparing the unexported field.
|
||||
//
|
||||
// Slices are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored slice or array elements report equal.
|
||||
// Empty non-nil slices and nil slices are not equal; to equate empty slices,
|
||||
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
|
||||
//
|
||||
// Maps are equal if they are both nil or both non-nil, where recursively
|
||||
// calling Equal on all non-ignored map entries report equal.
|
||||
// Map keys are equal according to the == operator.
|
||||
// To use custom comparisons for map keys, consider using
|
||||
// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
|
||||
// Empty non-nil maps and nil maps are not equal; to equate empty maps,
|
||||
// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
|
||||
//
|
||||
// Pointers and interfaces are equal if they are both nil or both non-nil,
|
||||
// where they have the same underlying concrete type and recursively
|
||||
// calling Equal on the underlying values reports equal.
|
||||
//
|
||||
// Before recursing into a pointer, slice element, or map, the current path
|
||||
// is checked to detect whether the address has already been visited.
|
||||
// If there is a cycle, then the pointed-at values are considered equal
|
||||
// only if both addresses were previously visited in the same path step.
|
||||
func Equal(x, y interface{}, opts ...Option) bool {
|
||||
s := newState(opts)
|
||||
s.compareAny(rootStep(x, y))
|
||||
return s.result.Equal()
|
||||
}
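// Illustrative usage, not part of the vendored file: a minimal sketch of how
// the rules above interact, assuming a caller-defined Point type and an
// imported "math" package. The Comparer (rule 1) takes precedence over the
// default field-by-field comparison (rule 3) for the float64 leaves.
//
//	type Point struct{ X, Y float64 }
//
//	approx := Comparer(func(a, b float64) bool {
//		return math.Abs(a-b) < 1e-9 // assumed tolerance
//	})
//	Equal(Point{1, 2}, Point{1, 2 + 1e-12}, approx) // true
//	Equal(Point{1, 2}, Point{1, 3})                 // false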
|
||||
|
||||
// Diff returns a human-readable report of the differences between two values:
|
||||
// y - x. It returns an empty string if and only if Equal returns true for the
|
||||
// same input values and options.
|
||||
//
|
||||
// The output is displayed as a literal in pseudo-Go syntax.
|
||||
// At the start of each line, a "-" prefix indicates an element removed from x,
|
||||
// a "+" prefix to indicates an element added from y, and the lack of a prefix
|
||||
// indicates an element common to both x and y. If possible, the output
|
||||
// uses fmt.Stringer.String or error.Error methods to produce more
// human-readable outputs. In such cases, the string is prefixed with either an
|
||||
// 's' or 'e' character, respectively, to indicate that the method was called.
|
||||
//
|
||||
// Do not depend on this output being stable. If you need the ability to
|
||||
// programmatically interpret the difference, consider using a custom Reporter.
|
||||
func Diff(x, y interface{}, opts ...Option) string {
|
||||
s := newState(opts)
|
||||
|
||||
// Optimization: If there are no other reporters, we can optimize for the
|
||||
// common case where the result is equal (and thus no reported difference).
|
||||
// This avoids the expensive construction of a difference tree.
|
||||
if len(s.reporters) == 0 {
|
||||
s.compareAny(rootStep(x, y))
|
||||
if s.result.Equal() {
|
||||
return ""
|
||||
}
|
||||
s.result = diff.Result{} // Reset results
|
||||
}
|
||||
|
||||
r := new(defaultReporter)
|
||||
s.reporters = append(s.reporters, reporter{r})
|
||||
s.compareAny(rootStep(x, y))
|
||||
d := r.String()
|
||||
if (d == "") != s.result.Equal() {
|
||||
panic("inconsistent difference and equality results")
|
||||
}
|
||||
return d
|
||||
}
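// Illustrative usage, not part of the vendored file: the common test-helper
// pattern, assuming a *testing.T named t and hypothetical want/got values.
//
//	want := []int{1, 2, 3}
//	got := []int{1, 2, 4}
//	if d := Diff(want, got); d != "" {
//		t.Errorf("result mismatch (-want +got):\n%s", d)
//	}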
|
||||
|
||||
// rootStep constructs the first path step. If x and y have differing types,
|
||||
// then they are stored within an empty interface type.
|
||||
func rootStep(x, y interface{}) PathStep {
|
||||
vx := reflect.ValueOf(x)
|
||||
vy := reflect.ValueOf(y)
|
||||
|
||||
// If the inputs are different types, auto-wrap them in an empty interface
|
||||
// so that they have the same parent type.
|
||||
var t reflect.Type
|
||||
if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
|
||||
t = anyType
|
||||
if vx.IsValid() {
|
||||
vvx := reflect.New(t).Elem()
|
||||
vvx.Set(vx)
|
||||
vx = vvx
|
||||
}
|
||||
if vy.IsValid() {
|
||||
vvy := reflect.New(t).Elem()
|
||||
vvy.Set(vy)
|
||||
vy = vvy
|
||||
}
|
||||
} else {
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
return &pathStep{t, vx, vy}
|
||||
}
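// Illustrative consequence, not part of the vendored file: wrapping mismatched
// types in an empty interface means Equal reports inequality rather than
// panicking when the inputs have different concrete types.
//
//	Equal(int(1), int64(1)) // false: differing concrete types inside the interface
//	Equal("1", 1)           // false, for the same reason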
|
||||
|
||||
type state struct {
|
||||
// These fields represent the "comparison state".
|
||||
// Calling statelessCompare must not result in observable changes to these.
|
||||
result diff.Result // The current result of comparison
|
||||
curPath Path // The current path in the value tree
|
||||
curPtrs pointerPath // The current set of visited pointers
|
||||
reporters []reporter // Optional reporters
|
||||
|
||||
// recChecker checks for infinite cycles applying the same set of
|
||||
// transformers upon the output of itself.
|
||||
recChecker recChecker
|
||||
|
||||
// dynChecker triggers pseudo-random checks for option correctness.
|
||||
// It is safe for statelessCompare to mutate this value.
|
||||
dynChecker dynChecker
|
||||
|
||||
// These fields, once set by processOption, will not change.
|
||||
exporters []exporter // List of exporters for structs with unexported fields
|
||||
opts Options // List of all fundamental and filter options
|
||||
}
|
||||
|
||||
func newState(opts []Option) *state {
|
||||
// Always ensure a validator option exists to validate the inputs.
|
||||
s := &state{opts: Options{validator{}}}
|
||||
s.curPtrs.Init()
|
||||
s.processOption(Options(opts))
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *state) processOption(opt Option) {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
case Options:
|
||||
for _, o := range opt {
|
||||
s.processOption(o)
|
||||
}
|
||||
case coreOption:
|
||||
type filtered interface {
|
||||
isFiltered() bool
|
||||
}
|
||||
if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
|
||||
panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
|
||||
}
|
||||
s.opts = append(s.opts, opt)
|
||||
case exporter:
|
||||
s.exporters = append(s.exporters, opt)
|
||||
case reporter:
|
||||
s.reporters = append(s.reporters, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown option %T", opt))
|
||||
}
|
||||
}
|
||||
|
||||
// statelessCompare compares two values and returns the result.
|
||||
// This function is stateless in that it does not alter the current result,
|
||||
// or output to any registered reporters.
|
||||
func (s *state) statelessCompare(step PathStep) diff.Result {
|
||||
// We do not save and restore curPath and curPtrs because all of the
|
||||
// compareX methods should properly push and pop from them.
|
||||
// It is an implementation bug if the contents of the paths differ from
|
||||
// when calling this function to when returning from it.
|
||||
|
||||
oldResult, oldReporters := s.result, s.reporters
|
||||
s.result = diff.Result{} // Reset result
|
||||
s.reporters = nil // Remove reporters to avoid spurious printouts
|
||||
s.compareAny(step)
|
||||
res := s.result
|
||||
s.result, s.reporters = oldResult, oldReporters
|
||||
return res
|
||||
}
|
||||
|
||||
func (s *state) compareAny(step PathStep) {
|
||||
// Update the path stack.
|
||||
s.curPath.push(step)
|
||||
defer s.curPath.pop()
|
||||
for _, r := range s.reporters {
|
||||
r.PushStep(step)
|
||||
defer r.PopStep()
|
||||
}
|
||||
s.recChecker.Check(s.curPath)
|
||||
|
||||
// Cycle-detection for slice elements (see NOTE in compareSlice).
|
||||
t := step.Type()
|
||||
vx, vy := step.Values()
|
||||
if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
|
||||
px, py := vx.Addr(), vy.Addr()
|
||||
if eq, visited := s.curPtrs.Push(px, py); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(px, py)
|
||||
}
|
||||
|
||||
// Rule 1: Check whether an option applies on this node in the value tree.
|
||||
if s.tryOptions(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 2: Check whether the type has a valid Equal method.
|
||||
if s.tryMethod(t, vx, vy) {
|
||||
return
|
||||
}
|
||||
|
||||
// Rule 3: Compare based on the underlying kind.
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
s.report(vx.Bool() == vy.Bool(), 0)
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
s.report(vx.Int() == vy.Int(), 0)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
s.report(vx.Uint() == vy.Uint(), 0)
|
||||
case reflect.Float32, reflect.Float64:
|
||||
s.report(vx.Float() == vy.Float(), 0)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
s.report(vx.Complex() == vy.Complex(), 0)
|
||||
case reflect.String:
|
||||
s.report(vx.String() == vy.String(), 0)
|
||||
case reflect.Chan, reflect.UnsafePointer:
|
||||
s.report(vx.Pointer() == vy.Pointer(), 0)
|
||||
case reflect.Func:
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
case reflect.Struct:
|
||||
s.compareStruct(t, vx, vy)
|
||||
case reflect.Slice, reflect.Array:
|
||||
s.compareSlice(t, vx, vy)
|
||||
case reflect.Map:
|
||||
s.compareMap(t, vx, vy)
|
||||
case reflect.Ptr:
|
||||
s.comparePtr(t, vx, vy)
|
||||
case reflect.Interface:
|
||||
s.compareInterface(t, vx, vy)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", t.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Evaluate all filters and apply the remaining options.
|
||||
if opt := s.opts.filter(s, t, vx, vy); opt != nil {
|
||||
opt.apply(s, vx, vy)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
|
||||
// Check if this type even has an Equal method.
|
||||
m, ok := t.MethodByName("Equal")
|
||||
if !ok || !function.IsType(m.Type, function.EqualAssignable) {
|
||||
return false
|
||||
}
|
||||
|
||||
eq := s.callTTBFunc(m.Func, vx, vy)
|
||||
s.report(eq, reportByMethod)
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{v})[0]
|
||||
}
|
||||
|
||||
// Run the function twice and ensure that we get the same results back.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, v)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{v})[0]
|
||||
if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
|
||||
// To avoid false-positives with non-reflexive equality operations,
|
||||
// we sanity check whether a value is equal to itself.
|
||||
if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
|
||||
return want
|
||||
}
|
||||
panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
|
||||
if !s.dynChecker.Next() {
|
||||
return f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
}
|
||||
|
||||
// Swapping the input arguments is sufficient to check that
|
||||
// f is symmetric and deterministic.
|
||||
// We run in goroutines so that the race detector (if enabled) can detect
|
||||
// unsafe mutations to the input.
|
||||
c := make(chan reflect.Value)
|
||||
go detectRaces(c, f, y, x)
|
||||
got := <-c
|
||||
want := f.Call([]reflect.Value{x, y})[0].Bool()
|
||||
if !got.IsValid() || got.Bool() != want {
|
||||
panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
|
||||
}
|
||||
return want
|
||||
}
|
||||
|
||||
func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
|
||||
var ret reflect.Value
|
||||
defer func() {
|
||||
recover() // Ignore panics, let the other call to f panic instead
|
||||
c <- ret
|
||||
}()
|
||||
ret = f.Call(vs)[0]
|
||||
}
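// Illustrative failure mode, not part of the vendored file: a sketch of a
// Comparer that the swapped-argument check in callTTBFunc can reject. Because
// dynChecker only fires periodically, the panic is probabilistic.
//
//	bad := Comparer(func(x, y float64) bool { return x <= y }) // not symmetric
//	Equal(1.0, 2.0, bad) // may panic: non-deterministic or non-symmetric function detected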
|
||||
|
||||
func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
|
||||
var addr bool
|
||||
var vax, vay reflect.Value // Addressable versions of vx and vy
|
||||
|
||||
var mayForce, mayForceInit bool
|
||||
step := StructField{&structField{}}
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
step.typ = t.Field(i).Type
|
||||
step.vx = vx.Field(i)
|
||||
step.vy = vy.Field(i)
|
||||
step.name = t.Field(i).Name
|
||||
step.idx = i
|
||||
step.unexported = !isExported(step.name)
|
||||
if step.unexported {
|
||||
if step.name == "_" {
|
||||
continue
|
||||
}
|
||||
// Defer checking of unexported fields until later to give an
|
||||
// Ignore a chance to ignore the field.
|
||||
if !vax.IsValid() || !vay.IsValid() {
|
||||
// For retrieveUnexportedField to work, the parent struct must
|
||||
// be addressable. Create a new copy of the values if
|
||||
// necessary to make them addressable.
|
||||
addr = vx.CanAddr() || vy.CanAddr()
|
||||
vax = makeAddressable(vx)
|
||||
vay = makeAddressable(vy)
|
||||
}
|
||||
if !mayForceInit {
|
||||
for _, xf := range s.exporters {
|
||||
mayForce = mayForce || xf(t)
|
||||
}
|
||||
mayForceInit = true
|
||||
}
|
||||
step.mayForce = mayForce
|
||||
step.paddr = addr
|
||||
step.pvx = vax
|
||||
step.pvy = vay
|
||||
step.field = t.Field(i)
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
|
||||
isSlice := t.Kind() == reflect.Slice
|
||||
if isSlice && (vx.IsNil() || vy.IsNil()) {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
|
||||
// since a slice represents a list of pointers, rather than a single pointer.
|
||||
// The pointer checking logic must be handled on a per-element basis
|
||||
// in compareAny.
|
||||
//
|
||||
// A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
|
||||
// pointer P, a length N, and a capacity C. Supposing each slice element has
|
||||
// a memory size of M, then the slice is equivalent to the list of pointers:
|
||||
// [P+i*M for i in range(N)]
|
||||
//
|
||||
// For example, v[:0] and v[:1] are slices with the same starting pointer,
|
||||
// but they are clearly different values. Using the slice pointer alone
|
||||
// violates the assumption that equal pointers imply equal values.
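// For instance (illustrative, not part of the vendored file):
//
//	v := []int{1, 2, 3}
//	samePtr := reflect.ValueOf(v[:0]).Pointer() == reflect.ValueOf(v[:1]).Pointer()
//	// samePtr is true: both sub-slices share the same starting pointer,
//	// yet v[:0] and v[:1] are clearly different values.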
|
||||
|
||||
step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
|
||||
withIndexes := func(ix, iy int) SliceIndex {
|
||||
if ix >= 0 {
|
||||
step.vx, step.xkey = vx.Index(ix), ix
|
||||
} else {
|
||||
step.vx, step.xkey = reflect.Value{}, -1
|
||||
}
|
||||
if iy >= 0 {
|
||||
step.vy, step.ykey = vy.Index(iy), iy
|
||||
} else {
|
||||
step.vy, step.ykey = reflect.Value{}, -1
|
||||
}
|
||||
return step
|
||||
}
|
||||
|
||||
// Ignore options are able to ignore missing elements in a slice.
|
||||
// However, detecting these reliably requires an optimal differencing
|
||||
// algorithm, which diff.Difference does not provide.
|
||||
//
|
||||
// Instead, we first iterate through both slices to detect which elements
|
||||
// would be ignored if standing alone. The indexes of non-discarded elements
// are stored in a separate slice, on which diffing is then performed.
|
||||
var indexesX, indexesY []int
|
||||
var ignoredX, ignoredY []bool
|
||||
for ix := 0; ix < vx.Len(); ix++ {
|
||||
ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesX = append(indexesX, ix)
|
||||
}
|
||||
ignoredX = append(ignoredX, ignored)
|
||||
}
|
||||
for iy := 0; iy < vy.Len(); iy++ {
|
||||
ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
|
||||
if !ignored {
|
||||
indexesY = append(indexesY, iy)
|
||||
}
|
||||
ignoredY = append(ignoredY, ignored)
|
||||
}
|
||||
|
||||
// Compute an edit-script for slices vx and vy (excluding ignored elements).
|
||||
edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
|
||||
return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
|
||||
})
|
||||
|
||||
// Replay the ignore-scripts and the edit-script.
|
||||
var ix, iy int
|
||||
for ix < vx.Len() || iy < vy.Len() {
|
||||
var e diff.EditType
|
||||
switch {
|
||||
case ix < len(ignoredX) && ignoredX[ix]:
|
||||
e = diff.UniqueX
|
||||
case iy < len(ignoredY) && ignoredY[iy]:
|
||||
e = diff.UniqueY
|
||||
default:
|
||||
e, edits = edits[0], edits[1:]
|
||||
}
|
||||
switch e {
|
||||
case diff.UniqueX:
|
||||
s.compareAny(withIndexes(ix, -1))
|
||||
ix++
|
||||
case diff.UniqueY:
|
||||
s.compareAny(withIndexes(-1, iy))
|
||||
iy++
|
||||
default:
|
||||
s.compareAny(withIndexes(ix, iy))
|
||||
ix++
|
||||
iy++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for maps.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
// We combine and sort the two map keys so that we can perform the
|
||||
// comparisons in a deterministic order.
|
||||
step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
|
||||
for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
|
||||
step.vx = vx.MapIndex(k)
|
||||
step.vy = vy.MapIndex(k)
|
||||
step.key = k
|
||||
if !step.vx.IsValid() && !step.vy.IsValid() {
|
||||
// It is possible for both vx and vy to be invalid if the
|
||||
// key contained a NaN value in it.
|
||||
//
|
||||
// Even with the ability to retrieve NaN keys in Go 1.12,
|
||||
// there still isn't a sensible way to compare the values since
|
||||
// a NaN key may map to multiple unordered values.
|
||||
// The most reasonable way to compare NaNs would be to compare the
|
||||
// set of values. However, this is impossible to do efficiently
|
||||
// since set equality is provably an O(n^2) operation given only
|
||||
// an Equal function. If we had a Less function or Hash function,
|
||||
// this could be done in O(n*log(n)) or O(n), respectively.
|
||||
//
|
||||
// Rather than adding complex logic to deal with NaNs, make it
|
||||
// the user's responsibility to compare such obscure maps.
|
||||
const help = "consider providing a Comparer to compare the map"
|
||||
panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
|
||||
}
|
||||
s.compareAny(step)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Cycle-detection for pointers.
|
||||
if eq, visited := s.curPtrs.Push(vx, vy); visited {
|
||||
s.report(eq, reportByCycle)
|
||||
return
|
||||
}
|
||||
defer s.curPtrs.Pop(vx, vy)
|
||||
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
|
||||
if vx.IsNil() || vy.IsNil() {
|
||||
s.report(vx.IsNil() && vy.IsNil(), 0)
|
||||
return
|
||||
}
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
if vx.Type() != vy.Type() {
|
||||
s.report(false, 0)
|
||||
return
|
||||
}
|
||||
s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
|
||||
}
|
||||
|
||||
func (s *state) report(eq bool, rf resultFlags) {
|
||||
if rf&reportByIgnore == 0 {
|
||||
if eq {
|
||||
s.result.NumSame++
|
||||
rf |= reportEqual
|
||||
} else {
|
||||
s.result.NumDiff++
|
||||
rf |= reportUnequal
|
||||
}
|
||||
}
|
||||
for _, r := range s.reporters {
|
||||
r.Report(Result{flags: rf})
|
||||
}
|
||||
}
|
||||
|
||||
// recChecker tracks the state needed to periodically perform checks that
|
||||
// user provided transformers are not stuck in an infinitely recursive cycle.
|
||||
type recChecker struct{ next int }
|
||||
|
||||
// Check scans the Path for any recursive transformers and panics when any
|
||||
// recursive transformers are detected. Note that the presence of a
|
||||
// recursive Transformer does not necessarily imply an infinite cycle.
|
||||
// As such, this check only activates after some minimal number of path steps.
|
||||
func (rc *recChecker) Check(p Path) {
|
||||
const minLen = 1 << 16
|
||||
if rc.next == 0 {
|
||||
rc.next = minLen
|
||||
}
|
||||
if len(p) < rc.next {
|
||||
return
|
||||
}
|
||||
rc.next <<= 1
|
||||
|
||||
// Check whether the same transformer has appeared at least twice.
|
||||
var ss []string
|
||||
m := map[Option]int{}
|
||||
for _, ps := range p {
|
||||
if t, ok := ps.(Transform); ok {
|
||||
t := t.Option()
|
||||
if m[t] == 1 { // Transformer was used exactly once before
|
||||
tf := t.(*transformer).fnc.Type()
|
||||
ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
|
||||
}
|
||||
m[t]++
|
||||
}
|
||||
}
|
||||
if len(ss) > 0 {
|
||||
const warning = "recursive set of Transformers detected"
|
||||
const help = "consider using cmpopts.AcyclicTransformer"
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
|
||||
}
|
||||
}
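// Illustrative trigger, not part of the vendored file: a Transformer whose
// output feeds back into itself, assuming the caller imports "strings".
//
//	split := Transformer("Split", func(s string) []string {
//		return strings.Split(s, ",")
//	})
//	// Every produced element is again a string, so the transformer re-applies
//	// without bound; once the path exceeds the threshold above, Check panics
//	// and points the user at cmpopts.AcyclicTransformer.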
|
||||
|
||||
// dynChecker tracks the state needed to periodically perform checks that
|
||||
// user provided functions are symmetric and deterministic.
|
||||
// The zero value is safe for immediate use.
|
||||
type dynChecker struct{ curr, next int }
|
||||
|
||||
// Next increments the state and reports whether a check should be performed.
|
||||
//
|
||||
// Checks occur every Nth function call, where N is a triangular number:
|
||||
//
|
||||
// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
|
||||
//
|
||||
// See https://en.wikipedia.org/wiki/Triangular_number
|
||||
//
|
||||
// This sequence ensures that the cost of checks drops significantly as
|
||||
// the number of functions calls grows larger.
|
||||
func (dc *dynChecker) Next() bool {
|
||||
ok := dc.curr == dc.next
|
||||
if ok {
|
||||
dc.curr = 0
|
||||
dc.next++
|
||||
}
|
||||
dc.curr++
|
||||
return ok
|
||||
}
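// Illustrative trace, not part of the vendored file, assuming "fmt" is
// imported by the caller:
//
//	var dc dynChecker
//	for i := 0; i < 10; i++ {
//		fmt.Println(i, dc.Next())
//	}
//	// Prints true at i = 0, 1, 3, and 6 (the gap between checks grows by one
//	// each time) and false otherwise.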
|
||||
|
||||
// makeAddressable returns a value that is always addressable.
|
||||
// It returns the input verbatim if it is already addressable,
|
||||
// otherwise it creates a new value and returns an addressable copy.
|
||||
func makeAddressable(v reflect.Value) reflect.Value {
|
||||
if v.CanAddr() {
|
||||
return v
|
||||
}
|
||||
vc := reflect.New(v.Type()).Elem()
|
||||
vc.Set(v)
|
||||
return vc
|
||||
}
|
||||
31
vendor/github.com/google/go-cmp/cmp/export.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
|
||||
// a struct such that the value has read-write permissions.
|
||||
//
|
||||
// The parent struct, v, must be addressable, while f must be a StructField
|
||||
// describing the field to retrieve. If addr is false,
|
||||
// then the returned value will be shallow copied to be non-addressable.
|
||||
func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
|
||||
ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
|
||||
if !addr {
|
||||
// A field is addressable if and only if the struct is addressable.
|
||||
// If the original parent value was not addressable, shallow copy the
|
||||
// value to make it non-addressable to avoid leaking an implementation
|
||||
// detail of how forcibly exporting a field works.
|
||||
if ve.Kind() == reflect.Interface && ve.IsNil() {
|
||||
return reflect.Zero(f.Type)
|
||||
}
|
||||
return reflect.ValueOf(ve.Interface()).Convert(f.Type)
|
||||
}
|
||||
return ve
|
||||
}
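// Illustrative opt-in from the public API, not part of the vendored file:
// forced exporting is reached through the Exporter option, shown here with a
// hypothetical caller-defined type.
//
//	type ticket struct{ id int } // unexported field
//
//	allowTicket := Exporter(func(t reflect.Type) bool {
//		return t == reflect.TypeOf(ticket{})
//	})
//	Equal(ticket{1}, ticket{1}, allowTicket) // true, and no panic on the unexported field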
|
||||
18
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !cmp_debug
|
||||
// +build !cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct{}
|
||||
|
||||
func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
|
||||
return f
|
||||
}
|
||||
func (debugger) Update() {}
|
||||
func (debugger) Finish() {}
|
||||
123
vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
generated
vendored
Normal file
@@ -0,0 +1,123 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build cmp_debug
|
||||
// +build cmp_debug
|
||||
|
||||
package diff
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// The algorithm can be seen running in real-time by enabling debugging:
|
||||
// go test -tags=cmp_debug -v
|
||||
//
|
||||
// Example output:
|
||||
// === RUN TestDifference/#34
|
||||
// ┌───────────────────────────────┐
|
||||
// │ \ · · · · · · · · · · · · · · │
|
||||
// │ · # · · · · · · · · · · · · · │
|
||||
// │ · \ · · · · · · · · · · · · · │
|
||||
// │ · · \ · · · · · · · · · · · · │
|
||||
// │ · · · X # · · · · · · · · · · │
|
||||
// │ · · · # \ · · · · · · · · · · │
|
||||
// │ · · · · · # # · · · · · · · · │
|
||||
// │ · · · · · # \ · · · · · · · · │
|
||||
// │ · · · · · · · \ · · · · · · · │
|
||||
// │ · · · · · · · · \ · · · · · · │
|
||||
// │ · · · · · · · · · \ · · · · · │
|
||||
// │ · · · · · · · · · · \ · · # · │
|
||||
// │ · · · · · · · · · · · \ # # · │
|
||||
// │ · · · · · · · · · · · # # # · │
|
||||
// │ · · · · · · · · · · # # # # · │
|
||||
// │ · · · · · · · · · # # # # # · │
|
||||
// │ · · · · · · · · · · · · · · \ │
|
||||
// └───────────────────────────────┘
|
||||
// [.Y..M.XY......YXYXY.|]
|
||||
//
|
||||
// The grid represents the edit-graph where the horizontal axis represents
|
||||
// list X and the vertical axis represents list Y. The start of the two lists
|
||||
// is the top-left, while the ends are the bottom-right. The '·' represents
|
||||
// an unexplored node in the graph. The '\' indicates that the two symbols
|
||||
// from list X and Y are equal. The 'X' indicates that two symbols are similar
|
||||
// (but not exactly equal) to each other. The '#' indicates that the two symbols
|
||||
// are different (and not similar). The algorithm traverses this graph trying to
|
||||
// make the paths starting in the top-left and the bottom-right connect.
|
||||
//
|
||||
// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
|
||||
// the currently established path from the forward and reverse searches,
|
||||
// separated by a '|' character.
|
||||
|
||||
const (
|
||||
updateDelay = 100 * time.Millisecond
|
||||
finishDelay = 500 * time.Millisecond
|
||||
ansiTerminal = true // ANSI escape codes used to move terminal cursor
|
||||
)
|
||||
|
||||
var debug debugger
|
||||
|
||||
type debugger struct {
|
||||
sync.Mutex
|
||||
p1, p2 EditScript
|
||||
fwdPath, revPath *EditScript
|
||||
grid []byte
|
||||
lines int
|
||||
}
|
||||
|
||||
func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
|
||||
dbg.Lock()
|
||||
dbg.fwdPath, dbg.revPath = p1, p2
|
||||
top := "┌─" + strings.Repeat("──", nx) + "┐\n"
|
||||
row := "│ " + strings.Repeat("· ", nx) + "│\n"
|
||||
btm := "└─" + strings.Repeat("──", nx) + "┘\n"
|
||||
dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
|
||||
dbg.lines = strings.Count(dbg.String(), "\n")
|
||||
fmt.Print(dbg)
|
||||
|
||||
// Wrap the EqualFunc so that we can intercept each result.
|
||||
return func(ix, iy int) (r Result) {
|
||||
cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
|
||||
for i := range cell {
|
||||
cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
|
||||
}
|
||||
switch r = f(ix, iy); {
|
||||
case r.Equal():
|
||||
cell[0] = '\\'
|
||||
case r.Similar():
|
||||
cell[0] = 'X'
|
||||
default:
|
||||
cell[0] = '#'
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (dbg *debugger) Update() {
|
||||
dbg.print(updateDelay)
|
||||
}
|
||||
|
||||
func (dbg *debugger) Finish() {
|
||||
dbg.print(finishDelay)
|
||||
dbg.Unlock()
|
||||
}
|
||||
|
||||
func (dbg *debugger) String() string {
|
||||
dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
|
||||
for i := len(*dbg.revPath) - 1; i >= 0; i-- {
|
||||
dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
|
||||
}
|
||||
return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
|
||||
}
|
||||
|
||||
func (dbg *debugger) print(d time.Duration) {
|
||||
if ansiTerminal {
|
||||
fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
|
||||
}
|
||||
fmt.Print(dbg)
|
||||
time.Sleep(d)
|
||||
}
|
||||
402
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
Normal file
@@ -0,0 +1,402 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package diff implements an algorithm for producing edit-scripts.
|
||||
// The edit-script is a sequence of operations needed to transform one list
|
||||
// of symbols into another (or vice-versa). The edits allowed are insertions,
|
||||
// deletions, and modifications. The summation of all edits is called the
|
||||
// Levenshtein distance as this problem is well-known in computer science.
|
||||
//
|
||||
// This package prioritizes performance over accuracy. That is, the run time
|
||||
// is more important than obtaining a minimal Levenshtein distance.
|
||||
package diff
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
// EditType represents a single operation within an edit-script.
|
||||
type EditType uint8
|
||||
|
||||
const (
|
||||
// Identity indicates that a symbol pair is identical in both list X and Y.
|
||||
Identity EditType = iota
|
||||
// UniqueX indicates that a symbol only exists in X and not Y.
|
||||
UniqueX
|
||||
// UniqueY indicates that a symbol only exists in Y and not X.
|
||||
UniqueY
|
||||
// Modified indicates that a symbol pair is a modification of each other.
|
||||
Modified
|
||||
)
|
||||
|
||||
// EditScript represents the series of differences between two lists.
|
||||
type EditScript []EditType
|
||||
|
||||
// String returns a human-readable string representing the edit-script where
|
||||
// Identity, UniqueX, UniqueY, and Modified are represented by the
|
||||
// '.', 'X', 'Y', and 'M' characters, respectively.
|
||||
func (es EditScript) String() string {
|
||||
b := make([]byte, len(es))
|
||||
for i, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
b[i] = '.'
|
||||
case UniqueX:
|
||||
b[i] = 'X'
|
||||
case UniqueY:
|
||||
b[i] = 'Y'
|
||||
case Modified:
|
||||
b[i] = 'M'
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// stats returns a histogram of the number of each type of edit operation.
|
||||
func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case Identity:
|
||||
s.NI++
|
||||
case UniqueX:
|
||||
s.NX++
|
||||
case UniqueY:
|
||||
s.NY++
|
||||
case Modified:
|
||||
s.NM++
|
||||
default:
|
||||
panic("invalid edit-type")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
|
||||
// lists X and Y are equal.
|
||||
func (es EditScript) Dist() int { return len(es) - es.stats().NI }
|
||||
|
||||
// LenX is the length of the X list.
|
||||
func (es EditScript) LenX() int { return len(es) - es.stats().NY }
|
||||
|
||||
// LenY is the length of the Y list.
|
||||
func (es EditScript) LenY() int { return len(es) - es.stats().NX }
|
||||
|
||||
// EqualFunc reports whether the symbols at indexes ix and iy are equal.
|
||||
// When called by Difference, the index is guaranteed to be within nx and ny.
|
||||
type EqualFunc func(ix int, iy int) Result
|
||||
|
||||
// Result is the result of comparison.
|
||||
// NumSame is the number of sub-elements that are equal.
|
||||
// NumDiff is the number of sub-elements that are not equal.
|
||||
type Result struct{ NumSame, NumDiff int }
|
||||
|
||||
// BoolResult returns a Result that is either Equal or not Equal.
|
||||
func BoolResult(b bool) Result {
|
||||
if b {
|
||||
return Result{NumSame: 1} // Equal, Similar
|
||||
} else {
|
||||
return Result{NumDiff: 2} // Not Equal, not Similar
|
||||
}
|
||||
}
|
||||
|
||||
// Equal indicates whether the symbols are equal. Two symbols are equal
|
||||
// if and only if NumDiff == 0. If Equal, then they are also Similar.
|
||||
func (r Result) Equal() bool { return r.NumDiff == 0 }
|
||||
|
||||
// Similar indicates whether two symbols are similar and may be represented
|
||||
// by using the Modified type. As a special case, we consider binary comparisons
|
||||
// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
|
||||
//
|
||||
// The exact ratio of NumSame to NumDiff to determine similarity may change.
|
||||
func (r Result) Similar() bool {
|
||||
// Use NumSame+1 to offset NumSame so that binary comparisons are similar.
|
||||
return r.NumSame+1 >= r.NumDiff
|
||||
}
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
// Difference reports whether two lists of lengths nx and ny are equal
|
||||
// given the definition of equality provided as f.
|
||||
//
|
||||
// This function returns an edit-script, which is a sequence of operations
|
||||
// needed to convert one list into the other. The following invariants for
|
||||
// the edit-script are maintained:
|
||||
// - eq == (es.Dist()==0)
|
||||
// - nx == es.LenX()
|
||||
// - ny == es.LenY()
|
||||
//
|
||||
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
|
||||
// produces an edit-script with a minimal Levenshtein distance). This algorithm
|
||||
// favors performance over optimality. The exact output is not guaranteed to
|
||||
// be stable and may change over time.
|
||||
func Difference(nx, ny int, f EqualFunc) (es EditScript) {
|
||||
// This algorithm is based on traversing what is known as an "edit-graph".
|
||||
// See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
|
||||
// by Eugene W. Myers. Since D can be as large as N itself, this is
|
||||
// effectively O(N^2). Unlike the algorithm from that paper, we are not
|
||||
// interested in the optimal path, but in at least some "decent" path.
|
||||
//
|
||||
// For example, let X and Y be lists of symbols:
|
||||
// X = [A B C A B B A]
|
||||
// Y = [C B A B A C]
|
||||
//
|
||||
// The edit-graph can be drawn as the following:
|
||||
// A B C A B B A
|
||||
// ┌─────────────┐
|
||||
// C │_|_|\|_|_|_|_│ 0
|
||||
// B │_|\|_|_|\|\|_│ 1
|
||||
// A │\|_|_|\|_|_|\│ 2
|
||||
// B │_|\|_|_|\|\|_│ 3
|
||||
// A │\|_|_|\|_|_|\│ 4
|
||||
// C │ | |\| | | | │ 5
|
||||
// └─────────────┘ 6
|
||||
// 0 1 2 3 4 5 6 7
|
||||
//
|
||||
// List X is written along the horizontal axis, while list Y is written
|
||||
// along the vertical axis. At any point on this grid, if the symbol in
|
||||
// list X matches the corresponding symbol in list Y, then a '\' is drawn.
|
||||
// The goal of any minimal edit-script algorithm is to find a path from the
|
||||
// top-left corner to the bottom-right corner, while traveling through the
|
||||
// fewest horizontal or vertical edges.
|
||||
// A horizontal edge is equivalent to inserting a symbol from list X.
|
||||
// A vertical edge is equivalent to inserting a symbol from list Y.
|
||||
// A diagonal edge is equivalent to a matching symbol between both X and Y.
|
||||
|
||||
// Invariants:
|
||||
// - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
|
||||
// - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
|
||||
//
|
||||
// In general:
|
||||
// - fwdFrontier.X < revFrontier.X
|
||||
// - fwdFrontier.Y < revFrontier.Y
|
||||
//
|
||||
// Unless it is time for the algorithm to terminate.
|
||||
fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
|
||||
revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
|
||||
fwdFrontier := fwdPath.point // Forward search frontier
|
||||
revFrontier := revPath.point // Reverse search frontier
|
||||
|
||||
// Search budget bounds the cost of searching for better paths.
|
||||
// The longest sequence of non-matching symbols that can be tolerated is
|
||||
// approximately the square-root of the search budget.
|
||||
searchBudget := 4 * (nx + ny) // O(n)
|
||||
|
||||
// Running the tests with the "cmp_debug" build tag prints a visualization
|
||||
// of the algorithm running in real-time. This is educational for
|
||||
// understanding how the algorithm works. See debug_enable.go.
|
||||
f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
|
||||
|
||||
// The algorithm below is a greedy, meet-in-the-middle algorithm for
|
||||
// computing sub-optimal edit-scripts between two lists.
|
||||
//
|
||||
// The algorithm is approximately as follows:
|
||||
// - Searching for differences switches back-and-forth between
|
||||
// a search that starts at the beginning (the top-left corner), and
|
||||
// a search that starts at the end (the bottom-right corner).
|
||||
// The goal of the search is to connect with the search
|
||||
// from the opposite corner.
|
||||
// - As we search, we build a path in a greedy manner,
|
||||
// where the first match seen is added to the path (this is sub-optimal,
|
||||
// but provides a decent result in practice). When matches are found,
|
||||
// we try the next pair of symbols in the lists and follow all matches
|
||||
// as far as possible.
|
||||
// - When searching for matches, we search along a diagonal going through
|
||||
// through the "frontier" point. If no matches are found,
|
||||
// we advance the frontier towards the opposite corner.
|
||||
// - This algorithm terminates when either the X coordinates or the
|
||||
// Y coordinates of the forward and reverse frontier points intersect.
|
||||
|
||||
// This algorithm is correct even if searching only in the forward direction
|
||||
// or in the reverse direction. We do both because it is commonly observed
|
||||
// that two lists differ because elements were added to the front
|
||||
// or end of the other list.
|
||||
//
|
||||
// Non-deterministically start with either the forward or reverse direction
|
||||
// to introduce some deliberate instability so that we have the flexibility
|
||||
// to change this algorithm in the future.
|
||||
if flags.Deterministic || randBool {
|
||||
goto forwardSearch
|
||||
} else {
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
forwardSearch:
|
||||
{
|
||||
// Forward search from the beginning.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
|
||||
switch {
|
||||
case p.X >= revPath.X || p.Y < fwdPath.Y:
|
||||
stop1 = true // Hit top-right corner
|
||||
case p.Y >= revPath.Y || p.X < fwdPath.X:
|
||||
stop2 = true // Hit bottom-left corner
|
||||
case f(p.X, p.Y).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
fwdPath.connect(p, f)
|
||||
fwdPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(fwdPath.X, fwdPath.Y).Equal() {
|
||||
break
|
||||
}
|
||||
fwdPath.append(Identity)
|
||||
}
|
||||
fwdFrontier = fwdPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards reverse point.
|
||||
if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
|
||||
fwdFrontier.X++
|
||||
} else {
|
||||
fwdFrontier.Y++
|
||||
}
|
||||
goto reverseSearch
|
||||
}
|
||||
|
||||
reverseSearch:
|
||||
{
|
||||
// Reverse search from the end.
|
||||
if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
|
||||
goto finishSearch
|
||||
}
|
||||
for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
|
||||
// Search in a diagonal pattern for a match.
|
||||
z := zigzag(i)
|
||||
p := point{revFrontier.X - z, revFrontier.Y + z}
|
||||
switch {
|
||||
case fwdPath.X >= p.X || revPath.Y < p.Y:
|
||||
stop1 = true // Hit bottom-left corner
|
||||
case fwdPath.Y >= p.Y || revPath.X < p.X:
|
||||
stop2 = true // Hit top-right corner
|
||||
case f(p.X-1, p.Y-1).Equal():
|
||||
// Match found, so connect the path to this point.
|
||||
revPath.connect(p, f)
|
||||
revPath.append(Identity)
|
||||
// Follow sequence of matches as far as possible.
|
||||
for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
|
||||
if !f(revPath.X-1, revPath.Y-1).Equal() {
|
||||
break
|
||||
}
|
||||
revPath.append(Identity)
|
||||
}
|
||||
revFrontier = revPath.point
|
||||
stop1, stop2 = true, true
|
||||
default:
|
||||
searchBudget-- // Match not found
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
// Advance the frontier towards forward point.
|
||||
if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
|
||||
revFrontier.X--
|
||||
} else {
|
||||
revFrontier.Y--
|
||||
}
|
||||
goto forwardSearch
|
||||
}
|
||||
|
||||
finishSearch:
|
||||
// Join the forward and reverse paths and then append the reverse path.
|
||||
fwdPath.connect(revPath.point, f)
|
||||
for i := len(revPath.es) - 1; i >= 0; i-- {
|
||||
t := revPath.es[i]
|
||||
revPath.es = revPath.es[:i]
|
||||
fwdPath.append(t)
|
||||
}
|
||||
debug.Finish()
|
||||
return fwdPath.es
|
||||
}
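// Illustrative in-package usage, not part of the vendored file: diffing two
// string lists through a BoolResult-based EqualFunc.
//
//	x := []string{"A", "B", "C", "A"}
//	y := []string{"C", "B", "A"}
//	es := Difference(len(x), len(y), func(ix, iy int) Result {
//		return BoolResult(x[ix] == y[iy])
//	})
//	// es.LenX() == 4, es.LenY() == 3, and es.Dist() > 0 since the lists differ;
//	// the exact edit-script characters are deliberately not guaranteed to be stable.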
|
||||
|
||||
type path struct {
|
||||
dir int // +1 if forward, -1 if reverse
|
||||
point // Leading point of the EditScript path
|
||||
es EditScript
|
||||
}
|
||||
|
||||
// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
|
||||
// to the edit-script to connect p.point to dst.
|
||||
func (p *path) connect(dst point, f EqualFunc) {
|
||||
if p.dir > 0 {
|
||||
// Connect in forward direction.
|
||||
for dst.X > p.X && dst.Y > p.Y {
|
||||
switch r := f(p.X, p.Y); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case dst.X-p.X >= dst.Y-p.Y:
|
||||
p.append(UniqueX)
|
||||
default:
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
for dst.X > p.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for dst.Y > p.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
} else {
|
||||
// Connect in reverse direction.
|
||||
for p.X > dst.X && p.Y > dst.Y {
|
||||
switch r := f(p.X-1, p.Y-1); {
|
||||
case r.Equal():
|
||||
p.append(Identity)
|
||||
case r.Similar():
|
||||
p.append(Modified)
|
||||
case p.Y-dst.Y >= p.X-dst.X:
|
||||
p.append(UniqueY)
|
||||
default:
|
||||
p.append(UniqueX)
|
||||
}
|
||||
}
|
||||
for p.X > dst.X {
|
||||
p.append(UniqueX)
|
||||
}
|
||||
for p.Y > dst.Y {
|
||||
p.append(UniqueY)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *path) append(t EditType) {
|
||||
p.es = append(p.es, t)
|
||||
switch t {
|
||||
case Identity, Modified:
|
||||
p.add(p.dir, p.dir)
|
||||
case UniqueX:
|
||||
p.add(p.dir, 0)
|
||||
case UniqueY:
|
||||
p.add(0, p.dir)
|
||||
}
|
||||
debug.Update()
|
||||
}
|
||||
|
||||
type point struct{ X, Y int }
|
||||
|
||||
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
|
||||
|
||||
// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
|
||||
//
|
||||
// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
|
||||
func zigzag(x int) int {
|
||||
if x&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x >> 1
|
||||
}
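// Illustrative values, not part of the vendored file:
//
//	zigzag(0), zigzag(1), zigzag(2), zigzag(3), zigzag(4), zigzag(5)
//	// => 0, -1, +1, -2, +2, -3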
|
||||
9
vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package flags
|
||||
|
||||
// Deterministic controls whether the output of Diff should be deterministic.
|
||||
// This is only used for testing.
|
||||
var Deterministic bool
|
||||
99
vendor/github.com/google/go-cmp/cmp/internal/function/func.go
generated
vendored
Normal file
@@ -0,0 +1,99 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package function provides functionality for identifying function types.
|
||||
package function
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type funcType int
|
||||
|
||||
const (
|
||||
_ funcType = iota
|
||||
|
||||
tbFunc // func(T) bool
|
||||
ttbFunc // func(T, T) bool
|
||||
trbFunc // func(T, R) bool
|
||||
tibFunc // func(T, I) bool
|
||||
trFunc // func(T) R
|
||||
|
||||
Equal = ttbFunc // func(T, T) bool
|
||||
EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
|
||||
Transformer = trFunc // func(T) R
|
||||
ValueFilter = ttbFunc // func(T, T) bool
|
||||
Less = ttbFunc // func(T, T) bool
|
||||
ValuePredicate = tbFunc // func(T) bool
|
||||
KeyValuePredicate = trbFunc // func(T, R) bool
|
||||
)
|
||||
|
||||
var boolType = reflect.TypeOf(true)
|
||||
|
||||
// IsType reports whether the reflect.Type is of the specified function type.
|
||||
func IsType(t reflect.Type, ft funcType) bool {
|
||||
if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
|
||||
return false
|
||||
}
|
||||
ni, no := t.NumIn(), t.NumOut()
|
||||
switch ft {
|
||||
case tbFunc: // func(T) bool
|
||||
if ni == 1 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case ttbFunc: // func(T, T) bool
|
||||
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trbFunc: // func(T, R) bool
|
||||
if ni == 2 && no == 1 && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case tibFunc: // func(T, I) bool
|
||||
if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
|
||||
return true
|
||||
}
|
||||
case trFunc: // func(T) R
|
||||
if ni == 1 && no == 1 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
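// Illustrative checks, not part of the vendored file:
//
//	IsType(reflect.TypeOf(func(x, y int) bool { return x < y }), Equal)       // true: func(T, T) bool
//	IsType(reflect.TypeOf(func(x int, y string) bool { return true }), Equal) // false: parameter types differ
//	IsType(reflect.TypeOf(func(s string) int { return len(s) }), Transformer) // true: func(T) R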
|
||||
|
||||
var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
|
||||
|
||||
// NameOf returns the name of the function value.
|
||||
func NameOf(v reflect.Value) string {
|
||||
fnc := runtime.FuncForPC(v.Pointer())
|
||||
if fnc == nil {
|
||||
return "<unknown>"
|
||||
}
|
||||
fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
|
||||
|
||||
// Method closures have a "-fm" suffix.
|
||||
fullName = strings.TrimSuffix(fullName, "-fm")
|
||||
|
||||
var name string
|
||||
for len(fullName) > 0 {
|
||||
inParen := strings.HasSuffix(fullName, ")")
|
||||
fullName = strings.TrimSuffix(fullName, ")")
|
||||
|
||||
s := lastIdentRx.FindString(fullName)
|
||||
if s == "" {
|
||||
break
|
||||
}
|
||||
name = s + "." + name
|
||||
fullName = strings.TrimSuffix(fullName, s)
|
||||
|
||||
if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
|
||||
fullName = fullName[:i]
|
||||
}
|
||||
fullName = strings.TrimSuffix(fullName, ".")
|
||||
}
|
||||
return strings.TrimSuffix(name, ".")
|
||||
}
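// Illustrative result, not part of the vendored file, assuming the caller
// imports "strings":
//
//	NameOf(reflect.ValueOf(strings.HasPrefix)) // "strings.HasPrefix"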
|
||||
164
vendor/github.com/google/go-cmp/cmp/internal/value/name.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
|
||||
// TypeString is nearly identical to reflect.Type.String,
|
||||
// but has an additional option to specify that full type names be used.
|
||||
func TypeString(t reflect.Type, qualified bool) string {
|
||||
return string(appendTypeName(nil, t, qualified, false))
|
||||
}
|
||||
|
||||
func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
|
||||
// BUG: Go reflection provides no way to disambiguate two named types
|
||||
// of the same name and within the same package,
|
||||
// but declared within the namespace of different functions.
|
||||
|
||||
// Use the "any" alias instead of "interface{}" for better readability.
|
||||
if t == anyType {
|
||||
return append(b, "any"...)
|
||||
}
|
||||
|
||||
// Named type.
|
||||
if t.Name() != "" {
|
||||
if qualified && t.PkgPath() != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, t.PkgPath()...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
b = append(b, t.Name()...)
|
||||
} else {
|
||||
b = append(b, t.String()...)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Unnamed type.
|
||||
switch k := t.Kind(); k {
|
||||
case reflect.Bool, reflect.String, reflect.UnsafePointer,
|
||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
b = append(b, k.String()...)
|
||||
case reflect.Chan:
|
||||
if t.ChanDir() == reflect.RecvDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, "chan"...)
|
||||
if t.ChanDir() == reflect.SendDir {
|
||||
b = append(b, "<-"...)
|
||||
}
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Func:
|
||||
if !elideFunc {
|
||||
b = append(b, "func"...)
|
||||
}
|
||||
b = append(b, '(')
|
||||
for i := 0; i < t.NumIn(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
if i == t.NumIn()-1 && t.IsVariadic() {
|
||||
b = append(b, "..."...)
|
||||
b = appendTypeName(b, t.In(i).Elem(), qualified, false)
|
||||
} else {
|
||||
b = appendTypeName(b, t.In(i), qualified, false)
|
||||
}
|
||||
}
|
||||
b = append(b, ')')
|
||||
switch t.NumOut() {
|
||||
case 0:
|
||||
// Do nothing
|
||||
case 1:
|
||||
b = append(b, ' ')
|
||||
b = appendTypeName(b, t.Out(0), qualified, false)
|
||||
default:
|
||||
b = append(b, " ("...)
|
||||
for i := 0; i < t.NumOut(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
b = appendTypeName(b, t.Out(i), qualified, false)
|
||||
}
|
||||
b = append(b, ')')
|
||||
}
|
||||
case reflect.Struct:
|
||||
b = append(b, "struct{ "...)
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if !sf.Anonymous {
|
||||
if qualified && sf.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, sf.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, sf.Name...)
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = appendTypeName(b, sf.Type, qualified, false)
|
||||
if sf.Tag != "" {
|
||||
b = append(b, ' ')
|
||||
b = strconv.AppendQuote(b, string(sf.Tag))
|
||||
}
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
case reflect.Slice, reflect.Array:
|
||||
b = append(b, '[')
|
||||
if k == reflect.Array {
|
||||
b = strconv.AppendUint(b, uint64(t.Len()), 10)
|
||||
}
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Map:
|
||||
b = append(b, "map["...)
|
||||
b = appendTypeName(b, t.Key(), qualified, false)
|
||||
b = append(b, ']')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Ptr:
|
||||
b = append(b, '*')
|
||||
b = appendTypeName(b, t.Elem(), qualified, false)
|
||||
case reflect.Interface:
|
||||
b = append(b, "interface{ "...)
|
||||
for i := 0; i < t.NumMethod(); i++ {
|
||||
if i > 0 {
|
||||
b = append(b, "; "...)
|
||||
}
|
||||
m := t.Method(i)
|
||||
if qualified && m.PkgPath != "" {
|
||||
b = append(b, '"')
|
||||
b = append(b, m.PkgPath...)
|
||||
b = append(b, '"')
|
||||
b = append(b, '.')
|
||||
}
|
||||
b = append(b, m.Name...)
|
||||
b = appendTypeName(b, m.Type, qualified, true)
|
||||
}
|
||||
if b[len(b)-1] == ' ' {
|
||||
b = b[:len(b)-1]
|
||||
} else {
|
||||
b = append(b, ' ')
|
||||
}
|
||||
b = append(b, '}')
|
||||
default:
|
||||
panic("invalid kind: " + k.String())
|
||||
}
|
||||
return b
|
||||
}
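// Illustrative outputs, not part of the vendored file:
//
//	TypeString(reflect.TypeOf(map[string][]int{}), false) // "map[string][]int"
//	TypeString(reflect.TypeOf(struct{ N int }{}), false)  // "struct{ N int }"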
|
||||
34
vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
generated
vendored
Normal file
@@ -0,0 +1,34 @@
|
||||
// Copyright 2018, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Pointer is an opaque typed pointer and is guaranteed to be comparable.
|
||||
type Pointer struct {
|
||||
p unsafe.Pointer
|
||||
t reflect.Type
|
||||
}
|
||||
|
||||
// PointerOf returns a Pointer from v, which must be a
|
||||
// reflect.Ptr, reflect.Slice, or reflect.Map.
|
||||
func PointerOf(v reflect.Value) Pointer {
|
||||
// The proper representation of a pointer is unsafe.Pointer,
|
||||
// which is necessary if the GC ever uses a moving collector.
|
||||
return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
|
||||
}
|
||||
|
||||
// IsNil reports whether the pointer is nil.
|
||||
func (p Pointer) IsNil() bool {
|
||||
return p.p == nil
|
||||
}
|
||||
|
||||
// Uintptr returns the pointer as a uintptr.
|
||||
func (p Pointer) Uintptr() uintptr {
|
||||
return uintptr(p.p)
|
||||
}
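// Illustrative property, not part of the vendored file: slices sharing the
// same backing array and element type compare equal as Pointers.
//
//	v := []int{1, 2, 3}
//	PointerOf(reflect.ValueOf(v)) == PointerOf(reflect.ValueOf(v[:1])) // true
//	PointerOf(reflect.ValueOf(v)).IsNil()                              // false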
|
||||
106
vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
generated
vendored
Normal file
@@ -0,0 +1,106 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// SortKeys sorts a list of map keys, deduplicating keys if necessary.
|
||||
// The type of each value must be comparable.
|
||||
func SortKeys(vs []reflect.Value) []reflect.Value {
|
||||
if len(vs) == 0 {
|
||||
return vs
|
||||
}
|
||||
|
||||
// Sort the map keys.
|
||||
sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
|
||||
|
||||
// Deduplicate keys (fails for NaNs).
|
||||
vs2 := vs[:1]
|
||||
for _, v := range vs[1:] {
|
||||
if isLess(vs2[len(vs2)-1], v) {
|
||||
vs2 = append(vs2, v)
|
||||
}
|
||||
}
|
||||
return vs2
|
||||
}
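// Illustrative usage, not part of the vendored file:
//
//	m := map[int]string{3: "c", 1: "a", 2: "b"}
//	keys := SortKeys(reflect.ValueOf(m).MapKeys())
//	// keys now hold 1, 2, 3 in that order, giving a deterministic iteration order.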
|
||||
|
||||
// isLess is a generic function for sorting arbitrary map keys.
|
||||
// The inputs must be of the same type and must be comparable.
|
||||
func isLess(x, y reflect.Value) bool {
|
||||
switch x.Type().Kind() {
|
||||
case reflect.Bool:
|
||||
return !x.Bool() && y.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return x.Int() < y.Int()
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return x.Uint() < y.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
// NOTE: This does not sort -0 as less than +0
|
||||
// since Go maps treat -0 and +0 as equal keys.
|
||||
fx, fy := x.Float(), y.Float()
|
||||
return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
cx, cy := x.Complex(), y.Complex()
|
||||
rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
|
||||
if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
|
||||
return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
|
||||
}
|
||||
return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
|
||||
case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
|
||||
return x.Pointer() < y.Pointer()
|
||||
case reflect.String:
|
||||
return x.String() < y.String()
|
||||
case reflect.Array:
|
||||
for i := 0; i < x.Len(); i++ {
|
||||
if isLess(x.Index(i), y.Index(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Index(i), x.Index(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Struct:
|
||||
for i := 0; i < x.NumField(); i++ {
|
||||
if isLess(x.Field(i), y.Field(i)) {
|
||||
return true
|
||||
}
|
||||
if isLess(y.Field(i), x.Field(i)) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return false
|
||||
case reflect.Interface:
|
||||
vx, vy := x.Elem(), y.Elem()
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return !vx.IsValid() && vy.IsValid()
|
||||
}
|
||||
tx, ty := vx.Type(), vy.Type()
|
||||
if tx == ty {
|
||||
return isLess(x.Elem(), y.Elem())
|
||||
}
|
||||
if tx.Kind() != ty.Kind() {
|
||||
return vx.Kind() < vy.Kind()
|
||||
}
|
||||
if tx.String() != ty.String() {
|
||||
return tx.String() < ty.String()
|
||||
}
|
||||
if tx.PkgPath() != ty.PkgPath() {
|
||||
return tx.PkgPath() < ty.PkgPath()
|
||||
}
|
||||
// This can happen in rare situations, so we fallback to just comparing
|
||||
// the unique pointer for a reflect.Type. This guarantees deterministic
|
||||
// ordering within a program, but it is obviously not stable.
|
||||
return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
|
||||
default:
|
||||
// Must be Func, Map, or Slice; which are not comparable.
|
||||
panic(fmt.Sprintf("%T is not comparable", x.Type()))
|
||||
}
|
||||
}
|
||||
vendor/github.com/google/go-cmp/cmp/options.go (generated, vendored, new file, 554 lines)
@@ -0,0 +1,554 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/function"
|
||||
)
|
||||
|
||||
// Option configures for specific behavior of [Equal] and [Diff]. In particular,
|
||||
// the fundamental Option functions ([Ignore], [Transformer], and [Comparer]),
|
||||
// configure how equality is determined.
|
||||
//
|
||||
// The fundamental options may be composed with filters ([FilterPath] and
|
||||
// [FilterValues]) to control the scope over which they are applied.
|
||||
//
|
||||
// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
|
||||
// for creating options that may be used with [Equal] and [Diff].
|
||||
type Option interface {
|
||||
// filter applies all filters and returns the option that remains.
|
||||
// Each option may only read s.curPath and call s.callTTBFunc.
|
||||
//
|
||||
// An Options is returned only if multiple comparers or transformers
|
||||
// can apply simultaneously and will only contain values of those types
|
||||
// or sub-Options containing values of those types.
|
||||
filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
|
||||
}
|
||||
|
||||
// applicableOption represents the following types:
|
||||
//
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Grouping: Options
|
||||
type applicableOption interface {
|
||||
Option
|
||||
|
||||
// apply executes the option, which may mutate s or panic.
|
||||
apply(s *state, vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
// coreOption represents the following types:
|
||||
//
|
||||
// Fundamental: ignore | validator | *comparer | *transformer
|
||||
// Filters: *pathFilter | *valuesFilter
|
||||
type coreOption interface {
|
||||
Option
|
||||
isCore()
|
||||
}
|
||||
|
||||
type core struct{}
|
||||
|
||||
func (core) isCore() {}
|
||||
|
||||
// Options is a list of [Option] values that also satisfies the [Option] interface.
|
||||
// Helper comparison packages may return an Options value when packing multiple
|
||||
// [Option] values into a single [Option]. When this package processes an Options,
|
||||
// it will be implicitly expanded into a flat list.
|
||||
//
|
||||
// Applying a filter on an Options is equivalent to applying that same filter
|
||||
// on all individual options held within.
|
||||
type Options []Option
|
||||
|
||||
func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
|
||||
for _, opt := range opts {
|
||||
switch opt := opt.filter(s, t, vx, vy); opt.(type) {
|
||||
case ignore:
|
||||
return ignore{} // Only ignore can short-circuit evaluation
|
||||
case validator:
|
||||
out = validator{} // Takes precedence over comparer or transformer
|
||||
case *comparer, *transformer, Options:
|
||||
switch out.(type) {
|
||||
case nil:
|
||||
out = opt
|
||||
case validator:
|
||||
// Keep validator
|
||||
case *comparer, *transformer, Options:
|
||||
out = Options{out, opt} // Conflicting comparers or transformers
|
||||
}
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func (opts Options) apply(s *state, _, _ reflect.Value) {
|
||||
const warning = "ambiguous set of applicable options"
|
||||
const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
|
||||
var ss []string
|
||||
for _, opt := range flattenOptions(nil, opts) {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
set := strings.Join(ss, "\n\t")
|
||||
panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
|
||||
}
|
||||
|
||||
func (opts Options) String() string {
|
||||
var ss []string
|
||||
for _, opt := range opts {
|
||||
ss = append(ss, fmt.Sprint(opt))
|
||||
}
|
||||
return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
|
||||
}
|
||||
|
||||
// FilterPath returns a new [Option] where opt is only evaluated if filter f
|
||||
// returns true for the current [Path] in the value tree.
|
||||
//
|
||||
// This filter is called even if a slice element or map entry is missing and
|
||||
// provides an opportunity to ignore such cases. The filter function must be
|
||||
// symmetric such that the filter result is identical regardless of whether the
|
||||
// missing value is from x or y.
|
||||
//
|
||||
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
|
||||
// a previously filtered [Option].
|
||||
func FilterPath(f func(Path) bool, opt Option) Option {
|
||||
if f == nil {
|
||||
panic("invalid path filter function")
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
return &pathFilter{fnc: f, opt: opt}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pathFilter struct {
|
||||
core
|
||||
fnc func(Path) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if f.fnc(s.curPath) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f pathFilter) String() string {
|
||||
return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
|
||||
}
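// Editor's note: the following usage sketch is added for clarity and is not
// part of this vendored file. It uses only the public cmp API shown above
// (FilterPath, Ignore, Equal); the Report type and its fields are made up.
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/google/go-cmp/cmp"
//	)
//
//	type Report struct{ ID, Timestamp string }
//
//	func main() {
//		ignoreTimestamp := cmp.FilterPath(func(p cmp.Path) bool {
//			return p.Last().String() == ".Timestamp"
//		}, cmp.Ignore())
//		x := Report{ID: "a", Timestamp: "2024-01-01"}
//		y := Report{ID: "a", Timestamp: "2024-06-01"}
//		fmt.Println(cmp.Equal(x, y, ignoreTimestamp)) // true
//	}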
|
||||
|
||||
// FilterValues returns a new [Option] where opt is only evaluated if filter f,
|
||||
// which is a function of the form "func(T, T) bool", returns true for the
|
||||
// current pair of values being compared. If either value is invalid or
|
||||
// the type of the values is not assignable to T, then this filter implicitly
|
||||
// returns false.
|
||||
//
|
||||
// The filter function must be
|
||||
// symmetric (i.e., agnostic to the order of the inputs) and
|
||||
// deterministic (i.e., produces the same result when given the same inputs).
|
||||
// If T is an interface, it is possible that f is called with two values with
|
||||
// different concrete types that both implement T.
|
||||
//
|
||||
// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
|
||||
// a previously filtered [Option].
|
||||
func FilterValues(f interface{}, opt Option) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid values filter function: %T", f))
|
||||
}
|
||||
if opt := normalizeOption(opt); opt != nil {
|
||||
vf := &valuesFilter{fnc: v, opt: opt}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
vf.typ = ti
|
||||
}
|
||||
return vf
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type valuesFilter struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
opt Option
|
||||
}
|
||||
|
||||
func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
|
||||
return nil
|
||||
}
|
||||
if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
|
||||
return f.opt.filter(s, t, vx, vy)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f valuesFilter) String() string {
|
||||
return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
|
||||
}
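// Editor's note: illustrative usage sketch, not part of this vendored file.
// FilterValues restricts a case-insensitive Comparer to pairs of non-empty
// strings; assumes the cmp and strings packages are imported.
//
//	caseInsensitive := cmp.FilterValues(func(x, y string) bool {
//		return x != "" && y != ""
//	}, cmp.Comparer(strings.EqualFold))
//
//	cmp.Equal("Hello", "hello", caseInsensitive) // true
//	cmp.Equal("", "hello", caseInsensitive)      // false: the filter does not match, so plain == applies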
|
||||
|
||||
// Ignore is an [Option] that causes all comparisons to be ignored.
|
||||
// This value is intended to be combined with [FilterPath] or [FilterValues].
|
||||
// It is an error to pass an unfiltered Ignore option to [Equal].
|
||||
func Ignore() Option { return ignore{} }
|
||||
|
||||
type ignore struct{ core }
|
||||
|
||||
func (ignore) isFiltered() bool { return false }
|
||||
func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
|
||||
func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
|
||||
func (ignore) String() string { return "Ignore()" }
|
||||
|
||||
// validator is a sentinel Option type to indicate that some options could not
|
||||
// be evaluated due to unexported fields, missing slice elements, or
|
||||
// missing map entries. Both values are validator only for unexported fields.
|
||||
type validator struct{ core }
|
||||
|
||||
func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
return validator{}
|
||||
}
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
return validator{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (validator) apply(s *state, vx, vy reflect.Value) {
|
||||
// Implies missing slice element or map entry.
|
||||
if !vx.IsValid() || !vy.IsValid() {
|
||||
s.report(vx.IsValid() == vy.IsValid(), 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Unable to Interface implies unexported field without visibility access.
|
||||
if !vx.CanInterface() || !vy.CanInterface() {
|
||||
help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
|
||||
var name string
|
||||
if t := s.curPath.Index(-2).Type(); t.Name() != "" {
|
||||
// Named type with unexported fields.
|
||||
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
|
||||
if _, ok := reflect.New(t).Interface().(error); ok {
|
||||
help = "consider using cmpopts.EquateErrors to compare error values"
|
||||
} else if t.Comparable() {
|
||||
help = "consider using cmpopts.EquateComparable to compare comparable Go types"
|
||||
}
|
||||
} else {
|
||||
// Unnamed type with unexported fields. Derive PkgPath from field.
|
||||
var pkgPath string
|
||||
for i := 0; i < t.NumField() && pkgPath == ""; i++ {
|
||||
pkgPath = t.Field(i).PkgPath
|
||||
}
|
||||
name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
|
||||
}
|
||||
panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
|
||||
}
|
||||
|
||||
panic("not reachable")
|
||||
}
|
||||
|
||||
// identRx represents a valid identifier according to the Go specification.
|
||||
const identRx = `[_\p{L}][_\p{L}\p{N}]*`
|
||||
|
||||
var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
|
||||
|
||||
// Transformer returns an [Option] that applies a transformation function that
|
||||
// converts values of a certain type into that of another.
|
||||
//
|
||||
// The transformer f must be a function "func(T) R" that converts values of
|
||||
// type T to those of type R and is implicitly filtered to input values
|
||||
// assignable to T. The transformer must not mutate T in any way.
|
||||
//
|
||||
// To help prevent some cases of infinite recursive cycles applying the
|
||||
// same transform to the output of itself (e.g., in the case where the
|
||||
// input and output types are the same), an implicit filter is added such that
|
||||
// a transformer is applicable only if that exact transformer is not already
|
||||
// in the tail of the [Path] since the last non-[Transform] step.
|
||||
// For situations where the implicit filter is still insufficient,
|
||||
// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
|
||||
// which adds a filter to prevent the transformer from
|
||||
// being recursively applied upon itself.
|
||||
//
|
||||
// The name is a user provided label that is used as the [Transform.Name] in the
|
||||
// transformation [PathStep] (and eventually shown in the [Diff] output).
|
||||
// The name must be a valid identifier or qualified identifier in Go syntax.
|
||||
// If empty, an arbitrary name is used.
|
||||
func Transformer(name string, f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid transformer function: %T", f))
|
||||
}
|
||||
if name == "" {
|
||||
name = function.NameOf(v)
|
||||
if !identsRx.MatchString(name) {
|
||||
name = "λ" // Lambda-symbol as placeholder name
|
||||
}
|
||||
} else if !identsRx.MatchString(name) {
|
||||
panic(fmt.Sprintf("invalid name: %q", name))
|
||||
}
|
||||
tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
tr.typ = ti
|
||||
}
|
||||
return tr
|
||||
}
|
||||
|
||||
type transformer struct {
|
||||
core
|
||||
name string
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T) R
|
||||
}
|
||||
|
||||
func (tr *transformer) isFiltered() bool { return tr.typ != nil }
|
||||
|
||||
func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
for i := len(s.curPath) - 1; i >= 0; i-- {
|
||||
if t, ok := s.curPath[i].(Transform); !ok {
|
||||
break // Hit most recent non-Transform step
|
||||
} else if tr == t.trans {
|
||||
return nil // Cannot directly use same Transform
|
||||
}
|
||||
}
|
||||
if tr.typ == nil || t.AssignableTo(tr.typ) {
|
||||
return tr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
|
||||
step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
|
||||
vvx := s.callTRFunc(tr.fnc, vx, step)
|
||||
vvy := s.callTRFunc(tr.fnc, vy, step)
|
||||
step.vx, step.vy = vvx, vvy
|
||||
s.compareAny(step)
|
||||
}
|
||||
|
||||
func (tr transformer) String() string {
|
||||
return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
|
||||
}
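// Editor's note: illustrative usage sketch, not part of this vendored file.
// A Transformer that sorts []int before comparison so that element order is
// ignored; assumes the cmp and sort packages are imported.
//
//	sortInts := cmp.Transformer("Sort", func(in []int) []int {
//		out := append([]int(nil), in...) // copy; a Transformer must not mutate its input
//		sort.Ints(out)
//		return out
//	})
//
//	cmp.Equal([]int{3, 1, 2}, []int{1, 2, 3}, sortInts) // true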
|
||||
|
||||
// Comparer returns an [Option] that determines whether two values are equal
|
||||
// to each other.
|
||||
//
|
||||
// The comparer f must be a function "func(T, T) bool" and is implicitly
|
||||
// filtered to input values assignable to T. If T is an interface, it is
|
||||
// possible that f is called with two values of different concrete types that
|
||||
// both implement T.
|
||||
//
|
||||
// The equality function must be:
|
||||
// - Symmetric: equal(x, y) == equal(y, x)
|
||||
// - Deterministic: equal(x, y) == equal(x, y)
|
||||
// - Pure: equal(x, y) does not modify x or y
|
||||
func Comparer(f interface{}) Option {
|
||||
v := reflect.ValueOf(f)
|
||||
if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
|
||||
panic(fmt.Sprintf("invalid comparer function: %T", f))
|
||||
}
|
||||
cm := &comparer{fnc: v}
|
||||
if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
|
||||
cm.typ = ti
|
||||
}
|
||||
return cm
|
||||
}
|
||||
|
||||
type comparer struct {
|
||||
core
|
||||
typ reflect.Type // T
|
||||
fnc reflect.Value // func(T, T) bool
|
||||
}
|
||||
|
||||
func (cm *comparer) isFiltered() bool { return cm.typ != nil }
|
||||
|
||||
func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
if cm.typ == nil || t.AssignableTo(cm.typ) {
|
||||
return cm
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
|
||||
eq := s.callTTBFunc(cm.fnc, vx, vy)
|
||||
s.report(eq, reportByFunc)
|
||||
}
|
||||
|
||||
func (cm comparer) String() string {
|
||||
return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
|
||||
}
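// Editor's note: illustrative usage sketch, not part of this vendored file.
// A Comparer that treats float64 values as equal within an absolute tolerance;
// assumes the cmp and math packages are imported, and the 1e-9 tolerance is an
// arbitrary choice for the example.
//
//	approx := cmp.Comparer(func(x, y float64) bool {
//		return math.Abs(x-y) <= 1e-9
//	})
//
//	cmp.Equal(0.1+0.2, 0.3, approx) // true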
|
||||
|
||||
// Exporter returns an [Option] that specifies whether [Equal] is allowed to
|
||||
// introspect into the unexported fields of certain struct types.
|
||||
//
|
||||
// Users of this option must understand that comparing on unexported fields
|
||||
// from external packages is not safe since changes in the internal
|
||||
// implementation of some external package may cause the result of [Equal]
|
||||
// to unexpectedly change. However, it may be valid to use this option on types
|
||||
// defined in an internal package where the semantic meaning of an unexported
|
||||
// field is in the control of the user.
|
||||
//
|
||||
// In many cases, a custom [Comparer] should be used instead that defines
|
||||
// equality as a function of the public API of a type rather than the underlying
|
||||
// unexported implementation.
|
||||
//
|
||||
// For example, the [reflect.Type] documentation defines equality to be determined
|
||||
// by the == operator on the interface (essentially performing a shallow pointer
|
||||
// comparison) and most attempts to compare *[regexp.Regexp] types are interested
|
||||
// in only checking that the regular expression strings are equal.
|
||||
// Both of these are accomplished using [Comparer] options:
|
||||
//
|
||||
// Comparer(func(x, y reflect.Type) bool { return x == y })
|
||||
// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
|
||||
//
|
||||
// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
|
||||
// option can be used to ignore all unexported fields on specified struct types.
|
||||
func Exporter(f func(reflect.Type) bool) Option {
|
||||
return exporter(f)
|
||||
}
|
||||
|
||||
type exporter func(reflect.Type) bool
|
||||
|
||||
func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
|
||||
|
||||
// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
|
||||
// unexported fields of the specified struct types.
|
||||
//
|
||||
// See [Exporter] for the proper use of this option.
|
||||
func AllowUnexported(types ...interface{}) Option {
|
||||
m := make(map[reflect.Type]bool)
|
||||
for _, typ := range types {
|
||||
t := reflect.TypeOf(typ)
|
||||
if t.Kind() != reflect.Struct {
|
||||
panic(fmt.Sprintf("invalid struct type: %T", typ))
|
||||
}
|
||||
m[t] = true
|
||||
}
|
||||
return exporter(func(t reflect.Type) bool { return m[t] })
|
||||
}
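// Editor's note: illustrative usage sketch, not part of this vendored file.
// AllowUnexported is only safe for types the caller controls; the point type
// below is made up and the cmp package is assumed to be imported.
//
//	type point struct{ x, y int }
//
//	cmp.Equal(point{1, 2}, point{1, 2}, cmp.AllowUnexported(point{})) // true
//	// Without the option, Equal panics on the unexported fields of point.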
|
||||
|
||||
// Result represents the comparison result for a single node and
|
||||
// is provided by cmp when calling Report (see [Reporter]).
|
||||
type Result struct {
|
||||
_ [0]func() // Make Result incomparable
|
||||
flags resultFlags
|
||||
}
|
||||
|
||||
// Equal reports whether the node was determined to be equal or not.
|
||||
// As a special case, ignored nodes are considered equal.
|
||||
func (r Result) Equal() bool {
|
||||
return r.flags&(reportEqual|reportByIgnore) != 0
|
||||
}
|
||||
|
||||
// ByIgnore reports whether the node is equal because it was ignored.
|
||||
// This never reports true if [Result.Equal] reports false.
|
||||
func (r Result) ByIgnore() bool {
|
||||
return r.flags&reportByIgnore != 0
|
||||
}
|
||||
|
||||
// ByMethod reports whether the Equal method determined equality.
|
||||
func (r Result) ByMethod() bool {
|
||||
return r.flags&reportByMethod != 0
|
||||
}
|
||||
|
||||
// ByFunc reports whether a [Comparer] function determined equality.
|
||||
func (r Result) ByFunc() bool {
|
||||
return r.flags&reportByFunc != 0
|
||||
}
|
||||
|
||||
// ByCycle reports whether a reference cycle was detected.
|
||||
func (r Result) ByCycle() bool {
|
||||
return r.flags&reportByCycle != 0
|
||||
}
|
||||
|
||||
type resultFlags uint
|
||||
|
||||
const (
|
||||
_ resultFlags = (1 << iota) / 2
|
||||
|
||||
reportEqual
|
||||
reportUnequal
|
||||
reportByIgnore
|
||||
reportByMethod
|
||||
reportByFunc
|
||||
reportByCycle
|
||||
)
|
||||
|
||||
// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
|
||||
// the value trees, it calls PushStep as it descends into each node in the
|
||||
// tree and PopStep as it ascend out of the node. The leaves of the tree are
|
||||
// either compared (determined to be equal or not equal) or ignored and reported
|
||||
// as such by calling the Report method.
|
||||
func Reporter(r interface {
|
||||
// PushStep is called when a tree-traversal operation is performed.
|
||||
// The PathStep itself is only valid until the step is popped.
|
||||
// The PathStep.Values are valid for the duration of the entire traversal
|
||||
// and must not be mutated.
|
||||
//
|
||||
// Equal always calls PushStep at the start to provide an operation-less
|
||||
// PathStep used to report the root values.
|
||||
//
|
||||
// Within a slice, the exact set of inserted, removed, or modified elements
|
||||
// is unspecified and may change in future implementations.
|
||||
// The entries of a map are iterated through in an unspecified order.
|
||||
PushStep(PathStep)
|
||||
|
||||
// Report is called exactly once on leaf nodes to report whether the
|
||||
// comparison identified the node as equal, unequal, or ignored.
|
||||
// A leaf node is one that is immediately preceded by and followed by
|
||||
// a pair of PushStep and PopStep calls.
|
||||
Report(Result)
|
||||
|
||||
// PopStep ascends back up the value tree.
|
||||
// There is always a matching pop call for every push call.
|
||||
PopStep()
|
||||
}) Option {
|
||||
return reporter{r}
|
||||
}
|
||||
|
||||
type reporter struct{ reporterIface }
|
||||
type reporterIface interface {
|
||||
PushStep(PathStep)
|
||||
Report(Result)
|
||||
PopStep()
|
||||
}
|
||||
|
||||
func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
|
||||
panic("not implemented")
|
||||
}
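// Editor's note: illustrative sketch of a custom Reporter, not part of this
// vendored file. It records the path and values of every difference; the
// diffReporter name is made up and the cmp and fmt packages are assumed to be
// imported.
//
//	type diffReporter struct {
//		path  cmp.Path
//		diffs []string
//	}
//
//	func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
//	func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
//
//	func (r *diffReporter) Report(rs cmp.Result) {
//		if !rs.Equal() {
//			vx, vy := r.path.Last().Values()
//			r.diffs = append(r.diffs, fmt.Sprintf("%#v: %+v != %+v", r.path, vx, vy))
//		}
//	}
//
//	// Usage: var r diffReporter; cmp.Equal(x, y, cmp.Reporter(&r)); then inspect r.diffs.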
|
||||
|
||||
// normalizeOption normalizes the input options such that all Options groups
|
||||
// are flattened and groups with a single element are reduced to that element.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func normalizeOption(src Option) Option {
|
||||
switch opts := flattenOptions(nil, Options{src}); len(opts) {
|
||||
case 0:
|
||||
return nil
|
||||
case 1:
|
||||
return opts[0]
|
||||
default:
|
||||
return opts
|
||||
}
|
||||
}
|
||||
|
||||
// flattenOptions copies all options in src to dst as a flat list.
|
||||
// Only coreOptions and Options containing coreOptions are allowed.
|
||||
func flattenOptions(dst, src Options) Options {
|
||||
for _, opt := range src {
|
||||
switch opt := opt.(type) {
|
||||
case nil:
|
||||
continue
|
||||
case Options:
|
||||
dst = flattenOptions(dst, opt)
|
||||
case coreOption:
|
||||
dst = append(dst, opt)
|
||||
default:
|
||||
panic(fmt.Sprintf("invalid option type: %T", opt))
|
||||
}
|
||||
}
|
||||
return dst
|
||||
}
|
||||
vendor/github.com/google/go-cmp/cmp/path.go (generated, vendored, new file, 390 lines)
@@ -0,0 +1,390 @@
|
||||
// Copyright 2017, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
// Path is a list of [PathStep] describing the sequence of operations to get
|
||||
// from some root type to the current position in the value tree.
|
||||
// The first Path element is always an operation-less [PathStep] that exists
|
||||
// simply to identify the initial type.
|
||||
//
|
||||
// When traversing structs with embedded structs, the embedded struct will
|
||||
// always be accessed as a field before traversing the fields of the
|
||||
// embedded struct themselves. That is, an exported field from the
|
||||
// embedded struct will never be accessed directly from the parent struct.
|
||||
type Path []PathStep
|
||||
|
||||
// PathStep is a union-type for specific operations to traverse
|
||||
// a value's tree structure. Users of this package never need to implement
|
||||
// these types as values of this type will be returned by this package.
|
||||
//
|
||||
// Implementations of this interface:
|
||||
// - [StructField]
|
||||
// - [SliceIndex]
|
||||
// - [MapIndex]
|
||||
// - [Indirect]
|
||||
// - [TypeAssertion]
|
||||
// - [Transform]
|
||||
type PathStep interface {
|
||||
String() string
|
||||
|
||||
// Type is the resulting type after performing the path step.
|
||||
Type() reflect.Type
|
||||
|
||||
// Values is the resulting values after performing the path step.
|
||||
// The type of each valid value is guaranteed to be identical to Type.
|
||||
//
|
||||
// In some cases, one or both may be invalid or have restrictions:
|
||||
// - For StructField, both are not interface-able if the current field
|
||||
// is unexported and the struct type is not explicitly permitted by
|
||||
// an Exporter to traverse unexported fields.
|
||||
// - For SliceIndex, one may be invalid if an element is missing from
|
||||
// either the x or y slice.
|
||||
// - For MapIndex, one may be invalid if an entry is missing from
|
||||
// either the x or y map.
|
||||
//
|
||||
// The provided values must not be mutated.
|
||||
Values() (vx, vy reflect.Value)
|
||||
}
|
||||
|
||||
var (
|
||||
_ PathStep = StructField{}
|
||||
_ PathStep = SliceIndex{}
|
||||
_ PathStep = MapIndex{}
|
||||
_ PathStep = Indirect{}
|
||||
_ PathStep = TypeAssertion{}
|
||||
_ PathStep = Transform{}
|
||||
)
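// Editor's note: illustrative usage sketch, not part of this vendored file.
// A FilterPath predicate can type-switch on the last PathStep to target one
// kind of access; the "timestamp" map key is made up and the cmp and reflect
// packages are assumed to be imported.
//
//	ignoreTimestampEntry := cmp.FilterPath(func(p cmp.Path) bool {
//		mi, ok := p.Last().(cmp.MapIndex)
//		return ok && mi.Key().Kind() == reflect.String && mi.Key().String() == "timestamp"
//	}, cmp.Ignore())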
|
||||
|
||||
func (pa *Path) push(s PathStep) {
|
||||
*pa = append(*pa, s)
|
||||
}
|
||||
|
||||
func (pa *Path) pop() {
|
||||
*pa = (*pa)[:len(*pa)-1]
|
||||
}
|
||||
|
||||
// Last returns the last [PathStep] in the Path.
|
||||
// If the path is empty, this returns a non-nil [PathStep]
|
||||
// that reports a nil [PathStep.Type].
|
||||
func (pa Path) Last() PathStep {
|
||||
return pa.Index(-1)
|
||||
}
|
||||
|
||||
// Index returns the ith step in the Path and supports negative indexing.
|
||||
// A negative index starts counting from the tail of the Path such that -1
|
||||
// refers to the last step, -2 refers to the second-to-last step, and so on.
|
||||
// If index is invalid, this returns a non-nil [PathStep]
|
||||
// that reports a nil [PathStep.Type].
|
||||
func (pa Path) Index(i int) PathStep {
|
||||
if i < 0 {
|
||||
i = len(pa) + i
|
||||
}
|
||||
if i < 0 || i >= len(pa) {
|
||||
return pathStep{}
|
||||
}
|
||||
return pa[i]
|
||||
}
|
||||
|
||||
// String returns the simplified path to a node.
|
||||
// The simplified path only contains struct field accesses.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// MyMap.MySlices.MyField
|
||||
func (pa Path) String() string {
|
||||
var ss []string
|
||||
for _, s := range pa {
|
||||
if _, ok := s.(StructField); ok {
|
||||
ss = append(ss, s.String())
|
||||
}
|
||||
}
|
||||
return strings.TrimPrefix(strings.Join(ss, ""), ".")
|
||||
}
|
||||
|
||||
// GoString returns the path to a specific node using Go syntax.
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
|
||||
func (pa Path) GoString() string {
|
||||
var ssPre, ssPost []string
|
||||
var numIndirect int
|
||||
for i, s := range pa {
|
||||
var nextStep PathStep
|
||||
if i+1 < len(pa) {
|
||||
nextStep = pa[i+1]
|
||||
}
|
||||
switch s := s.(type) {
|
||||
case Indirect:
|
||||
numIndirect++
|
||||
pPre, pPost := "(", ")"
|
||||
switch nextStep.(type) {
|
||||
case Indirect:
|
||||
continue // Next step is indirection, so let them batch up
|
||||
case StructField:
|
||||
numIndirect-- // Automatic indirection on struct fields
|
||||
case nil:
|
||||
pPre, pPost = "", "" // Last step; no need for parenthesis
|
||||
}
|
||||
if numIndirect > 0 {
|
||||
ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
|
||||
ssPost = append(ssPost, pPost)
|
||||
}
|
||||
numIndirect = 0
|
||||
continue
|
||||
case Transform:
|
||||
ssPre = append(ssPre, s.trans.name+"(")
|
||||
ssPost = append(ssPost, ")")
|
||||
continue
|
||||
}
|
||||
ssPost = append(ssPost, s.String())
|
||||
}
|
||||
for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
|
||||
ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
|
||||
}
|
||||
return strings.Join(ssPre, "") + strings.Join(ssPost, "")
|
||||
}
|
||||
|
||||
type pathStep struct {
|
||||
typ reflect.Type
|
||||
vx, vy reflect.Value
|
||||
}
|
||||
|
||||
func (ps pathStep) Type() reflect.Type { return ps.typ }
|
||||
func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
|
||||
func (ps pathStep) String() string {
|
||||
if ps.typ == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
s := value.TypeString(ps.typ, false)
|
||||
if s == "" || strings.ContainsAny(s, "{}\n") {
|
||||
return "root" // Type too simple or complex to print
|
||||
}
|
||||
return fmt.Sprintf("{%s}", s)
|
||||
}
|
||||
|
||||
// StructField is a [PathStep] that represents a struct field access
|
||||
// on a field called [StructField.Name].
|
||||
type StructField struct{ *structField }
|
||||
type structField struct {
|
||||
pathStep
|
||||
name string
|
||||
idx int
|
||||
|
||||
// These fields are used for forcibly accessing an unexported field.
|
||||
// pvx, pvy, and field are only valid if unexported is true.
|
||||
unexported bool
|
||||
mayForce bool // Forcibly allow visibility
|
||||
paddr bool // Was parent addressable?
|
||||
pvx, pvy reflect.Value // Parent values (always addressable)
|
||||
field reflect.StructField // Field information
|
||||
}
|
||||
|
||||
func (sf StructField) Type() reflect.Type { return sf.typ }
|
||||
func (sf StructField) Values() (vx, vy reflect.Value) {
|
||||
if !sf.unexported {
|
||||
return sf.vx, sf.vy // CanInterface reports true
|
||||
}
|
||||
|
||||
// Forcibly obtain read-write access to an unexported struct field.
|
||||
if sf.mayForce {
|
||||
vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
|
||||
vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
|
||||
return vx, vy // CanInterface reports true
|
||||
}
|
||||
return sf.vx, sf.vy // CanInterface reports false
|
||||
}
|
||||
func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
|
||||
|
||||
// Name is the field name.
|
||||
func (sf StructField) Name() string { return sf.name }
|
||||
|
||||
// Index is the index of the field in the parent struct type.
|
||||
// See [reflect.Type.Field].
|
||||
func (sf StructField) Index() int { return sf.idx }
|
||||
|
||||
// SliceIndex is a [PathStep] that represents an index operation on
|
||||
// a slice or array at some index [SliceIndex.Key].
|
||||
type SliceIndex struct{ *sliceIndex }
|
||||
type sliceIndex struct {
|
||||
pathStep
|
||||
xkey, ykey int
|
||||
isSlice bool // False for reflect.Array
|
||||
}
|
||||
|
||||
func (si SliceIndex) Type() reflect.Type { return si.typ }
|
||||
func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
|
||||
func (si SliceIndex) String() string {
|
||||
switch {
|
||||
case si.xkey == si.ykey:
|
||||
return fmt.Sprintf("[%d]", si.xkey)
|
||||
case si.ykey == -1:
|
||||
// [5->?] means "I don't know where X[5] went"
|
||||
return fmt.Sprintf("[%d->?]", si.xkey)
|
||||
case si.xkey == -1:
|
||||
// [?->3] means "I don't know where Y[3] came from"
|
||||
return fmt.Sprintf("[?->%d]", si.ykey)
|
||||
default:
|
||||
// [5->3] means "X[5] moved to Y[3]"
|
||||
return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
|
||||
}
|
||||
}
|
||||
|
||||
// Key is the index key; it may return -1 if in a split state
|
||||
func (si SliceIndex) Key() int {
|
||||
if si.xkey != si.ykey {
|
||||
return -1
|
||||
}
|
||||
return si.xkey
|
||||
}
|
||||
|
||||
// SplitKeys are the indexes for indexing into slices in the
|
||||
// x and y values, respectively. These indexes may differ due to the
|
||||
// insertion or removal of an element in one of the slices, causing
|
||||
// all of the indexes to be shifted. If an index is -1, then that
|
||||
// indicates that the element does not exist in the associated slice.
|
||||
//
|
||||
// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
|
||||
// returned by SplitKeys are not the same. SplitKeys will never return -1 for
|
||||
// both indexes.
|
||||
func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
|
||||
|
||||
// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
|
||||
type MapIndex struct{ *mapIndex }
|
||||
type mapIndex struct {
|
||||
pathStep
|
||||
key reflect.Value
|
||||
}
|
||||
|
||||
func (mi MapIndex) Type() reflect.Type { return mi.typ }
|
||||
func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
|
||||
func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
|
||||
|
||||
// Key is the value of the map key.
|
||||
func (mi MapIndex) Key() reflect.Value { return mi.key }
|
||||
|
||||
// Indirect is a [PathStep] that represents pointer indirection on the parent type.
|
||||
type Indirect struct{ *indirect }
|
||||
type indirect struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (in Indirect) Type() reflect.Type { return in.typ }
|
||||
func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
|
||||
func (in Indirect) String() string { return "*" }
|
||||
|
||||
// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
|
||||
type TypeAssertion struct{ *typeAssertion }
|
||||
type typeAssertion struct {
|
||||
pathStep
|
||||
}
|
||||
|
||||
func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
|
||||
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
|
||||
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
|
||||
|
||||
// Transform is a [PathStep] that represents a transformation
|
||||
// from the parent type to the current type.
|
||||
type Transform struct{ *transform }
|
||||
type transform struct {
|
||||
pathStep
|
||||
trans *transformer
|
||||
}
|
||||
|
||||
func (tf Transform) Type() reflect.Type { return tf.typ }
|
||||
func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
|
||||
func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
|
||||
|
||||
// Name is the name of the [Transformer].
|
||||
func (tf Transform) Name() string { return tf.trans.name }
|
||||
|
||||
// Func is the function pointer to the transformer function.
|
||||
func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
|
||||
|
||||
// Option returns the originally constructed [Transformer] option.
|
||||
// The == operator can be used to detect the exact option used.
|
||||
func (tf Transform) Option() Option { return tf.trans }
|
||||
|
||||
// pointerPath represents a dual-stack of pointers encountered when
|
||||
// recursively traversing the x and y values. This data structure supports
|
||||
// detection of cycles and determining whether the cycles are equal.
|
||||
// In Go, cycles can occur via pointers, slices, and maps.
|
||||
//
|
||||
// The pointerPath uses a map to represent a stack; where descension into a
|
||||
// pointer pushes the address onto the stack, and ascension from a pointer
|
||||
// pops the address from the stack. Thus, when traversing into a pointer from
|
||||
// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
|
||||
// by checking whether the pointer has already been visited. The cycle detection
|
||||
// uses a separate stack for the x and y values.
|
||||
//
|
||||
// If a cycle is detected we need to determine whether the two pointers
|
||||
// should be considered equal. The definition of equality chosen by Equal
|
||||
// requires two graphs to have the same structure. To determine this, both the
|
||||
// x and y values must have a cycle where the previous pointers were also
|
||||
// encountered together as a pair.
|
||||
//
|
||||
// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
|
||||
// MapIndex with pointer information for the x and y values.
|
||||
// Suppose px and py are two pointers to compare, we then search the
|
||||
// Path for whether px was ever encountered in the Path history of x, and
|
||||
// similarly so with py. If either side has a cycle, the comparison is only
|
||||
// equal if both px and py have a cycle resulting from the same PathStep.
|
||||
//
|
||||
// Using a map as a stack is more performant as we can perform cycle detection
|
||||
// in O(1) instead of O(N) where N is len(Path).
|
||||
type pointerPath struct {
|
||||
// mx is keyed by x pointers, where the value is the associated y pointer.
|
||||
mx map[value.Pointer]value.Pointer
|
||||
// my is keyed by y pointers, where the value is the associated x pointer.
|
||||
my map[value.Pointer]value.Pointer
|
||||
}
|
||||
|
||||
func (p *pointerPath) Init() {
|
||||
p.mx = make(map[value.Pointer]value.Pointer)
|
||||
p.my = make(map[value.Pointer]value.Pointer)
|
||||
}
|
||||
|
||||
// Push indicates intent to descend into pointers vx and vy where
|
||||
// visited reports whether either has been seen before. If visited before,
|
||||
// equal reports whether both pointers were encountered together.
|
||||
// Pop must be called if and only if the pointers were never visited.
|
||||
//
|
||||
// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
|
||||
// and be non-nil.
|
||||
func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
|
||||
px := value.PointerOf(vx)
|
||||
py := value.PointerOf(vy)
|
||||
_, ok1 := p.mx[px]
|
||||
_, ok2 := p.my[py]
|
||||
if ok1 || ok2 {
|
||||
equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
|
||||
return equal, true
|
||||
}
|
||||
p.mx[px] = py
|
||||
p.my[py] = px
|
||||
return false, false
|
||||
}
|
||||
|
||||
// Pop ascends from pointers vx and vy.
|
||||
func (p pointerPath) Pop(vx, vy reflect.Value) {
|
||||
delete(p.mx, value.PointerOf(vx))
|
||||
delete(p.my, value.PointerOf(vy))
|
||||
}
|
||||
|
||||
// isExported reports whether the identifier is exported.
|
||||
func isExported(id string) bool {
|
||||
r, _ := utf8.DecodeRuneInString(id)
|
||||
return unicode.IsUpper(r)
|
||||
}
|
||||
vendor/github.com/google/go-cmp/cmp/report.go (generated, vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cmp

// defaultReporter implements the reporter interface.
//
// As Equal serially calls the PushStep, Report, and PopStep methods, the
// defaultReporter constructs a tree-based representation of the compared value
// and the result of each comparison (see valueNode).
//
// When the String method is called, the FormatDiff method transforms the
// valueNode tree into a textNode tree, which is a tree-based representation
// of the textual output (see textNode).
//
// Lastly, the textNode.String method produces the final report as a string.
type defaultReporter struct {
	root *valueNode
	curr *valueNode
}

func (r *defaultReporter) PushStep(ps PathStep) {
	r.curr = r.curr.PushStep(ps)
	if r.root == nil {
		r.root = r.curr
	}
}
func (r *defaultReporter) Report(rs Result) {
	r.curr.Report(rs)
}
func (r *defaultReporter) PopStep() {
	r.curr = r.curr.PopStep()
}

// String provides a full report of the differences detected as a structured
// literal in pseudo-Go syntax. String may only be called after the entire tree
// has been traversed.
func (r *defaultReporter) String() string {
	assert(r.root != nil && r.curr == nil)
	if r.root.NumDiff == 0 {
		return ""
	}
	ptrs := new(pointerReferences)
	text := formatOptions{}.FormatDiff(r.root, ptrs)
	resolveReferences(text)
	return text.String()
}

func assert(ok bool) {
	if !ok {
		panic("assertion failure")
	}
}
vendor/github.com/google/go-cmp/cmp/report_compare.go (generated, vendored, new file, 433 lines)
@@ -0,0 +1,433 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// numContextRecords is the number of surrounding equal records to print.
|
||||
const numContextRecords = 2
|
||||
|
||||
type diffMode byte
|
||||
|
||||
const (
|
||||
diffUnknown diffMode = 0
|
||||
diffIdentical diffMode = ' '
|
||||
diffRemoved diffMode = '-'
|
||||
diffInserted diffMode = '+'
|
||||
)
|
||||
|
||||
type typeMode int
|
||||
|
||||
const (
|
||||
// emitType always prints the type.
|
||||
emitType typeMode = iota
|
||||
// elideType never prints the type.
|
||||
elideType
|
||||
// autoType prints the type only for composite kinds
|
||||
// (i.e., structs, slices, arrays, and maps).
|
||||
autoType
|
||||
)
|
||||
|
||||
type formatOptions struct {
|
||||
// DiffMode controls the output mode of FormatDiff.
|
||||
//
|
||||
// If diffUnknown, then produce a diff of the x and y values.
|
||||
// If diffIdentical, then emit values as if they were equal.
|
||||
// If diffRemoved, then only emit x values (ignoring y values).
|
||||
// If diffInserted, then only emit y values (ignoring x values).
|
||||
DiffMode diffMode
|
||||
|
||||
// TypeMode controls whether to print the type for the current node.
|
||||
//
|
||||
// As a general rule of thumb, we always print the type of the next node
|
||||
// after an interface, and always elide the type of the next node after
|
||||
// a slice or map node.
|
||||
TypeMode typeMode
|
||||
|
||||
// formatValueOptions are options specific to printing reflect.Values.
|
||||
formatValueOptions
|
||||
}
|
||||
|
||||
func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
|
||||
opts.DiffMode = d
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
|
||||
opts.TypeMode = t
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) WithVerbosity(level int) formatOptions {
|
||||
opts.VerbosityLevel = level
|
||||
opts.LimitVerbosity = true
|
||||
return opts
|
||||
}
|
||||
func (opts formatOptions) verbosity() uint {
|
||||
switch {
|
||||
case opts.VerbosityLevel < 0:
|
||||
return 0
|
||||
case opts.VerbosityLevel > 16:
|
||||
return 16 // some reasonable maximum to avoid shift overflow
|
||||
default:
|
||||
return uint(opts.VerbosityLevel)
|
||||
}
|
||||
}
|
||||
|
||||
const maxVerbosityPreset = 6
|
||||
|
||||
// verbosityPreset modifies the verbosity settings given an index
|
||||
// between 0 and maxVerbosityPreset, inclusive.
|
||||
func verbosityPreset(opts formatOptions, i int) formatOptions {
|
||||
opts.VerbosityLevel = int(opts.verbosity()) + 2*i
|
||||
if i > 0 {
|
||||
opts.AvoidStringer = true
|
||||
}
|
||||
if i >= maxVerbosityPreset {
|
||||
opts.PrintAddresses = true
|
||||
opts.QualifiedNames = true
|
||||
}
|
||||
return opts
|
||||
}
|
||||
|
||||
// FormatDiff converts a valueNode tree into a textNode tree, where the latter
|
||||
// is a textual representation of the differences detected in the former.
|
||||
func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
opts = opts.WithVerbosity(1)
|
||||
} else if opts.verbosity() < 3 {
|
||||
opts = opts.WithVerbosity(3)
|
||||
}
|
||||
|
||||
// Check whether we have specialized formatting for this node.
|
||||
// This is not necessary, but helpful for producing more readable outputs.
|
||||
if opts.CanFormatDiffSlice(v) {
|
||||
return opts.FormatDiffSlice(v)
|
||||
}
|
||||
|
||||
var parentKind reflect.Kind
|
||||
if v.parent != nil && v.parent.TransformerName == "" {
|
||||
parentKind = v.parent.Type.Kind()
|
||||
}
|
||||
|
||||
// For leaf nodes, format the value based on the reflect.Values alone.
|
||||
// As a special case, treat equal []byte as a leaf nodes.
|
||||
isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
|
||||
isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
|
||||
if v.MaxDepth == 0 || isEqualBytes {
|
||||
switch opts.DiffMode {
|
||||
case diffUnknown, diffIdentical:
|
||||
// Format Equal.
|
||||
if v.NumDiff == 0 {
|
||||
outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
if v.NumIgnored > 0 && v.NumSame == 0 {
|
||||
return textEllipsis
|
||||
} else if outx.Len() < outy.Len() {
|
||||
return outx
|
||||
} else {
|
||||
return outy
|
||||
}
|
||||
}
|
||||
|
||||
// Format unequal.
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
var list textList
|
||||
outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
|
||||
outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: '-', Value: outx})
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: '+', Value: outy})
|
||||
}
|
||||
return opts.WithTypeMode(emitType).FormatType(v.Type, list)
|
||||
case diffRemoved:
|
||||
return opts.FormatValue(v.ValueX, parentKind, ptrs)
|
||||
case diffInserted:
|
||||
return opts.FormatValue(v.ValueY, parentKind, ptrs)
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
}
|
||||
|
||||
// Register slice element to support cycle detection.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
|
||||
}
|
||||
|
||||
// Descend into the child value node.
|
||||
if v.TransformerName != "" {
|
||||
out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
|
||||
return opts.FormatType(v.Type, out)
|
||||
} else {
|
||||
switch k := v.Type.Kind(); k {
|
||||
case reflect.Struct, reflect.Array, reflect.Slice:
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Map:
|
||||
// Register map to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.formatDiffList(v.Records, k, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = opts.FormatType(v.Type, out)
|
||||
case reflect.Ptr:
|
||||
// Register pointer to support cycle detection.
|
||||
ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
|
||||
defer ptrs.Pop()
|
||||
|
||||
out = opts.FormatDiff(v.Value, ptrs)
|
||||
out = wrapTrunkReferences(ptrRefs, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
case reflect.Interface:
|
||||
out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v cannot have children", k))
|
||||
}
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
|
||||
// Derive record name based on the data structure kind.
|
||||
var name string
|
||||
var formatKey func(reflect.Value) string
|
||||
switch k {
|
||||
case reflect.Struct:
|
||||
name = "field"
|
||||
opts = opts.WithTypeMode(autoType)
|
||||
formatKey = func(v reflect.Value) string { return v.String() }
|
||||
case reflect.Slice, reflect.Array:
|
||||
name = "element"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(reflect.Value) string { return "" }
|
||||
case reflect.Map:
|
||||
name = "entry"
|
||||
opts = opts.WithTypeMode(elideType)
|
||||
formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
|
||||
}
|
||||
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
if opts.DiffMode == diffIdentical {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
} else {
|
||||
maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
// Handle unification.
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical, diffRemoved, diffInserted:
|
||||
var list textList
|
||||
var deferredEllipsis bool // Add final "..." to indicate records were dropped
|
||||
for _, r := range recs {
|
||||
if len(list) == maxLen {
|
||||
deferredEllipsis = true
|
||||
break
|
||||
}
|
||||
|
||||
// Elide struct fields that are zero value.
|
||||
if k == reflect.Struct {
|
||||
var isZero bool
|
||||
switch opts.DiffMode {
|
||||
case diffIdentical:
|
||||
isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
|
||||
case diffRemoved:
|
||||
isZero = r.Value.ValueX.IsZero()
|
||||
case diffInserted:
|
||||
isZero = r.Value.ValueY.IsZero()
|
||||
}
|
||||
if isZero {
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Elide ignored nodes.
|
||||
if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
|
||||
deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
|
||||
if !deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
continue
|
||||
}
|
||||
if out := opts.FormatDiff(r.Value, ptrs); out != nil {
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
}
|
||||
}
|
||||
if deferredEllipsis {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case diffUnknown:
|
||||
default:
|
||||
panic("invalid diff mode")
|
||||
}
|
||||
|
||||
// Handle differencing.
|
||||
var numDiffs int
|
||||
var list textList
|
||||
var keys []reflect.Value // invariant: len(list) == len(keys)
|
||||
groups := coalesceAdjacentRecords(name, recs)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle equal records.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing records to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numLo++
|
||||
}
|
||||
for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
|
||||
break
|
||||
}
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
|
||||
numHi++ // Avoid pointless coalescing of a single equal record
|
||||
}
|
||||
|
||||
// Format the equal values.
|
||||
for _, r := range recs[:numLo] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
for _, r := range recs[numEqual-numHi : numEqual] {
|
||||
out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
recs = recs[numEqual:]
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal records.
|
||||
for _, r := range recs[:ds.NumDiff()] {
|
||||
switch {
|
||||
case opts.CanFormatDiffSlice(r.Value):
|
||||
out := opts.FormatDiffSlice(r.Value)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
case r.Value.NumChildren == r.Value.MaxDepth:
|
||||
outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
|
||||
opts2 := verbosityPreset(opts, i)
|
||||
outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
|
||||
outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
|
||||
}
|
||||
if outx != nil {
|
||||
list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
if outy != nil {
|
||||
list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
default:
|
||||
out := opts.FormatDiff(r.Value, ptrs)
|
||||
list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
|
||||
keys = append(keys, r.Key)
|
||||
}
|
||||
}
|
||||
recs = recs[ds.NumDiff():]
|
||||
numDiffs += ds.NumDiff()
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(len(recs) == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
for len(keys) < len(list) {
|
||||
keys = append(keys, reflect.Value{})
|
||||
}
|
||||
}
|
||||
assert(len(list) == len(keys))
|
||||
|
||||
// For maps, the default formatting logic uses fmt.Stringer which may
|
||||
// produce ambiguous output. Avoid calling String to disambiguate.
|
||||
if k == reflect.Map {
|
||||
var ambiguous bool
|
||||
seenKeys := map[string]reflect.Value{}
|
||||
for i, currKey := range keys {
|
||||
if currKey.IsValid() {
|
||||
strKey := list[i].Key
|
||||
prevKey, seen := seenKeys[strKey]
|
||||
if seen && prevKey.CanInterface() && currKey.CanInterface() {
|
||||
ambiguous = prevKey.Interface() != currKey.Interface()
|
||||
if ambiguous {
|
||||
break
|
||||
}
|
||||
}
|
||||
seenKeys[strKey] = currKey
|
||||
}
|
||||
}
|
||||
if ambiguous {
|
||||
for i, k := range keys {
|
||||
if k.IsValid() {
|
||||
list[i].Key = formatMapKey(k, true, ptrs)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
}
|
||||
|
||||
// coalesceAdjacentRecords coalesces the list of records into groups of
|
||||
// adjacent equal, or unequal counts.
|
||||
func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
|
||||
var prevCase int // Arbitrary index into which case last occurred
|
||||
lastStats := func(i int) *diffStats {
|
||||
if prevCase != i {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevCase = i
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, r := range recs {
|
||||
switch rv := r.Value; {
|
||||
case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
|
||||
lastStats(1).NumIgnored++
|
||||
case rv.NumDiff == 0:
|
||||
lastStats(1).NumIdentical++
|
||||
case rv.NumDiff > 0 && !rv.ValueY.IsValid():
|
||||
lastStats(2).NumRemoved++
|
||||
case rv.NumDiff > 0 && !rv.ValueX.IsValid():
|
||||
lastStats(2).NumInserted++
|
||||
default:
|
||||
lastStats(2).NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
||||
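The record coalescing above is what lets the report collapse long runs of identical struct fields or map entries into a single ellipsis entry. A minimal sketch of how that surfaces through the public cmp.Diff API (hypothetical example types and values, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// config is a hypothetical type used only for illustration.
type config struct {
	Host, User, Region, Zone string
	Port                     int
}

func main() {
	x := config{Host: "db1", User: "app", Region: "eu", Zone: "a", Port: 5432}
	y := config{Host: "db1", User: "app", Region: "eu", Zone: "a", Port: 5433}
	// Only Port differs; the identical fields are printed as context or
	// coalesced into an "N identical fields" ellipsis, depending on their count.
	fmt.Println(cmp.Diff(x, y))
}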
264
vendor/github.com/google/go-cmp/cmp/report_references.go
generated
vendored
Normal file
@@ -0,0 +1,264 @@
|
||||
// Copyright 2020, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
const (
|
||||
pointerDelimPrefix = "⟪"
|
||||
pointerDelimSuffix = "⟫"
|
||||
)
|
||||
|
||||
// formatPointer prints the address of the pointer.
|
||||
func formatPointer(p value.Pointer, withDelims bool) string {
|
||||
v := p.Uintptr()
|
||||
if flags.Deterministic {
|
||||
v = 0xdeadf00f // Only used for stable testing purposes
|
||||
}
|
||||
if withDelims {
|
||||
return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
|
||||
}
|
||||
return formatHex(uint64(v))
|
||||
}
|
||||
|
||||
// pointerReferences is a stack of pointers visited so far.
|
||||
type pointerReferences [][2]value.Pointer
|
||||
|
||||
func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
|
||||
if deref && vx.IsValid() {
|
||||
vx = vx.Addr()
|
||||
}
|
||||
if deref && vy.IsValid() {
|
||||
vy = vy.Addr()
|
||||
}
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
|
||||
case diffRemoved:
|
||||
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
|
||||
case diffInserted:
|
||||
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
|
||||
}
|
||||
*ps = append(*ps, pp)
|
||||
return pp
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
|
||||
p = value.PointerOf(v)
|
||||
for _, pp := range *ps {
|
||||
if p == pp[0] || p == pp[1] {
|
||||
return p, true
|
||||
}
|
||||
}
|
||||
*ps = append(*ps, [2]value.Pointer{p, p})
|
||||
return p, false
|
||||
}
|
||||
|
||||
func (ps *pointerReferences) Pop() {
|
||||
*ps = (*ps)[:len(*ps)-1]
|
||||
}
|
||||
|
||||
// trunkReferences is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for either pointer in a pair of references.
|
||||
type trunkReferences struct{ pp [2]value.Pointer }
|
||||
|
||||
// trunkReference is metadata for a textNode indicating that the sub-tree
|
||||
// represents the value for the given pointer reference.
|
||||
type trunkReference struct{ p value.Pointer }
|
||||
|
||||
// leafReference is metadata for a textNode indicating that the value is
|
||||
// truncated as it refers to another part of the tree (i.e., a trunk).
|
||||
type leafReference struct{ p value.Pointer }
|
||||
|
||||
func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
|
||||
switch {
|
||||
case pp[0].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
|
||||
case pp[1].IsNil():
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
case pp[0] == pp[1]:
|
||||
return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
|
||||
default:
|
||||
return &textWrap{Value: s, Metadata: trunkReferences{pp}}
|
||||
}
|
||||
}
|
||||
func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
|
||||
}
|
||||
func makeLeafReference(p value.Pointer, printAddress bool) textNode {
|
||||
out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
|
||||
var prefix string
|
||||
if printAddress {
|
||||
prefix = formatPointer(p, true)
|
||||
}
|
||||
return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
|
||||
}
|
||||
|
||||
// resolveReferences walks the textNode tree searching for any leaf reference
|
||||
// metadata and resolves each against the corresponding trunk references.
|
||||
// Since pointer addresses in memory are not particularly readable to the user,
|
||||
// it replaces each pointer value with an arbitrary and unique reference ID.
|
||||
func resolveReferences(s textNode) {
|
||||
var walkNodes func(textNode, func(textNode))
|
||||
walkNodes = func(s textNode, f func(textNode)) {
|
||||
f(s)
|
||||
switch s := s.(type) {
|
||||
case *textWrap:
|
||||
walkNodes(s.Value, f)
|
||||
case textList:
|
||||
for _, r := range s {
|
||||
walkNodes(r.Value, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Collect all trunks and leaves with reference metadata.
|
||||
var trunks, leaves []*textWrap
|
||||
walkNodes(s, func(s textNode) {
|
||||
if s, ok := s.(*textWrap); ok {
|
||||
switch s.Metadata.(type) {
|
||||
case leafReference:
|
||||
leaves = append(leaves, s)
|
||||
case trunkReference, trunkReferences:
|
||||
trunks = append(trunks, s)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
// No leaf references to resolve.
|
||||
if len(leaves) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Collect the set of all leaf references to resolve.
|
||||
leafPtrs := make(map[value.Pointer]bool)
|
||||
for _, leaf := range leaves {
|
||||
leafPtrs[leaf.Metadata.(leafReference).p] = true
|
||||
}
|
||||
|
||||
// Collect the set of trunk pointers that are always paired together.
|
||||
// This allows us to assign a single ID to both pointers for brevity.
|
||||
// If a pointer in a pair ever occurs by itself or as a different pair,
|
||||
// then the pair is broken.
|
||||
pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
|
||||
unpair := func(p value.Pointer) {
|
||||
if !pairedTrunkPtrs[p].IsNil() {
|
||||
pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
|
||||
}
|
||||
pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
unpair(p.p) // standalone pointer cannot be part of a pair
|
||||
case trunkReferences:
|
||||
p0, ok0 := pairedTrunkPtrs[p.pp[0]]
|
||||
p1, ok1 := pairedTrunkPtrs[p.pp[1]]
|
||||
switch {
|
||||
case !ok0 && !ok1:
|
||||
// Register the newly seen pair.
|
||||
pairedTrunkPtrs[p.pp[0]] = p.pp[1]
|
||||
pairedTrunkPtrs[p.pp[1]] = p.pp[0]
|
||||
case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
|
||||
// Exact pair already seen; do nothing.
|
||||
default:
|
||||
// Pair conflicts with some other pair; break all pairs.
|
||||
unpair(p.pp[0])
|
||||
unpair(p.pp[1])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Correlate each pointer referenced by leaves to a unique identifier,
|
||||
// and print the IDs for each trunk that matches those pointers.
|
||||
var nextID uint
|
||||
ptrIDs := make(map[value.Pointer]uint)
|
||||
newID := func() uint {
|
||||
id := nextID
|
||||
nextID++
|
||||
return id
|
||||
}
|
||||
for _, trunk := range trunks {
|
||||
switch p := trunk.Metadata.(type) {
|
||||
case trunkReference:
|
||||
if print := leafPtrs[p.p]; print {
|
||||
id, ok := ptrIDs[p.p]
|
||||
if !ok {
|
||||
id = newID()
|
||||
ptrIDs[p.p] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
}
|
||||
case trunkReferences:
|
||||
print0 := leafPtrs[p.pp[0]]
|
||||
print1 := leafPtrs[p.pp[1]]
|
||||
if print0 || print1 {
|
||||
id0, ok0 := ptrIDs[p.pp[0]]
|
||||
id1, ok1 := ptrIDs[p.pp[1]]
|
||||
isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
|
||||
if isPair {
|
||||
var id uint
|
||||
assert(ok0 == ok1) // must be seen together or not at all
|
||||
if ok0 {
|
||||
assert(id0 == id1) // must have the same ID
|
||||
id = id0
|
||||
} else {
|
||||
id = newID()
|
||||
ptrIDs[p.pp[0]] = id
|
||||
ptrIDs[p.pp[1]] = id
|
||||
}
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
|
||||
} else {
|
||||
if print0 && !ok0 {
|
||||
id0 = newID()
|
||||
ptrIDs[p.pp[0]] = id0
|
||||
}
|
||||
if print1 && !ok1 {
|
||||
id1 = newID()
|
||||
ptrIDs[p.pp[1]] = id1
|
||||
}
|
||||
switch {
|
||||
case print0 && print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
|
||||
case print0:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
|
||||
case print1:
|
||||
trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update all leaf references with the unique identifier.
|
||||
for _, leaf := range leaves {
|
||||
if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
|
||||
leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func formatReference(id uint) string {
|
||||
return fmt.Sprintf("ref#%d", id)
|
||||
}
|
||||
|
||||
func updateReferencePrefix(prefix, ref string) string {
|
||||
if prefix == "" {
|
||||
return pointerDelimPrefix + ref + pointerDelimSuffix
|
||||
}
|
||||
suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
|
||||
return pointerDelimPrefix + ref + ": " + suffix
|
||||
}
|
||||
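The pointer bookkeeping above is what keeps the reporter from recursing forever on cyclic values and what replaces raw addresses with stable ref#N labels. A minimal sketch against the public API (hypothetical self-referential type, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// node is a hypothetical self-referential type used only for illustration.
type node struct {
	Value int
	Next  *node
}

// ring builds a circular singly linked list from vals.
func ring(vals ...int) *node {
	head := &node{Value: vals[0]}
	curr := head
	for _, v := range vals[1:] {
		curr.Next = &node{Value: v}
		curr = curr.Next
	}
	curr.Next = head // close the cycle
	return head
}

func main() {
	// The rings differ in one element; cmp.Diff walks each cycle once and
	// reports the difference instead of looping, labeling repeated pointers
	// with reference IDs where needed.
	fmt.Println(cmp.Diff(ring(1, 2, 3), ring(1, 2, 4)))
}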
414
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
Normal file
@@ -0,0 +1,414 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/value"
|
||||
)
|
||||
|
||||
var (
|
||||
anyType = reflect.TypeOf((*interface{})(nil)).Elem()
|
||||
stringType = reflect.TypeOf((*string)(nil)).Elem()
|
||||
bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
|
||||
byteType = reflect.TypeOf((*byte)(nil)).Elem()
|
||||
)
|
||||
|
||||
type formatValueOptions struct {
|
||||
// AvoidStringer controls whether to avoid calling custom stringer
|
||||
// methods like error.Error or fmt.Stringer.String.
|
||||
AvoidStringer bool
|
||||
|
||||
// PrintAddresses controls whether to print the address of all pointers,
|
||||
// slice elements, and maps.
|
||||
PrintAddresses bool
|
||||
|
||||
// QualifiedNames controls whether FormatType uses the fully qualified name
|
||||
// (including the full package path as opposed to just the package name).
|
||||
QualifiedNames bool
|
||||
|
||||
// VerbosityLevel controls the amount of output to produce.
|
||||
// A higher value produces more output. A value of zero or lower produces
|
||||
// no output (represented using an ellipsis).
|
||||
// If LimitVerbosity is false, then the level is treated as infinite.
|
||||
VerbosityLevel int
|
||||
|
||||
// LimitVerbosity specifies that formatting should respect VerbosityLevel.
|
||||
LimitVerbosity bool
|
||||
}
|
||||
|
||||
// FormatType prints the type as if it were wrapping s.
|
||||
// This may return s as-is depending on the current type and TypeMode mode.
|
||||
func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
|
||||
// Check whether to emit the type or not.
|
||||
switch opts.TypeMode {
|
||||
case autoType:
|
||||
switch t.Kind() {
|
||||
case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
|
||||
if s.Equal(textNil) {
|
||||
return s
|
||||
}
|
||||
default:
|
||||
return s
|
||||
}
|
||||
if opts.DiffMode == diffIdentical {
|
||||
return s // elide type for identical nodes
|
||||
}
|
||||
case elideType:
|
||||
return s
|
||||
}
|
||||
|
||||
// Determine the type label, applying special handling for unnamed types.
|
||||
typeName := value.TypeString(t, opts.QualifiedNames)
|
||||
if t.Name() == "" {
|
||||
// According to Go grammar, certain type literals contain symbols that
|
||||
// do not strongly bind to the next lexicographical token (e.g., *T).
|
||||
switch t.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Ptr:
|
||||
typeName = "(" + typeName + ")"
|
||||
}
|
||||
}
|
||||
return &textWrap{Prefix: typeName, Value: wrapParens(s)}
|
||||
}
|
||||
|
||||
// wrapParens wraps s with a set of parenthesis, but avoids it if the
|
||||
// wrapped node itself is already surrounded by a pair of parenthesis or braces.
|
||||
// It handles unwrapping one level of pointer-reference nodes.
|
||||
func wrapParens(s textNode) textNode {
|
||||
var refNode *textWrap
|
||||
if s2, ok := s.(*textWrap); ok {
|
||||
// Unwrap a single pointer reference node.
|
||||
switch s2.Metadata.(type) {
|
||||
case leafReference, trunkReference, trunkReferences:
|
||||
refNode = s2
|
||||
if s3, ok := refNode.Value.(*textWrap); ok {
|
||||
s2 = s3
|
||||
}
|
||||
}
|
||||
|
||||
// Already has delimiters that make parenthesis unnecessary.
|
||||
hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
|
||||
hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
|
||||
if hasParens || hasBraces {
|
||||
return s
|
||||
}
|
||||
}
|
||||
if refNode != nil {
|
||||
refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
|
||||
return s
|
||||
}
|
||||
return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
|
||||
}
|
||||
|
||||
// FormatValue prints the reflect.Value, taking extra care to avoid descending
|
||||
// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
|
||||
func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
|
||||
if !v.IsValid() {
|
||||
return nil
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
// Check slice element for cycles.
|
||||
if parentKind == reflect.Slice {
|
||||
ptrRef, visited := ptrs.Push(v.Addr())
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, false)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
|
||||
}
|
||||
|
||||
// Check whether there is an Error or String method to call.
|
||||
if !opts.AvoidStringer && v.CanInterface() {
|
||||
// Avoid calling Error or String methods on nil receivers since many
|
||||
// implementations crash when doing so.
|
||||
if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
|
||||
var prefix, strVal string
|
||||
func() {
|
||||
// Swallow and ignore any panics from String or Error.
|
||||
defer func() { recover() }()
|
||||
switch v := v.Interface().(type) {
|
||||
case error:
|
||||
strVal = v.Error()
|
||||
prefix = "e"
|
||||
case fmt.Stringer:
|
||||
strVal = v.String()
|
||||
prefix = "s"
|
||||
}
|
||||
}()
|
||||
if prefix != "" {
|
||||
return opts.formatString(prefix, strVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether to explicitly wrap the result with the type.
|
||||
var skipType bool
|
||||
defer func() {
|
||||
if !skipType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}()
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return textLine(fmt.Sprint(v.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return textLine(fmt.Sprint(v.Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uint8:
|
||||
if parentKind == reflect.Slice || parentKind == reflect.Array {
|
||||
return textLine(formatHex(v.Uint()))
|
||||
}
|
||||
return textLine(fmt.Sprint(v.Uint()))
|
||||
case reflect.Uintptr:
|
||||
return textLine(formatHex(v.Uint()))
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return textLine(fmt.Sprint(v.Float()))
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return textLine(fmt.Sprint(v.Complex()))
|
||||
case reflect.String:
|
||||
return opts.formatString("", v.String())
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
return textLine(formatPointer(value.PointerOf(v), true))
|
||||
case reflect.Struct:
|
||||
var list textList
|
||||
v := makeAddressable(v) // needed for retrieveUnexportedField
|
||||
maxLen := v.NumField()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
vv := v.Field(i)
|
||||
if vv.IsZero() {
|
||||
continue // Elide fields with zero values
|
||||
}
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sf := t.Field(i)
|
||||
if !isExported(sf.Name) {
|
||||
vv = retrieveUnexportedField(v, sf, true)
|
||||
}
|
||||
s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sf.Name, Value: s})
|
||||
}
|
||||
return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check whether this is a []byte of text data.
|
||||
if t.Elem() == byteType {
|
||||
b := v.Bytes()
|
||||
isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
|
||||
if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
|
||||
out = opts.formatString("", string(b))
|
||||
skipType = true
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
|
||||
fallthrough
|
||||
case reflect.Array:
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Value: s})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if t.Kind() == reflect.Slice && opts.PrintAddresses {
|
||||
header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
|
||||
out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
|
||||
}
|
||||
return out
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
return makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
maxLen := v.Len()
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
var list textList
|
||||
for _, k := range value.SortKeys(v.MapKeys()) {
|
||||
if len(list) == maxLen {
|
||||
list.AppendEllipsis(diffStats{})
|
||||
break
|
||||
}
|
||||
sk := formatMapKey(k, false, ptrs)
|
||||
sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
|
||||
list = append(list, textRecord{Key: sk, Value: sv})
|
||||
}
|
||||
|
||||
out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
return out
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
|
||||
// Check pointer for cycles.
|
||||
ptrRef, visited := ptrs.Push(v)
|
||||
if visited {
|
||||
out = makeLeafReference(ptrRef, opts.PrintAddresses)
|
||||
return &textWrap{Prefix: "&", Value: out}
|
||||
}
|
||||
defer ptrs.Pop()
|
||||
|
||||
// Skip the name only if this is an unnamed pointer type.
|
||||
// Otherwise taking the address of a value does not reproduce
|
||||
// the named pointer type.
|
||||
if v.Type().Name() == "" {
|
||||
skipType = true // Let the underlying value print the type instead
|
||||
}
|
||||
out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
|
||||
out = &textWrap{Prefix: "&", Value: out}
|
||||
return out
|
||||
case reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return textNil
|
||||
}
|
||||
// Interfaces accept different concrete types,
|
||||
// so configure the underlying value to explicitly print the type.
|
||||
return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
|
||||
default:
|
||||
panic(fmt.Sprintf("%v kind not handled", v.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatString(prefix, s string) textNode {
|
||||
maxLen := len(s)
|
||||
maxLines := strings.Count(s, "\n") + 1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
|
||||
maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
}
|
||||
|
||||
// For multiline strings, use the triple-quote syntax,
|
||||
// but only use it when printing removed or inserted nodes since
|
||||
// we only want the extra verbosity for those cases.
|
||||
lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
|
||||
isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == '-' || opts.DiffMode == '+')
|
||||
for i := 0; i < len(lines) && isTripleQuoted; i++ {
|
||||
lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
line := lines[i]
|
||||
isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
|
||||
}
|
||||
if isTripleQuoted {
|
||||
var list textList
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
for i, line := range lines {
|
||||
if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
|
||||
comment := commentString(fmt.Sprintf("%d elided lines", numElided))
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
|
||||
break
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
|
||||
}
|
||||
list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
|
||||
return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
|
||||
}
|
||||
|
||||
// Format the string as a single-line quoted string.
|
||||
if len(s) > maxLen+len(textEllipsis) {
|
||||
return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
|
||||
}
|
||||
return textLine(prefix + formatString(s))
|
||||
}
|
||||
|
||||
// formatMapKey formats v as if it were a map key.
|
||||
// The result is guaranteed to be a single line.
|
||||
func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
|
||||
var opts formatOptions
|
||||
opts.DiffMode = diffIdentical
|
||||
opts.TypeMode = elideType
|
||||
opts.PrintAddresses = disambiguate
|
||||
opts.AvoidStringer = disambiguate
|
||||
opts.QualifiedNames = disambiguate
|
||||
opts.VerbosityLevel = maxVerbosityPreset
|
||||
opts.LimitVerbosity = true
|
||||
s := opts.FormatValue(v, reflect.Map, ptrs).String()
|
||||
return strings.TrimSpace(s)
|
||||
}
|
||||
|
||||
// formatString prints s as a double-quoted or backtick-quoted string.
|
||||
func formatString(s string) string {
|
||||
// Use a quoted string if it is the same length as a raw string literal.
|
||||
// Otherwise, attempt to use the raw string form.
|
||||
qs := strconv.Quote(s)
|
||||
if len(qs) == 1+len(s)+1 {
|
||||
return qs
|
||||
}
|
||||
|
||||
// Disallow newlines to ensure output is a single line.
|
||||
// Only allow printable runes for readability purposes.
|
||||
rawInvalid := func(r rune) bool {
|
||||
return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
|
||||
}
|
||||
if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
|
||||
return "`" + s + "`"
|
||||
}
|
||||
return qs
|
||||
}
|
||||
|
||||
// formatHex prints u as a hexadecimal integer in Go notation.
|
||||
func formatHex(u uint64) string {
|
||||
var f string
|
||||
switch {
|
||||
case u <= 0xff:
|
||||
f = "0x%02x"
|
||||
case u <= 0xffff:
|
||||
f = "0x%04x"
|
||||
case u <= 0xffffff:
|
||||
f = "0x%06x"
|
||||
case u <= 0xffffffff:
|
||||
f = "0x%08x"
|
||||
case u <= 0xffffffffff:
|
||||
f = "0x%010x"
|
||||
case u <= 0xffffffffffff:
|
||||
f = "0x%012x"
|
||||
case u <= 0xffffffffffffff:
|
||||
f = "0x%014x"
|
||||
case u <= 0xffffffffffffffff:
|
||||
f = "0x%016x"
|
||||
}
|
||||
return fmt.Sprintf(f, u)
|
||||
}
|
||||
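FormatValue above renders a []byte as a quoted string only when it is non-empty, valid UTF-8, and entirely printable; otherwise the bytes fall back to hexadecimal via formatHex. A rough sketch of the visible effect (hypothetical values; for large or heavily differing slices the batched formatter in report_slices.go may take over instead):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// payload is a hypothetical wrapper type used only for illustration.
type payload struct {
	Data []byte
}

func main() {
	text := payload{Data: []byte("hello, world\n")}      // printable UTF-8: shown as a quoted string
	blob := payload{Data: []byte{0x00, 0xff, 0x10, 0x80}} // non-printable: shown as hex byte values
	fmt.Println(cmp.Diff(text, blob))
}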
614
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
Normal file
@@ -0,0 +1,614 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/diff"
|
||||
)
|
||||
|
||||
// CanFormatDiffSlice reports whether we support custom formatting for nodes
|
||||
// that are slices of primitive kinds or strings.
|
||||
func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
|
||||
switch {
|
||||
case opts.DiffMode != diffUnknown:
|
||||
return false // Must be formatting in diff mode
|
||||
case v.NumDiff == 0:
|
||||
return false // No differences detected
|
||||
case !v.ValueX.IsValid() || !v.ValueY.IsValid():
|
||||
return false // Both values must be valid
|
||||
case v.NumIgnored > 0:
|
||||
return false // Some ignore option was used
|
||||
case v.NumTransformed > 0:
|
||||
return false // Some transform option was used
|
||||
case v.NumCompared > 1:
|
||||
return false // More than one comparison was used
|
||||
case v.NumCompared == 1 && v.Type.Name() != "":
|
||||
// The need for cmp to check applicability of options on every element
|
||||
// in a slice is a significant performance detriment for large []byte.
|
||||
// The workaround is to specify Comparer(bytes.Equal),
|
||||
// which enables cmp to compare []byte more efficiently.
|
||||
// If they differ, we still want to provide batched diffing.
|
||||
// The logic disallows named types since they tend to have their own
|
||||
// String method, with nicer formatting than what this provides.
|
||||
return false
|
||||
}
|
||||
|
||||
// Check whether this is an interface with the same concrete types.
|
||||
t := v.Type
|
||||
vx, vy := v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
}
|
||||
|
||||
// Check whether we provide specialized diffing for this type.
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
case reflect.Array, reflect.Slice:
|
||||
// Only slices of primitive types have specialized handling.
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Both slice values have to be non-empty.
|
||||
if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
|
||||
return false
|
||||
}
|
||||
|
||||
// If a sufficient number of elements already differ,
|
||||
// use specialized formatting even if length requirement is not met.
|
||||
if v.NumDiff > v.NumSame {
|
||||
return true
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
|
||||
// Use specialized string diffing for longer slices or strings.
|
||||
const minLength = 32
|
||||
return vx.Len() >= minLength && vy.Len() >= minLength
|
||||
}
|
||||
|
||||
// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
|
||||
// This provides custom-tailored logic to make printing of differences in
|
||||
// textual strings and slices of primitive kinds more readable.
|
||||
func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
|
||||
assert(opts.DiffMode == diffUnknown)
|
||||
t, vx, vy := v.Type, v.ValueX, v.ValueY
|
||||
if t.Kind() == reflect.Interface {
|
||||
vx, vy = vx.Elem(), vy.Elem()
|
||||
t = vx.Type()
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
|
||||
// Auto-detect the type of the data.
|
||||
var sx, sy string
|
||||
var ssx, ssy []string
|
||||
var isString, isMostlyText, isPureLinedText, isBinary bool
|
||||
switch {
|
||||
case t.Kind() == reflect.String:
|
||||
sx, sy = vx.String(), vy.String()
|
||||
isString = true
|
||||
case t.Kind() == reflect.Slice && t.Elem() == byteType:
|
||||
sx, sy = string(vx.Bytes()), string(vy.Bytes())
|
||||
isString = true
|
||||
case t.Kind() == reflect.Array:
|
||||
// Arrays need to be addressable for slice operations to work.
|
||||
vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
|
||||
vx2.Set(vx)
|
||||
vy2.Set(vy)
|
||||
vx, vy = vx2, vy2
|
||||
}
|
||||
if isString {
|
||||
var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
|
||||
for i, r := range sx + sy {
|
||||
numTotalRunes++
|
||||
if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
|
||||
numValidRunes++
|
||||
}
|
||||
if r == '\n' {
|
||||
if maxLineLen < i-lastLineIdx {
|
||||
maxLineLen = i - lastLineIdx
|
||||
}
|
||||
lastLineIdx = i + 1
|
||||
numLines++
|
||||
}
|
||||
}
|
||||
isPureText := numValidRunes == numTotalRunes
|
||||
isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
|
||||
isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
|
||||
isBinary = !isMostlyText
|
||||
|
||||
// Avoid diffing by lines if it produces a significantly more complex
|
||||
// edit script than diffing by bytes.
|
||||
if isPureLinedText {
|
||||
ssx = strings.Split(sx, "\n")
|
||||
ssy = strings.Split(sy, "\n")
|
||||
esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(ssx[ix] == ssy[iy])
|
||||
})
|
||||
esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(sx[ix] == sy[iy])
|
||||
})
|
||||
efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
|
||||
efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
|
||||
quotedLength := len(strconv.Quote(sx + sy))
|
||||
unquotedLength := len(sx) + len(sy)
|
||||
escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
|
||||
isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
|
||||
}
|
||||
}
|
||||
|
||||
// Format the string into printable records.
|
||||
var list textList
|
||||
var delim string
|
||||
switch {
|
||||
// If the text appears to be multi-lined text,
|
||||
// then perform differencing across individual lines.
|
||||
case isPureLinedText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.Index(0).String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
delim = "\n"
|
||||
|
||||
// If possible, use a custom triple-quote (""") syntax for printing
|
||||
// differences in a string literal. This format is more readable,
|
||||
// but has edge-cases where differences are visually indistinguishable.
|
||||
// This format is avoided under the following conditions:
|
||||
// - A line starts with `"""`
|
||||
// - A line starts with "..."
|
||||
// - A line contains non-printable characters
|
||||
// - Adjacent different lines differ only by whitespace
|
||||
//
|
||||
// For example:
|
||||
//
|
||||
// """
|
||||
// ... // 3 identical lines
|
||||
// foo
|
||||
// bar
|
||||
// - baz
|
||||
// + BAZ
|
||||
// """
|
||||
isTripleQuoted := true
|
||||
prevRemoveLines := map[string]bool{}
|
||||
prevInsertLines := map[string]bool{}
|
||||
var list2 textList
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
for _, r := range list {
|
||||
if !r.Value.Equal(textEllipsis) {
|
||||
line, _ := strconv.Unquote(string(r.Value.(textLine)))
|
||||
line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
|
||||
normLine := strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return -1 // drop whitespace to avoid visually indistinguishable output
|
||||
}
|
||||
return r
|
||||
}, line)
|
||||
isPrintable := func(r rune) bool {
|
||||
return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
|
||||
}
|
||||
isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
|
||||
switch r.Diff {
|
||||
case diffRemoved:
|
||||
isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
|
||||
prevRemoveLines[normLine] = true
|
||||
case diffInserted:
|
||||
isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
|
||||
prevInsertLines[normLine] = true
|
||||
}
|
||||
if !isTripleQuoted {
|
||||
break
|
||||
}
|
||||
r.Value = textLine(line)
|
||||
r.ElideComma = true
|
||||
}
|
||||
if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
|
||||
prevRemoveLines = map[string]bool{}
|
||||
prevInsertLines = map[string]bool{}
|
||||
}
|
||||
list2 = append(list2, r)
|
||||
}
|
||||
if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
|
||||
list2 = list2[:len(list2)-1] // elide single empty line at the end
|
||||
}
|
||||
list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
|
||||
if isTripleQuoted {
|
||||
var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
// Always emit type for slices since the triple-quote syntax
|
||||
// looks like a string (not a slice).
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// If the text appears to be single-lined text,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is printed as quoted strings.
|
||||
case isMostlyText:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
s := formatString(v.String())
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
|
||||
// If the text appears to be binary data,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The output is inspired by hexdump.
|
||||
case isBinary:
|
||||
list = opts.formatDiffSlice(
|
||||
reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
|
||||
return textRecord{Diff: d, Value: textLine(s), Comment: comment}
|
||||
},
|
||||
)
|
||||
|
||||
// For all other slices of primitive types,
|
||||
// then perform differencing in approximately fixed-sized chunks.
|
||||
// The size of each chunk depends on the width of the element kind.
|
||||
default:
|
||||
var chunkSize int
|
||||
if t.Elem().Kind() == reflect.Bool {
|
||||
chunkSize = 16
|
||||
} else {
|
||||
switch t.Elem().Bits() {
|
||||
case 8:
|
||||
chunkSize = 16
|
||||
case 16:
|
||||
chunkSize = 12
|
||||
case 32:
|
||||
chunkSize = 8
|
||||
default:
|
||||
chunkSize = 8
|
||||
}
|
||||
}
|
||||
list = opts.formatDiffSlice(
|
||||
vx, vy, chunkSize, t.Elem().Kind().String(),
|
||||
func(v reflect.Value, d diffMode) textRecord {
|
||||
var ss []string
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
switch t.Elem().Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Int()))
|
||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
|
||||
case reflect.Uint8, reflect.Uintptr:
|
||||
ss = append(ss, formatHex(v.Index(i).Uint()))
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
|
||||
ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
|
||||
}
|
||||
}
|
||||
s := strings.Join(ss, ", ")
|
||||
return textRecord{Diff: d, Value: textLine(s)}
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// Wrap the output with appropriate type information.
|
||||
var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
|
||||
if !isMostlyText {
|
||||
// The "{...}" byte-sequence literal is not valid Go syntax for strings.
|
||||
// Emit the type for extra clarity (e.g. "string{...}").
|
||||
if t.Kind() == reflect.String {
|
||||
opts = opts.WithTypeMode(emitType)
|
||||
}
|
||||
return opts.FormatType(t, out)
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != stringType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
case reflect.Slice:
|
||||
out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
|
||||
if t != bytesType {
|
||||
out = opts.FormatType(t, out)
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// formatASCII formats s as an ASCII string.
|
||||
// This is useful for printing binary strings in a semi-legible way.
|
||||
func formatASCII(s string) string {
|
||||
b := bytes.Repeat([]byte{'.'}, len(s))
|
||||
for i := 0; i < len(s); i++ {
|
||||
if ' ' <= s[i] && s[i] <= '~' {
|
||||
b[i] = s[i]
|
||||
}
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (opts formatOptions) formatDiffSlice(
|
||||
vx, vy reflect.Value, chunkSize int, name string,
|
||||
makeRec func(reflect.Value, diffMode) textRecord,
|
||||
) (list textList) {
|
||||
eq := func(ix, iy int) bool {
|
||||
return vx.Index(ix).Interface() == vy.Index(iy).Interface()
|
||||
}
|
||||
es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
|
||||
return diff.BoolResult(eq(ix, iy))
|
||||
})
|
||||
|
||||
appendChunks := func(v reflect.Value, d diffMode) int {
|
||||
n0 := v.Len()
|
||||
for v.Len() > 0 {
|
||||
n := chunkSize
|
||||
if n > v.Len() {
|
||||
n = v.Len()
|
||||
}
|
||||
list = append(list, makeRec(v.Slice(0, n), d))
|
||||
v = v.Slice(n, v.Len())
|
||||
}
|
||||
return n0 - v.Len()
|
||||
}
|
||||
|
||||
var numDiffs int
|
||||
maxLen := -1
|
||||
if opts.LimitVerbosity {
|
||||
maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
|
||||
opts.VerbosityLevel--
|
||||
}
|
||||
|
||||
groups := coalesceAdjacentEdits(name, es)
|
||||
groups = coalesceInterveningIdentical(groups, chunkSize/4)
|
||||
groups = cleanupSurroundingIdentical(groups, eq)
|
||||
maxGroup := diffStats{Name: name}
|
||||
for i, ds := range groups {
|
||||
if maxLen >= 0 && numDiffs >= maxLen {
|
||||
maxGroup = maxGroup.Append(ds)
|
||||
continue
|
||||
}
|
||||
|
||||
// Print equal.
|
||||
if ds.NumDiff() == 0 {
|
||||
// Compute the number of leading and trailing equal bytes to print.
|
||||
var numLo, numHi int
|
||||
numEqual := ds.NumIgnored + ds.NumIdentical
|
||||
for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
|
||||
numLo++
|
||||
}
|
||||
for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
|
||||
numHi++
|
||||
}
|
||||
if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
|
||||
numHi = numEqual - numLo // Avoid pointless coalescing of single equal row
|
||||
}
|
||||
|
||||
// Print the equal bytes.
|
||||
appendChunks(vx.Slice(0, numLo), diffIdentical)
|
||||
if numEqual > numLo+numHi {
|
||||
ds.NumIdentical -= numLo + numHi
|
||||
list.AppendEllipsis(ds)
|
||||
}
|
||||
appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
|
||||
vx = vx.Slice(numEqual, vx.Len())
|
||||
vy = vy.Slice(numEqual, vy.Len())
|
||||
continue
|
||||
}
|
||||
|
||||
// Print unequal.
|
||||
len0 := len(list)
|
||||
nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
|
||||
vx = vx.Slice(nx, vx.Len())
|
||||
ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
|
||||
vy = vy.Slice(ny, vy.Len())
|
||||
numDiffs += len(list) - len0
|
||||
}
|
||||
if maxGroup.IsZero() {
|
||||
assert(vx.Len() == 0 && vy.Len() == 0)
|
||||
} else {
|
||||
list.AppendEllipsis(maxGroup)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
|
||||
// equal or unequal counts.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: "..XXY...Y"
|
||||
// Output: [
|
||||
// {NumIdentical: 2},
|
||||
// {NumRemoved: 2, NumInserted 1},
|
||||
// {NumIdentical: 3},
|
||||
// {NumInserted: 1},
|
||||
// ]
|
||||
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
|
||||
var prevMode byte
|
||||
lastStats := func(mode byte) *diffStats {
|
||||
if prevMode != mode {
|
||||
groups = append(groups, diffStats{Name: name})
|
||||
prevMode = mode
|
||||
}
|
||||
return &groups[len(groups)-1]
|
||||
}
|
||||
for _, e := range es {
|
||||
switch e {
|
||||
case diff.Identity:
|
||||
lastStats('=').NumIdentical++
|
||||
case diff.UniqueX:
|
||||
lastStats('!').NumRemoved++
|
||||
case diff.UniqueY:
|
||||
lastStats('!').NumInserted++
|
||||
case diff.Modified:
|
||||
lastStats('!').NumModified++
|
||||
}
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
|
||||
// equal groups into adjacent unequal groups that currently result in a
|
||||
// dual inserted/removed printout. This acts as a high-pass filter to smooth
|
||||
// out high-frequency changes within the windowSize.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// WindowSize: 16,
|
||||
// Input: [
|
||||
// {NumIdentical: 61}, // group 0
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 1
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 9}, // └── coalesce
|
||||
// {NumIdentical: 64}, // group 2
|
||||
// {NumRemoved: 3, NumInserted: 1}, // group 3
|
||||
// {NumIdentical: 6}, // ├── coalesce
|
||||
// {NumInserted: 2}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 7}, // ├── coalesce
|
||||
// {NumIdentical: 1}, // ├── coalesce
|
||||
// {NumRemoved: 2}, // └── coalesce
|
||||
// {NumIdentical: 63}, // group 4
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 64},
|
||||
// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
|
||||
// {NumIdentical: 63},
|
||||
// ]
|
||||
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
|
||||
groups, groupsOrig := groups[:0], groups
|
||||
for i, ds := range groupsOrig {
|
||||
if len(groups) >= 2 && ds.NumDiff() > 0 {
|
||||
prev := &groups[len(groups)-2] // Unequal group
|
||||
curr := &groups[len(groups)-1] // Equal group
|
||||
next := &groupsOrig[i] // Unequal group
|
||||
hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
|
||||
hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
|
||||
if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
|
||||
*prev = prev.Append(*curr).Append(*next)
|
||||
groups = groups[:len(groups)-1] // Truncate off equal group
|
||||
continue
|
||||
}
|
||||
}
|
||||
groups = append(groups, ds)
|
||||
}
|
||||
return groups
|
||||
}
|
||||
|
||||
// cleanupSurroundingIdentical scans through all unequal groups, and
|
||||
// moves any leading sequence of equal elements to the preceding equal group and
|
||||
// moves any trailing sequence of equal elements to the succeeding equal group.
|
||||
//
|
||||
// This is necessary since coalesceInterveningIdentical may coalesce edit groups
|
||||
// together such that leading/trailing spans of equal elements becomes possible.
|
||||
// Note that this can occur even with an optimal diffing algorithm.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// Input: [
|
||||
// {NumIdentical: 61},
|
||||
// {NumIdentical: 1 , NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
|
||||
// {NumIdentical: 67},
|
||||
// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
|
||||
// {NumIdentical: 54},
|
||||
// ]
|
||||
// Output: [
|
||||
// {NumIdentical: 64}, // incremented by 3
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 67},
|
||||
// {NumRemoved: 9},
|
||||
// {NumIdentical: 64}, // incremented by 10
|
||||
// ]
|
||||
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
|
||||
var ix, iy int // indexes into sequence x and y
|
||||
for i, ds := range groups {
|
||||
// Handle equal group.
|
||||
if ds.NumDiff() == 0 {
|
||||
ix += ds.NumIdentical
|
||||
iy += ds.NumIdentical
|
||||
continue
|
||||
}
|
||||
|
||||
// Handle unequal group.
|
||||
nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
|
||||
ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
|
||||
var numLeadingIdentical, numTrailingIdentical int
|
||||
for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
|
||||
numLeadingIdentical++
|
||||
}
|
||||
for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
|
||||
numTrailingIdentical++
|
||||
}
|
||||
if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
|
||||
if numLeadingIdentical > 0 {
|
||||
// Remove leading identical span from this group and
|
||||
// insert it into the preceding group.
|
||||
if i-1 >= 0 {
|
||||
groups[i-1].NumIdentical += numLeadingIdentical
|
||||
} else {
|
||||
// No preceding group exists, so prepend a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
|
||||
}()
|
||||
}
|
||||
// Increment indexes since the preceding group would have handled this.
|
||||
ix += numLeadingIdentical
|
||||
iy += numLeadingIdentical
|
||||
}
|
||||
if numTrailingIdentical > 0 {
|
||||
// Remove trailing identical span from this group and
|
||||
// insert it into the succeeding group.
|
||||
if i+1 < len(groups) {
|
||||
groups[i+1].NumIdentical += numTrailingIdentical
|
||||
} else {
|
||||
// No succeeding group exists, so append a new group,
|
||||
// but do so after we finish iterating over all groups.
|
||||
defer func() {
|
||||
groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
|
||||
}()
|
||||
}
|
||||
// Do not increment indexes since the succeeding group will handle this.
|
||||
}
|
||||
|
||||
// Update this group since some identical elements were removed.
|
||||
nx -= numIdentical
|
||||
ny -= numIdentical
|
||||
groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
|
||||
}
|
||||
ix += nx
|
||||
iy += ny
|
||||
}
|
||||
return groups
|
||||
}
|
||||
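For strings, CanFormatDiffSlice above requires at least 32 bytes on each side, and the line-oriented path additionally wants four or more lines; when those heuristics hold, the difference may be rendered with the triple-quote layout described in the comments. A small sketch (hypothetical inputs, not part of the vendored files):

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

func main() {
	// Both strings exceed 32 bytes and span more than four lines, so the
	// slice reporter diffs them line by line rather than byte by byte.
	x := "alpha\nbravo\ncharlie\ndelta\necho\nfoxtrot\n"
	y := "alpha\nbravo\nCHARLIE\ndelta\necho\nfoxtrot\n"
	fmt.Println(cmp.Diff(x, y))
}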
432
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
Normal file
@@ -0,0 +1,432 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/google/go-cmp/cmp/internal/flags"
|
||||
)
|
||||
|
||||
var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
|
||||
|
||||
const maxColumnLength = 80
|
||||
|
||||
type indentMode int
|
||||
|
||||
func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
|
||||
// The output of Diff is documented as being unstable to provide future
|
||||
// flexibility in changing the output for more humanly readable reports.
|
||||
// This logic intentionally introduces instability to the exact output
|
||||
// so that users can detect accidental reliance on stability early on,
|
||||
// rather than much later when an actual change to the format occurs.
|
||||
if flags.Deterministic || randBool {
|
||||
// Use regular spaces (U+0020).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
} else {
|
||||
// Use non-breaking spaces (U+00a0).
|
||||
switch d {
|
||||
case diffUnknown, diffIdentical:
|
||||
b = append(b, " "...)
|
||||
case diffRemoved:
|
||||
b = append(b, "- "...)
|
||||
case diffInserted:
|
||||
b = append(b, "+ "...)
|
||||
}
|
||||
}
|
||||
return repeatCount(n).appendChar(b, '\t')
|
||||
}
|
||||
|
||||
type repeatCount int
|
||||
|
||||
func (n repeatCount) appendChar(b []byte, c byte) []byte {
|
||||
for ; n > 0; n-- {
|
||||
b = append(b, c)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// textNode is a simplified tree-based representation of structured text.
|
||||
// Possible node types are textWrap, textList, or textLine.
|
||||
type textNode interface {
|
||||
// Len reports the length in bytes of a single-line version of the tree.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are ignored.
|
||||
Len() int
|
||||
// Equal reports whether the two trees are structurally identical.
|
||||
// Nested textRecord.Diff and textRecord.Comment fields are compared.
|
||||
Equal(textNode) bool
|
||||
// String returns the string representation of the text tree.
|
||||
// It is not guaranteed that len(x.String()) == x.Len(),
|
||||
// nor that x.String() == y.String() implies that x.Equal(y).
|
||||
String() string
|
||||
|
||||
// formatCompactTo formats the contents of the tree as a single-line string
|
||||
// to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
|
||||
// fields are ignored.
|
||||
//
|
||||
// However, not all nodes in the tree should be collapsed as a single-line.
|
||||
// If a node can be collapsed as a single-line, it is replaced by a textLine
|
||||
// node. Since the top-level node cannot replace itself, this also returns
|
||||
// the current node itself.
|
||||
//
|
||||
// This does not mutate the receiver.
|
||||
formatCompactTo([]byte, diffMode) ([]byte, textNode)
|
||||
// formatExpandedTo formats the contents of the tree as a multi-line string
|
||||
// to the provided buffer. In order for column alignment to operate well,
|
||||
// formatCompactTo must be called before calling formatExpandedTo.
|
||||
formatExpandedTo([]byte, diffMode, indentMode) []byte
|
||||
}
|
||||
|
||||
// textWrap is a wrapper that concatenates a prefix and/or a suffix
|
||||
// to the underlying node.
|
||||
type textWrap struct {
|
||||
Prefix string // e.g., "bytes.Buffer{"
|
||||
Value textNode // textWrap | textList | textLine
|
||||
Suffix string // e.g., "}"
|
||||
Metadata interface{} // arbitrary metadata; has no effect on formatting
|
||||
}
|
||||
|
||||
func (s *textWrap) Len() int {
|
||||
return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
|
||||
}
|
||||
func (s1 *textWrap) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(*textWrap); ok {
|
||||
return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s *textWrap) String() string {
|
||||
var d diffMode
|
||||
var n indentMode
|
||||
_, s2 := s.formatCompactTo(nil, d)
|
||||
b := n.appendIndent(nil, d) // Leading indent
|
||||
b = s2.formatExpandedTo(b, d, n) // Main body
|
||||
b = append(b, '\n') // Trailing newline
|
||||
return string(b)
|
||||
}
|
||||
func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
n0 := len(b) // Original buffer length
|
||||
b = append(b, s.Prefix...)
|
||||
b, s.Value = s.Value.formatCompactTo(b, d)
|
||||
b = append(b, s.Suffix...)
|
||||
if _, ok := s.Value.(textLine); ok {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
b = append(b, s.Prefix...)
|
||||
b = s.Value.formatExpandedTo(b, d, n)
|
||||
b = append(b, s.Suffix...)
|
||||
return b
|
||||
}
|
||||
|
||||
// textList is a comma-separated list of textWrap or textLine nodes.
|
||||
// The list may be formatted as multi-lines or single-line at the discretion
|
||||
// of the textList.formatCompactTo method.
|
||||
type textList []textRecord
|
||||
type textRecord struct {
|
||||
Diff diffMode // e.g., 0 or '-' or '+'
|
||||
Key string // e.g., "MyField"
|
||||
Value textNode // textWrap | textLine
|
||||
ElideComma bool // avoid trailing comma
|
||||
Comment fmt.Stringer // e.g., "6 identical fields"
|
||||
}
|
||||
|
||||
// AppendEllipsis appends a new ellipsis node to the list if none already
|
||||
// exists at the end. If ds is non-zero, it coalesces the statistics with the
|
||||
// previous diffStats.
|
||||
func (s *textList) AppendEllipsis(ds diffStats) {
|
||||
hasStats := !ds.IsZero()
|
||||
if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
|
||||
if hasStats {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
|
||||
} else {
|
||||
*s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
|
||||
}
|
||||
return
|
||||
}
|
||||
if hasStats {
|
||||
(*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
|
||||
}
|
||||
}
|
||||
|
||||
func (s textList) Len() (n int) {
|
||||
for i, r := range s {
|
||||
n += len(r.Key)
|
||||
if r.Key != "" {
|
||||
n += len(": ")
|
||||
}
|
||||
n += r.Value.Len()
|
||||
if i < len(s)-1 {
|
||||
n += len(", ")
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (s1 textList) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textList); ok {
|
||||
if len(s1) != len(s2) {
|
||||
return false
|
||||
}
|
||||
for i := range s1 {
|
||||
r1, r2 := s1[i], s2[i]
|
||||
if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s textList) String() string {
|
||||
return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
|
||||
}
|
||||
|
||||
func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
s = append(textList(nil), s...) // Avoid mutating original
|
||||
|
||||
// Determine whether we can collapse this list as a single line.
|
||||
n0 := len(b) // Original buffer length
|
||||
var multiLine bool
|
||||
for i, r := range s {
|
||||
if r.Diff == diffInserted || r.Diff == diffRemoved {
|
||||
multiLine = true
|
||||
}
|
||||
b = append(b, r.Key...)
|
||||
if r.Key != "" {
|
||||
b = append(b, ": "...)
|
||||
}
|
||||
b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
|
||||
if _, ok := s[i].Value.(textLine); !ok {
|
||||
multiLine = true
|
||||
}
|
||||
if r.Comment != nil {
|
||||
multiLine = true
|
||||
}
|
||||
if i < len(s)-1 {
|
||||
b = append(b, ", "...)
|
||||
}
|
||||
}
|
||||
// Force multi-lined output when printing a removed/inserted node that
|
||||
// is sufficiently long.
|
||||
if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
|
||||
multiLine = true
|
||||
}
|
||||
if !multiLine {
|
||||
return b, textLine(b[n0:])
|
||||
}
|
||||
return b, s
|
||||
}
|
||||
|
||||
func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
|
||||
alignKeyLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return r.Key == "" || !isLine
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
|
||||
)
|
||||
alignValueLens := s.alignLens(
|
||||
func(r textRecord) bool {
|
||||
_, isLine := r.Value.(textLine)
|
||||
return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
|
||||
},
|
||||
func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
|
||||
)
|
||||
|
||||
// Format lists of simple lists in a batched form.
|
||||
// If the list is sequence of only textLine values,
|
||||
// then batch multiple values on a single line.
|
||||
var isSimple bool
|
||||
for _, r := range s {
|
||||
_, isLine := r.Value.(textLine)
|
||||
isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
|
||||
if !isSimple {
|
||||
break
|
||||
}
|
||||
}
|
||||
if isSimple {
|
||||
n++
|
||||
var batch []byte
|
||||
emitBatch := func() {
|
||||
if len(batch) > 0 {
|
||||
b = n.appendIndent(append(b, '\n'), d)
|
||||
b = append(b, bytes.TrimRight(batch, " ")...)
|
||||
batch = batch[:0]
|
||||
}
|
||||
}
|
||||
for _, r := range s {
|
||||
line := r.Value.(textLine)
|
||||
if len(batch)+len(line)+len(", ") > maxColumnLength {
|
||||
emitBatch()
|
||||
}
|
||||
batch = append(batch, line...)
|
||||
batch = append(batch, ", "...)
|
||||
}
|
||||
emitBatch()
|
||||
n--
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
// Format the list as a multi-lined output.
|
||||
n++
|
||||
for i, r := range s {
|
||||
b = n.appendIndent(append(b, '\n'), d|r.Diff)
|
||||
if r.Key != "" {
|
||||
b = append(b, r.Key+": "...)
|
||||
}
|
||||
b = alignKeyLens[i].appendChar(b, ' ')
|
||||
|
||||
b = r.Value.formatExpandedTo(b, d|r.Diff, n)
|
||||
if !r.ElideComma {
|
||||
b = append(b, ',')
|
||||
}
|
||||
b = alignValueLens[i].appendChar(b, ' ')
|
||||
|
||||
if r.Comment != nil {
|
||||
b = append(b, " // "+r.Comment.String()...)
|
||||
}
|
||||
}
|
||||
n--
|
||||
|
||||
return n.appendIndent(append(b, '\n'), d)
|
||||
}
|
||||
|
||||
func (s textList) alignLens(
|
||||
skipFunc func(textRecord) bool,
|
||||
lenFunc func(textRecord) int,
|
||||
) []repeatCount {
|
||||
var startIdx, endIdx, maxLen int
|
||||
lens := make([]repeatCount, len(s))
|
||||
for i, r := range s {
|
||||
if skipFunc(r) {
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
startIdx, endIdx, maxLen = i+1, i+1, 0
|
||||
} else {
|
||||
if maxLen < lenFunc(r) {
|
||||
maxLen = lenFunc(r)
|
||||
}
|
||||
endIdx = i + 1
|
||||
}
|
||||
}
|
||||
for j := startIdx; j < endIdx && j < len(s); j++ {
|
||||
lens[j] = repeatCount(maxLen - lenFunc(s[j]))
|
||||
}
|
||||
return lens
|
||||
}
|
||||
|
||||
// textLine is a single-line segment of text and is always a leaf node
|
||||
// in the textNode tree.
|
||||
type textLine []byte
|
||||
|
||||
var (
|
||||
textNil = textLine("nil")
|
||||
textEllipsis = textLine("...")
|
||||
)
|
||||
|
||||
func (s textLine) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
func (s1 textLine) Equal(s2 textNode) bool {
|
||||
if s2, ok := s2.(textLine); ok {
|
||||
return bytes.Equal([]byte(s1), []byte(s2))
|
||||
}
|
||||
return false
|
||||
}
|
||||
func (s textLine) String() string {
|
||||
return string(s)
|
||||
}
|
||||
func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
|
||||
return append(b, s...), s
|
||||
}
|
||||
func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
|
||||
return append(b, s...)
|
||||
}
|
||||
|
||||
type diffStats struct {
|
||||
Name string
|
||||
NumIgnored int
|
||||
NumIdentical int
|
||||
NumRemoved int
|
||||
NumInserted int
|
||||
NumModified int
|
||||
}
|
||||
|
||||
func (s diffStats) IsZero() bool {
|
||||
s.Name = ""
|
||||
return s == diffStats{}
|
||||
}
|
||||
|
||||
func (s diffStats) NumDiff() int {
|
||||
return s.NumRemoved + s.NumInserted + s.NumModified
|
||||
}
|
||||
|
||||
func (s diffStats) Append(ds diffStats) diffStats {
|
||||
assert(s.Name == ds.Name)
|
||||
s.NumIgnored += ds.NumIgnored
|
||||
s.NumIdentical += ds.NumIdentical
|
||||
s.NumRemoved += ds.NumRemoved
|
||||
s.NumInserted += ds.NumInserted
|
||||
s.NumModified += ds.NumModified
|
||||
return s
|
||||
}
|
||||
|
||||
// String prints a humanly-readable summary of coalesced records.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
|
||||
func (s diffStats) String() string {
|
||||
var ss []string
|
||||
var sum int
|
||||
labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
|
||||
counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
|
||||
for i, n := range counts {
|
||||
if n > 0 {
|
||||
ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
|
||||
}
|
||||
sum += n
|
||||
}
|
||||
|
||||
// Pluralize the name (adjusting for some obscure English grammar rules).
|
||||
name := s.Name
|
||||
if sum > 1 {
|
||||
name += "s"
|
||||
if strings.HasSuffix(name, "ys") {
|
||||
name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
|
||||
}
|
||||
}
|
||||
|
||||
// Format the list according to English grammar (with Oxford comma).
|
||||
switch n := len(ss); n {
|
||||
case 0:
|
||||
return ""
|
||||
case 1, 2:
|
||||
return strings.Join(ss, " and ") + " " + name
|
||||
default:
|
||||
return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
|
||||
}
|
||||
}
|
||||
|
||||
type commentString string
|
||||
|
||||
func (s commentString) String() string { return string(s) }
|
||||
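The pluralization and Oxford-comma rules in diffStats.String above are easiest to check with concrete inputs. A minimal standalone sketch of the same formatting rules (the summarize helper and its example inputs are illustrative only, not part of the vendored package):

package main

import (
	"fmt"
	"strings"
)

// summarize mirrors the formatting rules of diffStats.String: count labels in
// a fixed order, pluralize the noun (with the "-ys" -> "-ies" special case),
// and join the pieces with an Oxford comma.
func summarize(name string, counts map[string]int) string {
	labels := []string{"ignored", "identical", "removed", "inserted", "modified"}
	var ss []string
	sum := 0
	for _, label := range labels {
		if n := counts[label]; n > 0 {
			ss = append(ss, fmt.Sprintf("%d %v", n, label))
			sum += n
		}
	}
	if sum > 1 {
		name += "s"
		if strings.HasSuffix(name, "ys") {
			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
		}
	}
	switch n := len(ss); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(ss, " and ") + " " + name
	default:
		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
	}
}

func main() {
	fmt.Println(summarize("field", map[string]int{"ignored": 5}))                // 5 ignored fields
	fmt.Println(summarize("entry", map[string]int{"removed": 1, "inserted": 2})) // 1 removed and 2 inserted entries
}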
121
vendor/github.com/google/go-cmp/cmp/report_value.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
// Copyright 2019, The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cmp
|
||||
|
||||
import "reflect"
|
||||
|
||||
// valueNode represents a single node within a report, which is a
|
||||
// structured representation of the value tree, containing information
|
||||
// regarding which nodes are equal or not.
|
||||
type valueNode struct {
|
||||
parent *valueNode
|
||||
|
||||
Type reflect.Type
|
||||
ValueX reflect.Value
|
||||
ValueY reflect.Value
|
||||
|
||||
// NumSame is the number of leaf nodes that are equal.
|
||||
// All descendants are equal only if NumDiff is 0.
|
||||
NumSame int
|
||||
// NumDiff is the number of leaf nodes that are not equal.
|
||||
NumDiff int
|
||||
// NumIgnored is the number of leaf nodes that are ignored.
|
||||
NumIgnored int
|
||||
// NumCompared is the number of leaf nodes that were compared
|
||||
// using an Equal method or Comparer function.
|
||||
NumCompared int
|
||||
// NumTransformed is the number of non-leaf nodes that were transformed.
|
||||
NumTransformed int
|
||||
// NumChildren is the number of transitive descendants of this node.
|
||||
// This counts from zero; thus, leaf nodes have no descendants.
|
||||
NumChildren int
|
||||
// MaxDepth is the maximum depth of the tree. This counts from zero;
|
||||
// thus, leaf nodes have a depth of zero.
|
||||
MaxDepth int
|
||||
|
||||
// Records is a list of struct fields, slice elements, or map entries.
|
||||
Records []reportRecord // If populated, implies Value is not populated
|
||||
|
||||
// Value is the result of a transformation, pointer indirect, of
|
||||
// type assertion.
|
||||
Value *valueNode // If populated, implies Records is not populated
|
||||
|
||||
// TransformerName is the name of the transformer.
|
||||
TransformerName string // If non-empty, implies Value is populated
|
||||
}
|
||||
type reportRecord struct {
|
||||
Key reflect.Value // Invalid for slice element
|
||||
Value *valueNode
|
||||
}
|
||||
|
||||
func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
|
||||
vx, vy := ps.Values()
|
||||
child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
|
||||
switch s := ps.(type) {
|
||||
case StructField:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
|
||||
case SliceIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Value: child})
|
||||
case MapIndex:
|
||||
assert(parent.Value == nil)
|
||||
parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
|
||||
case Indirect:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case TypeAssertion:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
case Transform:
|
||||
assert(parent.Value == nil && parent.Records == nil)
|
||||
parent.Value = child
|
||||
parent.TransformerName = s.Name()
|
||||
parent.NumTransformed++
|
||||
default:
|
||||
assert(parent == nil) // Must be the root step
|
||||
}
|
||||
return child
|
||||
}
|
||||
|
||||
func (r *valueNode) Report(rs Result) {
|
||||
assert(r.MaxDepth == 0) // May only be called on leaf nodes
|
||||
|
||||
if rs.ByIgnore() {
|
||||
r.NumIgnored++
|
||||
} else {
|
||||
if rs.Equal() {
|
||||
r.NumSame++
|
||||
} else {
|
||||
r.NumDiff++
|
||||
}
|
||||
}
|
||||
assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
|
||||
|
||||
if rs.ByMethod() {
|
||||
r.NumCompared++
|
||||
}
|
||||
if rs.ByFunc() {
|
||||
r.NumCompared++
|
||||
}
|
||||
assert(r.NumCompared <= 1)
|
||||
}
|
||||
|
||||
func (child *valueNode) PopStep() (parent *valueNode) {
|
||||
if child.parent == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child.parent
|
||||
parent.NumSame += child.NumSame
|
||||
parent.NumDiff += child.NumDiff
|
||||
parent.NumIgnored += child.NumIgnored
|
||||
parent.NumCompared += child.NumCompared
|
||||
parent.NumTransformed += child.NumTransformed
|
||||
parent.NumChildren += child.NumChildren + 1
|
||||
if parent.MaxDepth < child.MaxDepth+1 {
|
||||
parent.MaxDepth = child.MaxDepth + 1
|
||||
}
|
||||
return parent
|
||||
}
|
||||
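The PushStep/Report/PopStep methods above collect per-node statistics and fold them into the parent node on PopStep. A small self-contained sketch of that roll-up rule (simplified types, not the vendored ones; only the counters shown above are modeled):

package main

import "fmt"

// node is a stripped-down stand-in for valueNode with just the counters that
// PopStep aggregates into the parent.
type node struct {
	NumSame, NumDiff, NumIgnored int
	NumChildren, MaxDepth        int
}

// popInto mirrors PopStep: fold a child's statistics into its parent and
// track the deepest subtree seen so far.
func popInto(parent, child *node) {
	parent.NumSame += child.NumSame
	parent.NumDiff += child.NumDiff
	parent.NumIgnored += child.NumIgnored
	parent.NumChildren += child.NumChildren + 1
	if parent.MaxDepth < child.MaxDepth+1 {
		parent.MaxDepth = child.MaxDepth + 1
	}
}

func main() {
	root := &node{}
	leafA := &node{NumSame: 1} // a leaf that compared equal
	leafB := &node{NumDiff: 1} // a leaf that compared unequal
	popInto(root, leafA)
	popInto(root, leafB)
	fmt.Println(root.NumSame, root.NumDiff, root.NumChildren, root.MaxDepth) // 1 1 2 1
}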
5
vendor/github.com/google/pprof/profile/encode.go
generated
vendored
@@ -122,6 +122,7 @@ func (p *Profile) preEncode() {
	}

	p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
	p.docURLX = addString(strings, p.DocURL)

	p.stringTable = make([]string, len(strings))
	for s, i := range strings {
@@ -156,6 +157,7 @@ func (p *Profile) encode(b *buffer) {
	encodeInt64Opt(b, 12, p.Period)
	encodeInt64s(b, 13, p.commentX)
	encodeInt64(b, 14, p.defaultSampleTypeX)
	encodeInt64Opt(b, 15, p.docURLX)
}

var profileDecoder = []decoder{
@@ -237,6 +239,8 @@ var profileDecoder = []decoder{
	func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
	// int64 defaultSampleType = 14
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
	// string doc_link = 15;
	func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) },
}

// postDecode takes the unexported fields populated by decode (with
@@ -384,6 +388,7 @@ func (p *Profile) postDecode() error {

	p.commentX = nil
	p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
	p.DocURL, err = getString(p.stringTable, &p.docURLX, err)
	p.stringTable = nil
	return err
}
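The encode/decode hunks above follow pprof's usual pattern for string fields: preEncode interns DocURL into the profile's string table and stores the index in docURLX (proto field 15), and postDecode resolves the index back into Profile.DocURL. A minimal sketch of that interning step (the addString helper here is a simplified stand-in, and the URL is a made-up example):

package main

import "fmt"

// addString interns s into the string -> index map and returns its index,
// mirroring how preEncode turns Profile.DocURL into Profile.docURLX.
func addString(strings map[string]int, s string) int64 {
	i, ok := strings[s]
	if !ok {
		i = len(strings)
		strings[s] = i
	}
	return int64(i)
}

func main() {
	strs := map[string]int{"": 0} // index 0 is always the empty string
	docURLX := addString(strs, "https://example.com/profiling-docs") // hypothetical URL
	fmt.Println(docURLX, len(strs))                                  // 1 2
}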
5
vendor/github.com/google/pprof/profile/merge.go
generated
vendored
@@ -476,6 +476,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
	var timeNanos, durationNanos, period int64
	var comments []string
	seenComments := map[string]bool{}
	var docURL string
	var defaultSampleType string
	for _, s := range srcs {
		if timeNanos == 0 || s.TimeNanos < timeNanos {
@@ -494,6 +495,9 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {
		if defaultSampleType == "" {
			defaultSampleType = s.DefaultSampleType
		}
		if docURL == "" {
			docURL = s.DocURL
		}
	}

	p := &Profile{
@@ -509,6 +513,7 @@ func combineHeaders(srcs []*Profile) (*Profile, error) {

		Comments:          comments,
		DefaultSampleType: defaultSampleType,
		DocURL:            docURL,
	}
	copy(p.SampleType, srcs[0].SampleType)
	return p, nil
9
vendor/github.com/google/pprof/profile/profile.go
generated
vendored
@@ -39,6 +39,7 @@ type Profile struct {
	Location []*Location
	Function []*Function
	Comments []string
	DocURL   string

	DropFrames string
	KeepFrames string
@@ -53,6 +54,7 @@ type Profile struct {
	encodeMu sync.Mutex

	commentX    []int64
	docURLX     int64
	dropFramesX int64
	keepFramesX int64
	stringTable []string
@@ -555,6 +557,9 @@ func (p *Profile) String() string {
	for _, c := range p.Comments {
		ss = append(ss, "Comment: "+c)
	}
	if url := p.DocURL; url != "" {
		ss = append(ss, fmt.Sprintf("Doc: %s", url))
	}
	if pt := p.PeriodType; pt != nil {
		ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
	}
@@ -844,10 +849,10 @@ func (p *Profile) HasFileLines() bool {

// Unsymbolizable returns true if a mapping points to a binary for which
// locations can't be symbolized in principle, at least now. Examples are
// "[vdso]", [vsyscall]" and some others, see the code.
// "[vdso]", "[vsyscall]" and some others, see the code.
func (m *Mapping) Unsymbolizable() bool {
	name := filepath.Base(m.File)
	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
	return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon"
}

// Copy makes a fully independent copy of a profile.
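In the combineHeaders hunk above, DocURL follows the same rule as DefaultSampleType: the first non-empty value among the merged profiles wins. A tiny standalone illustration of that rule (the inputs are made up):

package main

import "fmt"

// firstNonEmpty mirrors the merge rule used for DocURL and DefaultSampleType
// in combineHeaders: keep the first non-empty value encountered.
func firstNonEmpty(values ...string) string {
	out := ""
	for _, v := range values {
		if out == "" {
			out = v
		}
	}
	return out
}

func main() {
	fmt.Println(firstNonEmpty("", "https://example.com/profile-help", "https://other.example"))
	// https://example.com/profile-help
}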
13
vendor/github.com/x448/float16/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,13 @@
language: go

go:
- 1.11.x

env:
- GO111MODULE=on

script:
- go test -short -coverprofile=coverage.txt -covermode=count ./...

after_success:
- bash <(curl -s https://codecov.io/bash)
22
vendor/github.com/x448/float16/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,22 @@
MIT License

Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
133
vendor/github.com/x448/float16/README.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
# Float16 (Binary16) in Go/Golang
|
||||
[](https://travis-ci.org/x448/float16)
|
||||
[](https://codecov.io/gh/x448/float16)
|
||||
[](https://goreportcard.com/report/github.com/x448/float16)
|
||||
[](https://github.com/x448/float16/releases)
|
||||
[](https://raw.githubusercontent.com/x448/float16/master/LICENSE)
|
||||
|
||||
`float16` package provides [IEEE 754 half-precision floating-point format (binary16)](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) with IEEE 754 default rounding for conversions. IEEE 754-2008 refers to this 16-bit floating-point format as binary16.
|
||||
|
||||
IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven") is considered the most accurate and statistically unbiased estimate of the true result.
|
||||
|
||||
All possible 4+ billion floating-point conversions with this library are verified to be correct.
|
||||
|
||||
Lowercase "float16" refers to IEEE 754 binary16. And capitalized "Float16" refers to exported Go data type provided by this library.
|
||||
|
||||
## Features
|
||||
Current features include:
|
||||
|
||||
* float16 to float32 conversions use lossless conversion.
|
||||
* float32 to float16 conversions use IEEE 754-2008 "Round-to-Nearest RoundTiesToEven".
|
||||
* conversions using pure Go take about 2.65 ns/op on a desktop amd64.
|
||||
* unit tests provide 100% code coverage and check all possible 4+ billion conversions.
|
||||
* other functions include: IsInf(), IsNaN(), IsNormal(), PrecisionFromfloat32(), String(), etc.
|
||||
* all functions in this library use zero allocs except String().
|
||||
|
||||
## Status
|
||||
This library is used by [fxamacker/cbor](https://github.com/fxamacker/cbor) and is ready for production use on supported platforms. The version number < 1.0 indicates more functions and options are planned but not yet published.
|
||||
|
||||
Current status:
|
||||
|
||||
* core API is done and breaking API changes are unlikely.
|
||||
* 100% of unit tests pass:
|
||||
* short mode (`go test -short`) tests around 65765 conversions in 0.005s.
|
||||
* normal mode (`go test`) tests all possible 4+ billion conversions in about 95s.
|
||||
* 100% code coverage with both short mode and normal mode.
|
||||
* tested on amd64 but it should work on all little-endian platforms supported by Go.
|
||||
|
||||
Roadmap:
|
||||
|
||||
* add functions for fast batch conversions leveraging SIMD when supported by hardware.
|
||||
* speed up unit test when verifying all possible 4+ billion conversions.
|
||||
* test on additional platforms.
|
||||
|
||||
## Float16 to Float32 Conversion
|
||||
Conversions from float16 to float32 are lossless conversions. All 65536 possible float16 to float32 conversions (in pure Go) are confirmed to be correct.
|
||||
|
||||
Unit tests take a fraction of a second to check all 65536 expected values for float16 to float32 conversions.
|
||||
|
||||
## Float32 to Float16 Conversion
|
||||
Conversions from float32 to float16 use IEEE 754 default rounding ("Round-to-Nearest RoundTiesToEven"). All 4294967296 possible float32 to float16 conversions (in pure Go) are confirmed to be correct.
|
||||
|
||||
Unit tests in normal mode take about 1-2 minutes to check all 4+ billion float32 input values and results for Fromfloat32(), FromNaN32ps(), and PrecisionFromfloat32().
|
||||
|
||||
Unit tests in short mode use a small subset (around 229 float32 inputs) and finish in under 0.01 second while still reaching 100% code coverage.
|
||||
|
||||
## Usage
|
||||
Install with `go get github.com/x448/float16`.
|
||||
```
|
||||
// Convert float32 to float16
|
||||
pi := float32(math.Pi)
|
||||
pi16 := float16.Fromfloat32(pi)
|
||||
|
||||
// Convert float16 to float32
|
||||
pi32 := pi16.Float32()
|
||||
|
||||
// PrecisionFromfloat32() is faster than the overhead of calling a function.
|
||||
// This example only converts if there's no data loss and input is not a subnormal.
|
||||
if float16.PrecisionFromfloat32(pi) == float16.PrecisionExact {
|
||||
pi16 := float16.Fromfloat32(pi)
|
||||
}
|
||||
```
|
||||
|
||||
## Float16 Type and API
|
||||
Float16 (capitalized) is a Go type with uint16 as the underlying state. There are 6 exported functions and 9 exported methods.
|
||||
```
|
||||
package float16 // import "github.com/x448/float16"
|
||||
|
||||
// Exported types and consts
|
||||
type Float16 uint16
|
||||
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||
|
||||
// Exported functions
|
||||
Fromfloat32(f32 float32) Float16 // Float16 number converted from f32 using IEEE 754 default rounding
|
||||
with identical results to AMD and Intel F16C hardware. NaN inputs
|
||||
are converted with quiet bit always set on, to be like F16C.
|
||||
|
||||
FromNaN32ps(nan float32) (Float16, error) // Float16 NaN without modifying quiet bit.
|
||||
// The "ps" suffix means "preserve signaling".
|
||||
// Returns sNaN and ErrInvalidNaNValue if nan isn't a NaN.
|
||||
|
||||
Frombits(b16 uint16) Float16 // Float16 number corresponding to b16 (IEEE 754 binary16 rep.)
|
||||
NaN() Float16 // Float16 of IEEE 754 binary16 not-a-number
|
||||
Inf(sign int) Float16 // Float16 of IEEE 754 binary16 infinity according to sign
|
||||
|
||||
PrecisionFromfloat32(f32 float32) Precision // quickly indicates exact, ..., overflow, underflow
|
||||
// (inline and < 1 ns/op)
|
||||
// Exported methods
|
||||
(f Float16) Float32() float32 // float32 number converted from f16 using lossless conversion
|
||||
(f Float16) Bits() uint16 // the IEEE 754 binary16 representation of f
|
||||
(f Float16) IsNaN() bool // true if f is not-a-number (NaN)
|
||||
(f Float16) IsQuietNaN() bool // true if f is a quiet not-a-number (NaN)
|
||||
(f Float16) IsInf(sign int) bool // true if f is infinite based on sign (-1=NegInf, 0=any, 1=PosInf)
|
||||
(f Float16) IsFinite() bool // true if f is not infinite or NaN
|
||||
(f Float16) IsNormal() bool // true if f is not zero, infinite, subnormal, or NaN.
|
||||
(f Float16) Signbit() bool // true if f is negative or negative zero
|
||||
(f Float16) String() string // string representation of f to satisfy fmt.Stringer interface
|
||||
```
|
||||
See [API](https://godoc.org/github.com/x448/float16) at godoc.org for more info.
|
||||
|
||||
## Benchmarks
|
||||
Conversions (in pure Go) are around 2.65 ns/op for float16 -> float32 and float32 -> float16 on amd64. Speeds can vary depending on input value.
|
||||
|
||||
```
|
||||
All functions have zero allocations except float16.String().
|
||||
|
||||
FromFloat32pi-2 2.59ns ± 0% // speed using Fromfloat32() to convert a float32 of math.Pi to Float16
|
||||
ToFloat32pi-2 2.69ns ± 0% // speed using Float32() to convert a float16 of math.Pi to float32
|
||||
Frombits-2 0.29ns ± 5% // speed using Frombits() to cast a uint16 to Float16
|
||||
|
||||
PrecisionFromFloat32-2 0.29ns ± 1% // speed using PrecisionFromfloat32() to check for overflows, etc.
|
||||
```
|
||||
|
||||
## System Requirements
|
||||
* Tested on Go 1.11, 1.12, and 1.13 but it should also work with older versions.
|
||||
* Tested on amd64 but it should also work on all little-endian platforms supported by Go.
|
||||
|
||||
## Special Thanks
|
||||
Special thanks to Kathryn Long (starkat99) for creating [half-rs](https://github.com/starkat99/half-rs), a very nice rust implementation of float16.
|
||||
|
||||
## License
|
||||
Copyright (c) 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
|
||||
Licensed under [MIT License](LICENSE)
|
||||
302
vendor/github.com/x448/float16/float16.go
generated
vendored
Normal file
@@ -0,0 +1,302 @@
|
||||
// Copyright 2019 Montgomery Edwards⁴⁴⁸ and Faye Amacker
|
||||
//
|
||||
// Special thanks to Kathryn Long for her Rust implementation
|
||||
// of float16 at github.com/starkat99/half-rs (MIT license)
|
||||
|
||||
package float16
|
||||
|
||||
import (
|
||||
"math"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Float16 represents IEEE 754 half-precision floating-point numbers (binary16).
|
||||
type Float16 uint16
|
||||
|
||||
// Precision indicates whether the conversion to Float16 is
|
||||
// exact, subnormal without dropped bits, inexact, underflow, or overflow.
|
||||
type Precision int
|
||||
|
||||
const (
|
||||
|
||||
// PrecisionExact is for non-subnormals that don't drop bits during conversion.
|
||||
// All of these can round-trip. Should always convert to float16.
|
||||
PrecisionExact Precision = iota
|
||||
|
||||
// PrecisionUnknown is for subnormals that don't drop bits during conversion but
|
||||
// not all of these can round-trip so precision is unknown without more effort.
|
||||
// Only 2046 of these can round-trip and the rest cannot round-trip.
|
||||
PrecisionUnknown
|
||||
|
||||
// PrecisionInexact is for dropped significand bits and cannot round-trip.
|
||||
// Some of these are subnormals. Cannot round-trip float32->float16->float32.
|
||||
PrecisionInexact
|
||||
|
||||
// PrecisionUnderflow is for Underflows. Cannot round-trip float32->float16->float32.
|
||||
PrecisionUnderflow
|
||||
|
||||
// PrecisionOverflow is for Overflows. Cannot round-trip float32->float16->float32.
|
||||
PrecisionOverflow
|
||||
)
|
||||
|
||||
// PrecisionFromfloat32 returns Precision without performing
|
||||
// the conversion. Conversions from both Infinity and NaN
|
||||
// values will always report PrecisionExact even if NaN payload
|
||||
// or NaN-Quiet-Bit is lost. This function is kept simple to
|
||||
// allow inlining and run < 0.5 ns/op, to serve as a fast filter.
|
||||
func PrecisionFromfloat32(f32 float32) Precision {
|
||||
u32 := math.Float32bits(f32)
|
||||
|
||||
if u32 == 0 || u32 == 0x80000000 {
|
||||
// +- zero will always be exact conversion
|
||||
return PrecisionExact
|
||||
}
|
||||
|
||||
const COEFMASK uint32 = 0x7fffff // 23 least significant bits
|
||||
const EXPSHIFT uint32 = 23
|
||||
const EXPBIAS uint32 = 127
|
||||
const EXPMASK uint32 = uint32(0xff) << EXPSHIFT
|
||||
const DROPMASK uint32 = COEFMASK >> 10
|
||||
|
||||
exp := int32(((u32 & EXPMASK) >> EXPSHIFT) - EXPBIAS)
|
||||
coef := u32 & COEFMASK
|
||||
|
||||
if exp == 128 {
|
||||
// +- infinity or NaN
|
||||
// apps may want to do extra checks for NaN separately
|
||||
return PrecisionExact
|
||||
}
|
||||
|
||||
// https://en.wikipedia.org/wiki/Half-precision_floating-point_format says,
|
||||
// "Decimals between 2^−24 (minimum positive subnormal) and 2^−14 (maximum subnormal): fixed interval 2^−24"
|
||||
if exp < -24 {
|
||||
return PrecisionUnderflow
|
||||
}
|
||||
if exp > 15 {
|
||||
return PrecisionOverflow
|
||||
}
|
||||
if (coef & DROPMASK) != uint32(0) {
|
||||
// these include subnormals and non-subnormals that dropped bits
|
||||
return PrecisionInexact
|
||||
}
|
||||
|
||||
if exp < -14 {
|
||||
// Subnormals. Caller may want to test these further.
|
||||
// There are 2046 subnormals that can successfully round-trip f32->f16->f32
|
||||
// and 20 of those 2046 have 32-bit input coef == 0.
|
||||
// RFC 7049 and 7049bis Draft 12 don't precisely define "preserves value"
|
||||
// so some protocols and libraries will choose to handle subnormals differently
|
||||
// when deciding to encode them to CBOR float32 vs float16.
|
||||
return PrecisionUnknown
|
||||
}
|
||||
|
||||
return PrecisionExact
|
||||
}
|
||||
|
||||
// Frombits returns the float16 number corresponding to the IEEE 754 binary16
|
||||
// representation u16, with the sign bit of u16 and the result in the same bit
|
||||
// position. Frombits(Bits(x)) == x.
|
||||
func Frombits(u16 uint16) Float16 {
|
||||
return Float16(u16)
|
||||
}
|
||||
|
||||
// Fromfloat32 returns a Float16 value converted from f32. Conversion uses
|
||||
// IEEE default rounding (nearest int, with ties to even).
|
||||
func Fromfloat32(f32 float32) Float16 {
|
||||
return Float16(f32bitsToF16bits(math.Float32bits(f32)))
|
||||
}
|
||||
|
||||
// ErrInvalidNaNValue indicates a NaN was not received.
|
||||
const ErrInvalidNaNValue = float16Error("float16: invalid NaN value, expected IEEE 754 NaN")
|
||||
|
||||
type float16Error string
|
||||
|
||||
func (e float16Error) Error() string { return string(e) }
|
||||
|
||||
// FromNaN32ps converts nan to IEEE binary16 NaN while preserving both
|
||||
// signaling and payload. Unlike Fromfloat32(), which can only return
|
||||
// qNaN because it sets quiet bit = 1, this can return both sNaN and qNaN.
|
||||
// If the result is infinity (sNaN with empty payload), then the
|
||||
// lowest bit of payload is set to make the result a NaN.
|
||||
// Returns ErrInvalidNaNValue and 0x7c01 (sNaN) if nan isn't IEEE 754 NaN.
|
||||
// This function was kept simple to be able to inline.
|
||||
func FromNaN32ps(nan float32) (Float16, error) {
|
||||
const SNAN = Float16(uint16(0x7c01)) // signalling NaN
|
||||
|
||||
u32 := math.Float32bits(nan)
|
||||
sign := u32 & 0x80000000
|
||||
exp := u32 & 0x7f800000
|
||||
coef := u32 & 0x007fffff
|
||||
|
||||
if (exp != 0x7f800000) || (coef == 0) {
|
||||
return SNAN, ErrInvalidNaNValue
|
||||
}
|
||||
|
||||
u16 := uint16((sign >> 16) | uint32(0x7c00) | (coef >> 13))
|
||||
|
||||
if (u16 & 0x03ff) == 0 {
|
||||
// result became infinity, make it NaN by setting lowest bit in payload
|
||||
u16 = u16 | 0x0001
|
||||
}
|
||||
|
||||
return Float16(u16), nil
|
||||
}
|
||||
|
||||
// NaN returns a Float16 of IEEE 754 binary16 not-a-number (NaN).
|
||||
// Returned NaN value 0x7e01 has all exponent bits = 1 with the
|
||||
// first and last bits = 1 in the significand. This is consistent
|
||||
// with Go's 64-bit math.NaN(). Canonical CBOR in RFC 7049 uses 0x7e00.
|
||||
func NaN() Float16 {
|
||||
return Float16(0x7e01)
|
||||
}
|
||||
|
||||
// Inf returns a Float16 with an infinity value with the specified sign.
|
||||
// A sign >= returns positive infinity.
|
||||
// A sign < 0 returns negative infinity.
|
||||
func Inf(sign int) Float16 {
|
||||
if sign >= 0 {
|
||||
return Float16(0x7c00)
|
||||
}
|
||||
return Float16(0x8000 | 0x7c00)
|
||||
}
|
||||
|
||||
// Float32 returns a float32 converted from f (Float16).
|
||||
// This is a lossless conversion.
|
||||
func (f Float16) Float32() float32 {
|
||||
u32 := f16bitsToF32bits(uint16(f))
|
||||
return math.Float32frombits(u32)
|
||||
}
|
||||
|
||||
// Bits returns the IEEE 754 binary16 representation of f, with the sign bit
|
||||
// of f and the result in the same bit position. Bits(Frombits(x)) == x.
|
||||
func (f Float16) Bits() uint16 {
|
||||
return uint16(f)
|
||||
}
|
||||
|
||||
// IsNaN reports whether f is an IEEE 754 binary16 “not-a-number” value.
|
||||
func (f Float16) IsNaN() bool {
|
||||
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0)
|
||||
}
|
||||
|
||||
// IsQuietNaN reports whether f is a quiet (non-signaling) IEEE 754 binary16
|
||||
// “not-a-number” value.
|
||||
func (f Float16) IsQuietNaN() bool {
|
||||
return (f&0x7c00 == 0x7c00) && (f&0x03ff != 0) && (f&0x0200 != 0)
|
||||
}
|
||||
|
||||
// IsInf reports whether f is an infinity (inf).
|
||||
// A sign > 0 reports whether f is positive inf.
|
||||
// A sign < 0 reports whether f is negative inf.
|
||||
// A sign == 0 reports whether f is either inf.
|
||||
func (f Float16) IsInf(sign int) bool {
|
||||
return ((f == 0x7c00) && sign >= 0) ||
|
||||
(f == 0xfc00 && sign <= 0)
|
||||
}
|
||||
|
||||
// IsFinite returns true if f is neither infinite nor NaN.
|
||||
func (f Float16) IsFinite() bool {
|
||||
return (uint16(f) & uint16(0x7c00)) != uint16(0x7c00)
|
||||
}
|
||||
|
||||
// IsNormal returns true if f is neither zero, infinite, subnormal, or NaN.
|
||||
func (f Float16) IsNormal() bool {
|
||||
exp := uint16(f) & uint16(0x7c00)
|
||||
return (exp != uint16(0x7c00)) && (exp != 0)
|
||||
}
|
||||
|
||||
// Signbit reports whether f is negative or negative zero.
|
||||
func (f Float16) Signbit() bool {
|
||||
return (uint16(f) & uint16(0x8000)) != 0
|
||||
}
|
||||
|
||||
// String satisfies the fmt.Stringer interface.
|
||||
func (f Float16) String() string {
|
||||
return strconv.FormatFloat(float64(f.Float32()), 'f', -1, 32)
|
||||
}
|
||||
|
||||
// f16bitsToF32bits returns uint32 (float32 bits) converted from specified uint16.
|
||||
func f16bitsToF32bits(in uint16) uint32 {
|
||||
// All 65536 conversions with this were confirmed to be correct
|
||||
// by Montgomery Edwards⁴⁴⁸ (github.com/x448).
|
||||
|
||||
sign := uint32(in&0x8000) << 16 // sign for 32-bit
|
||||
exp := uint32(in&0x7c00) >> 10 // exponenent for 16-bit
|
||||
coef := uint32(in&0x03ff) << 13 // significand for 32-bit
|
||||
|
||||
if exp == 0x1f {
|
||||
if coef == 0 {
|
||||
// infinity
|
||||
return sign | 0x7f800000 | coef
|
||||
}
|
||||
// NaN
|
||||
return sign | 0x7fc00000 | coef
|
||||
}
|
||||
|
||||
if exp == 0 {
|
||||
if coef == 0 {
|
||||
// zero
|
||||
return sign
|
||||
}
|
||||
|
||||
// normalize subnormal numbers
|
||||
exp++
|
||||
for coef&0x7f800000 == 0 {
|
||||
coef <<= 1
|
||||
exp--
|
||||
}
|
||||
coef &= 0x007fffff
|
||||
}
|
||||
|
||||
return sign | ((exp + (0x7f - 0xf)) << 23) | coef
|
||||
}
|
||||
|
||||
// f32bitsToF16bits returns uint16 (Float16 bits) converted from the specified float32.
|
||||
// Conversion rounds to nearest integer with ties to even.
|
||||
func f32bitsToF16bits(u32 uint32) uint16 {
|
||||
// Translated from Rust to Go by Montgomery Edwards⁴⁴⁸ (github.com/x448).
|
||||
// All 4294967296 conversions with this were confirmed to be correct by x448.
|
||||
// Original Rust implementation is by Kathryn Long (github.com/starkat99) with MIT license.
|
||||
|
||||
sign := u32 & 0x80000000
|
||||
exp := u32 & 0x7f800000
|
||||
coef := u32 & 0x007fffff
|
||||
|
||||
if exp == 0x7f800000 {
|
||||
// NaN or Infinity
|
||||
nanBit := uint32(0)
|
||||
if coef != 0 {
|
||||
nanBit = uint32(0x0200)
|
||||
}
|
||||
return uint16((sign >> 16) | uint32(0x7c00) | nanBit | (coef >> 13))
|
||||
}
|
||||
|
||||
halfSign := sign >> 16
|
||||
|
||||
unbiasedExp := int32(exp>>23) - 127
|
||||
halfExp := unbiasedExp + 15
|
||||
|
||||
if halfExp >= 0x1f {
|
||||
return uint16(halfSign | uint32(0x7c00))
|
||||
}
|
||||
|
||||
if halfExp <= 0 {
|
||||
if 14-halfExp > 24 {
|
||||
return uint16(halfSign)
|
||||
}
|
||||
coef := coef | uint32(0x00800000)
|
||||
halfCoef := coef >> uint32(14-halfExp)
|
||||
roundBit := uint32(1) << uint32(13-halfExp)
|
||||
if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
|
||||
halfCoef++
|
||||
}
|
||||
return uint16(halfSign | halfCoef)
|
||||
}
|
||||
|
||||
uHalfExp := uint32(halfExp) << 10
|
||||
halfCoef := coef >> 13
|
||||
roundBit := uint32(0x00001000)
|
||||
if (coef&roundBit) != 0 && (coef&(3*roundBit-1)) != 0 {
|
||||
return uint16((halfSign | uHalfExp | halfCoef) + 1)
|
||||
}
|
||||
return uint16(halfSign | uHalfExp | halfCoef)
|
||||
}
|
||||
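For reference, the exported API added above can be exercised directly. The classifications printed below follow from PrecisionFromfloat32's exponent and dropped-bit checks shown in the file; they are worked examples, not values taken from the package's own tests:

package main

import (
	"fmt"
	"math"

	"github.com/x448/float16"
)

func main() {
	// Round-trip a value that binary16 can represent exactly.
	f := float16.Fromfloat32(1.5)
	fmt.Println(f.Float32(), float16.PrecisionFromfloat32(1.5) == float16.PrecisionExact) // 1.5 true

	// math.Pi drops significand bits, MaxFloat32 overflows, 1e-10 underflows.
	fmt.Println(float16.PrecisionFromfloat32(float32(math.Pi)) == float16.PrecisionInexact) // true
	fmt.Println(float16.PrecisionFromfloat32(math.MaxFloat32) == float16.PrecisionOverflow) // true
	fmt.Println(float16.PrecisionFromfloat32(1e-10) == float16.PrecisionUnderflow)          // true
}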
122
vendor/golang.org/x/net/http2/config.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package http2
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// http2Config is a package-internal version of net/http.HTTP2Config.
|
||||
//
|
||||
// http.HTTP2Config was added in Go 1.24.
|
||||
// When running with a version of net/http that includes HTTP2Config,
|
||||
// we merge the configuration with the fields in Transport or Server
|
||||
// to produce an http2Config.
|
||||
//
|
||||
// Zero valued fields in http2Config are interpreted as in the
|
||||
// net/http.HTTPConfig documentation.
|
||||
//
|
||||
// Precedence order for reconciling configurations is:
|
||||
//
|
||||
// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero.
|
||||
// - Otherwise use the http2.{Server.Transport} value.
|
||||
// - If the resulting value is zero or out of range, use a default.
|
||||
type http2Config struct {
|
||||
MaxConcurrentStreams uint32
|
||||
MaxDecoderHeaderTableSize uint32
|
||||
MaxEncoderHeaderTableSize uint32
|
||||
MaxReadFrameSize uint32
|
||||
MaxUploadBufferPerConnection int32
|
||||
MaxUploadBufferPerStream int32
|
||||
SendPingTimeout time.Duration
|
||||
PingTimeout time.Duration
|
||||
WriteByteTimeout time.Duration
|
||||
PermitProhibitedCipherSuites bool
|
||||
CountError func(errType string)
|
||||
}
|
||||
|
||||
// configFromServer merges configuration settings from
|
||||
// net/http.Server.HTTP2Config and http2.Server.
|
||||
func configFromServer(h1 *http.Server, h2 *Server) http2Config {
|
||||
conf := http2Config{
|
||||
MaxConcurrentStreams: h2.MaxConcurrentStreams,
|
||||
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
|
||||
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
|
||||
MaxReadFrameSize: h2.MaxReadFrameSize,
|
||||
MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection,
|
||||
MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream,
|
||||
SendPingTimeout: h2.ReadIdleTimeout,
|
||||
PingTimeout: h2.PingTimeout,
|
||||
WriteByteTimeout: h2.WriteByteTimeout,
|
||||
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
|
||||
CountError: h2.CountError,
|
||||
}
|
||||
fillNetHTTPServerConfig(&conf, h1)
|
||||
setConfigDefaults(&conf, true)
|
||||
return conf
|
||||
}
|
||||
|
||||
// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
|
||||
// (the net/http Transport).
|
||||
func configFromTransport(h2 *Transport) http2Config {
|
||||
conf := http2Config{
|
||||
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
|
||||
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
|
||||
MaxReadFrameSize: h2.MaxReadFrameSize,
|
||||
SendPingTimeout: h2.ReadIdleTimeout,
|
||||
PingTimeout: h2.PingTimeout,
|
||||
WriteByteTimeout: h2.WriteByteTimeout,
|
||||
}
|
||||
|
||||
// Unlike most config fields, where out-of-range values revert to the default,
|
||||
// Transport.MaxReadFrameSize clips.
|
||||
if conf.MaxReadFrameSize < minMaxFrameSize {
|
||||
conf.MaxReadFrameSize = minMaxFrameSize
|
||||
} else if conf.MaxReadFrameSize > maxFrameSize {
|
||||
conf.MaxReadFrameSize = maxFrameSize
|
||||
}
|
||||
|
||||
if h2.t1 != nil {
|
||||
fillNetHTTPTransportConfig(&conf, h2.t1)
|
||||
}
|
||||
setConfigDefaults(&conf, false)
|
||||
return conf
|
||||
}
|
||||
|
||||
func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) {
|
||||
if *v < minval || *v > maxval {
|
||||
*v = defval
|
||||
}
|
||||
}
|
||||
|
||||
func setConfigDefaults(conf *http2Config, server bool) {
|
||||
setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams)
|
||||
setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
|
||||
setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize)
|
||||
if server {
|
||||
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20)
|
||||
} else {
|
||||
setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow)
|
||||
}
|
||||
if server {
|
||||
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20)
|
||||
} else {
|
||||
setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow)
|
||||
}
|
||||
setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize)
|
||||
setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second)
|
||||
}
|
||||
|
||||
// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header
|
||||
// to an HTTP/2 MAX_HEADER_LIST_SIZE value.
|
||||
func adjustHTTP1MaxHeaderSize(n int64) int64 {
|
||||
// http2's count is in a slightly different unit and includes 32 bytes per pair.
|
||||
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
|
||||
const perFieldOverhead = 32 // per http2 spec
|
||||
const typicalHeaders = 10 // conservative
|
||||
return n + typicalHeaders*perFieldOverhead
|
||||
}
|
||||
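The padding in adjustHTTP1MaxHeaderSize above is plain arithmetic: with net/http's default of 1 MiB for MaxHeaderBytes, the advertised SETTINGS_MAX_HEADER_LIST_SIZE becomes 1048576 + 10*32 = 1048896 bytes. A one-line check:

package main

import "fmt"

func main() {
	const defaultMaxHeaderBytes = 1 << 20 // net/http.DefaultMaxHeaderBytes
	const perFieldOverhead = 32           // per http2 spec
	const typicalHeaders = 10             // conservative
	fmt.Println(defaultMaxHeaderBytes + typicalHeaders*perFieldOverhead) // 1048896
}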
61
vendor/golang.org/x/net/http2/config_go124.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
// Copyright 2024 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.24
|
||||
|
||||
package http2
|
||||
|
||||
import "net/http"
|
||||
|
||||
// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
|
||||
func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
|
||||
fillNetHTTPConfig(conf, srv.HTTP2)
|
||||
}
|
||||
|
||||
// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
|
||||
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
|
||||
fillNetHTTPConfig(conf, tr.HTTP2)
|
||||
}
|
||||
|
||||
func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
|
||||
if h2 == nil {
|
||||
return
|
||||
}
|
||||
if h2.MaxConcurrentStreams != 0 {
|
||||
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
|
||||
}
|
||||
if h2.MaxEncoderHeaderTableSize != 0 {
|
||||
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
|
||||
}
|
||||
if h2.MaxDecoderHeaderTableSize != 0 {
|
||||
conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
|
||||
}
|
||||
if h2.MaxConcurrentStreams != 0 {
|
||||
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
|
||||
}
|
||||
if h2.MaxReadFrameSize != 0 {
|
||||
conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
|
||||
}
|
||||
if h2.MaxReceiveBufferPerConnection != 0 {
|
||||
conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
|
||||
}
|
||||
if h2.MaxReceiveBufferPerStream != 0 {
|
||||
conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
|
||||
}
|
||||
if h2.SendPingTimeout != 0 {
|
||||
conf.SendPingTimeout = h2.SendPingTimeout
|
||||
}
|
||||
if h2.PingTimeout != 0 {
|
||||
conf.PingTimeout = h2.PingTimeout
|
||||
}
|
||||
if h2.WriteByteTimeout != 0 {
|
||||
conf.WriteByteTimeout = h2.WriteByteTimeout
|
||||
}
|
||||
if h2.PermitProhibitedCipherSuites {
|
||||
conf.PermitProhibitedCipherSuites = true
|
||||
}
|
||||
if h2.CountError != nil {
|
||||
conf.CountError = h2.CountError
|
||||
}
|
||||
}
|
||||
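On Go 1.24, the fields read by fillNetHTTPConfig above come from the net/http.HTTP2Config struct, so an application sets them on the standard http.Server rather than on x/net's types. A hedged sketch (the field values, address, and certificate paths are arbitrary placeholders):

package main

import (
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		// Requires Go 1.24; these fields are merged into the http2
		// configuration by fillNetHTTPServerConfig above.
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			SendPingTimeout:      30 * time.Second,
			PingTimeout:          15 * time.Second,
			WriteByteTimeout:     10 * time.Second,
		},
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}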
16
vendor/golang.org/x/net/http2/config_pre_go124.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !go1.24

package http2

import "net/http"

// Pre-Go 1.24 fallback.
// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.

func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}

func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
53
vendor/golang.org/x/net/http2/http2.go
generated
vendored
@@ -19,8 +19,9 @@ import (
|
||||
"bufio"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
@@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() {
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
_ incomparable
|
||||
w io.Writer // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
_ incomparable
|
||||
group synctestGroupInterface // immutable
|
||||
conn net.Conn // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
byteTimeout time.Duration // immutable, WriteByteTimeout
|
||||
}
|
||||
|
||||
func newBufferedWriter(w io.Writer) *bufferedWriter {
|
||||
return &bufferedWriter{w: w}
|
||||
func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
|
||||
return &bufferedWriter{
|
||||
group: group,
|
||||
conn: conn,
|
||||
byteTimeout: timeout,
|
||||
}
|
||||
}
|
||||
|
||||
// bufWriterPoolBufferSize is the size of bufio.Writer's
|
||||
@@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int {
|
||||
func (w *bufferedWriter) Write(p []byte) (n int, err error) {
|
||||
if w.bw == nil {
|
||||
bw := bufWriterPool.Get().(*bufio.Writer)
|
||||
bw.Reset(w.w)
|
||||
bw.Reset((*bufferedWriterTimeoutWriter)(w))
|
||||
w.bw = bw
|
||||
}
|
||||
return w.bw.Write(p)
|
||||
@@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error {
|
||||
return err
|
||||
}
|
||||
|
||||
type bufferedWriterTimeoutWriter bufferedWriter
|
||||
|
||||
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
|
||||
return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
|
||||
}
|
||||
|
||||
// writeWithByteTimeout writes to conn.
|
||||
// If more than timeout passes without any bytes being written to the connection,
|
||||
// the write fails.
|
||||
func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
|
||||
if timeout <= 0 {
|
||||
return conn.Write(p)
|
||||
}
|
||||
for {
|
||||
var now time.Time
|
||||
if group == nil {
|
||||
now = time.Now()
|
||||
} else {
|
||||
now = group.Now()
|
||||
}
|
||||
conn.SetWriteDeadline(now.Add(timeout))
|
||||
nn, err := conn.Write(p[n:])
|
||||
n += nn
|
||||
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
|
||||
// Either we finished the write, made no progress, or hit the deadline.
|
||||
// Whichever it is, we're done now.
|
||||
conn.SetWriteDeadline(time.Time{})
|
||||
return n, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
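writeWithByteTimeout above implements a per-progress (rather than per-call) write timeout: the deadline is re-armed after every chunk that writes at least one byte, and only a write that times out without making progress ends the loop. The same pattern works on any net.Conn; a trimmed standalone sketch (not the vendored helper, which also supports a test-only synctest group):

package main

import (
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"time"
)

// writeAllWithByteTimeout keeps extending the write deadline as long as each
// attempt writes at least one byte, mirroring writeWithByteTimeout above.
func writeAllWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (int, error) {
	if timeout <= 0 {
		return conn.Write(p)
	}
	n := 0
	for {
		conn.SetWriteDeadline(time.Now().Add(timeout))
		nn, err := conn.Write(p[n:])
		n += nn
		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
			// Finished, made no progress, or hit a non-deadline error.
			conn.SetWriteDeadline(time.Time{})
			return n, err
		}
	}
}

func main() {
	c1, c2 := net.Pipe()
	go io.Copy(io.Discard, c2) // drain the other end
	n, err := writeAllWithByteTimeout(c1, time.Second, []byte("hello"))
	fmt.Println(n, err) // 5 <nil>
}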
func mustUint31(v int32) uint32 {
|
||||
if v < 0 || v > 2147483647 {
|
||||
panic("out of range")
|
||||
|
||||
181
vendor/golang.org/x/net/http2/server.go
generated
vendored
@@ -29,6 +29,7 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -52,10 +53,14 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
prefaceTimeout = 10 * time.Second
|
||||
firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
|
||||
handlerChunkWriteSize = 4 << 10
|
||||
defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
|
||||
prefaceTimeout = 10 * time.Second
|
||||
firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
|
||||
handlerChunkWriteSize = 4 << 10
|
||||
defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
|
||||
|
||||
// maxQueuedControlFrames is the maximum number of control frames like
|
||||
// SETTINGS, PING and RST_STREAM that will be queued for writing before
|
||||
// the connection is closed to prevent memory exhaustion attacks.
|
||||
maxQueuedControlFrames = 10000
|
||||
)
|
||||
|
||||
@@ -127,6 +132,22 @@ type Server struct {
|
||||
// If zero or negative, there is no timeout.
|
||||
IdleTimeout time.Duration
|
||||
|
||||
// ReadIdleTimeout is the timeout after which a health check using a ping
|
||||
// frame will be carried out if no frame is received on the connection.
|
||||
// If zero, no health check is performed.
|
||||
ReadIdleTimeout time.Duration
|
||||
|
||||
// PingTimeout is the timeout after which the connection will be closed
|
||||
// if a response to a ping is not received.
|
||||
// If zero, a default of 15 seconds is used.
|
||||
PingTimeout time.Duration
|
||||
|
||||
// WriteByteTimeout is the timeout after which a connection will be
|
||||
// closed if no data can be written to it. The timeout begins when data is
|
||||
// available to write, and is extended whenever any bytes are written.
|
||||
// If zero or negative, there is no timeout.
|
||||
WriteByteTimeout time.Duration
|
||||
|
||||
// MaxUploadBufferPerConnection is the size of the initial flow
|
||||
// control window for each connections. The HTTP/2 spec does not
|
||||
// allow this to be smaller than 65535 or larger than 2^32-1.
|
||||
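The new ReadIdleTimeout, PingTimeout, and WriteByteTimeout fields documented above are set on the http2.Server itself when x/net is used directly. A hedged sketch of wiring them up with ConfigureServer (timeout values, address, and certificate paths are arbitrary examples):

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	h2 := &http2.Server{
		ReadIdleTimeout:  30 * time.Second, // send a PING if no frame arrives for this long
		PingTimeout:      15 * time.Second, // close the connection if the PING goes unanswered
		WriteByteTimeout: 10 * time.Second, // close the connection if writes stall
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}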
@@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer {
|
||||
return timeTimer{time.AfterFunc(d, f)}
|
||||
}
|
||||
|
||||
func (s *Server) initialConnRecvWindowSize() int32 {
|
||||
if s.MaxUploadBufferPerConnection >= initialWindowSize {
|
||||
return s.MaxUploadBufferPerConnection
|
||||
}
|
||||
return 1 << 20
|
||||
}
|
||||
|
||||
func (s *Server) initialStreamRecvWindowSize() int32 {
|
||||
if s.MaxUploadBufferPerStream > 0 {
|
||||
return s.MaxUploadBufferPerStream
|
||||
}
|
||||
return 1 << 20
|
||||
}
|
||||
|
||||
func (s *Server) maxReadFrameSize() uint32 {
|
||||
if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
|
||||
return v
|
||||
}
|
||||
return defaultMaxReadFrameSize
|
||||
}
|
||||
|
||||
func (s *Server) maxConcurrentStreams() uint32 {
|
||||
if v := s.MaxConcurrentStreams; v > 0 {
|
||||
return v
|
||||
}
|
||||
return defaultMaxStreams
|
||||
}
|
||||
|
||||
func (s *Server) maxDecoderHeaderTableSize() uint32 {
|
||||
if v := s.MaxDecoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
func (s *Server) maxEncoderHeaderTableSize() uint32 {
|
||||
if v := s.MaxEncoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
// maxQueuedControlFrames is the maximum number of control frames like
|
||||
// SETTINGS, PING and RST_STREAM that will be queued for writing before
|
||||
// the connection is closed to prevent memory exhaustion attacks.
|
||||
func (s *Server) maxQueuedControlFrames() int {
|
||||
// TODO: if anybody asks, add a Server field, and remember to define the
|
||||
// behavior of negative values.
|
||||
return maxQueuedControlFrames
|
||||
}
|
||||
|
||||
type serverInternalState struct {
|
||||
mu sync.Mutex
|
||||
activeConns map[*serverConn]struct{}
|
||||
@@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
baseCtx, cancel := serverConnBaseContext(c, opts)
|
||||
defer cancel()
|
||||
|
||||
http1srv := opts.baseConfig()
|
||||
conf := configFromServer(http1srv, s)
|
||||
sc := &serverConn{
|
||||
srv: s,
|
||||
hs: opts.baseConfig(),
|
||||
hs: http1srv,
|
||||
conn: c,
|
||||
baseCtx: baseCtx,
|
||||
remoteAddrStr: c.RemoteAddr().String(),
|
||||
bw: newBufferedWriter(c),
|
||||
bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
|
||||
handler: opts.handler(),
|
||||
streams: make(map[uint32]*stream),
|
||||
readFrameCh: make(chan readFrameResult),
|
||||
@@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
|
||||
doneServing: make(chan struct{}),
|
||||
clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value"
|
||||
advMaxStreams: s.maxConcurrentStreams(),
|
||||
advMaxStreams: conf.MaxConcurrentStreams,
|
||||
initialStreamSendWindowSize: initialWindowSize,
|
||||
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
|
||||
maxFrameSize: initialMaxFrameSize,
|
||||
pingTimeout: conf.PingTimeout,
|
||||
countErrorFunc: conf.CountError,
|
||||
serveG: newGoroutineLock(),
|
||||
pushEnabled: true,
|
||||
sawClientPreface: opts.SawClientPreface,
|
||||
@@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
sc.flow.add(initialWindowSize)
|
||||
sc.inflow.init(initialWindowSize)
|
||||
sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
|
||||
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize())
|
||||
sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
|
||||
|
||||
fr := NewFramer(sc.bw, c)
|
||||
if s.CountError != nil {
|
||||
fr.countError = s.CountError
|
||||
if conf.CountError != nil {
|
||||
fr.countError = conf.CountError
|
||||
}
|
||||
fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil)
|
||||
fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil)
|
||||
fr.MaxHeaderListSize = sc.maxHeaderListSize()
|
||||
fr.SetMaxReadFrameSize(s.maxReadFrameSize())
|
||||
fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
|
||||
sc.framer = fr
|
||||
|
||||
if tc, ok := c.(connectionStater); ok {
|
||||
@@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
// So for now, do nothing here again.
|
||||
}
|
||||
|
||||
if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
|
||||
if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
|
||||
// "Endpoints MAY choose to generate a connection error
|
||||
// (Section 5.4.1) of type INADEQUATE_SECURITY if one of
|
||||
// the prohibited cipher suites are negotiated."
|
||||
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon
|
||||
opts.UpgradeRequest = nil
|
||||
}
|
||||
|
||||
sc.serve()
|
||||
sc.serve(conf)
|
||||
}
|
||||
|
||||
func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) {
|
||||
@@ -609,6 +584,7 @@ type serverConn struct {
|
||||
tlsState *tls.ConnectionState // shared by all handlers, like net/http
|
||||
remoteAddrStr string
|
||||
writeSched WriteScheduler
|
||||
countErrorFunc func(errType string)
|
||||
|
||||
// Everything following is owned by the serve loop; use serveG.check():
|
||||
serveG goroutineLock // used to verify funcs are on serve()
|
||||
@@ -628,6 +604,7 @@ type serverConn struct {
|
||||
streams map[uint32]*stream
|
||||
unstartedHandlers []unstartedHandler
|
||||
initialStreamSendWindowSize int32
|
||||
initialStreamRecvWindowSize int32
|
||||
maxFrameSize int32
|
||||
peerMaxHeaderListSize uint32 // zero means unknown (default)
|
||||
canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
|
||||
@@ -638,9 +615,14 @@ type serverConn struct {
|
||||
inGoAway bool // we've started to or sent GOAWAY
|
||||
inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop
|
||||
needToSendGoAway bool // we need to schedule a GOAWAY frame write
|
||||
pingSent bool
|
||||
sentPingData [8]byte
|
||||
goAwayCode ErrCode
|
||||
shutdownTimer timer // nil until used
|
||||
idleTimer timer // nil if unused
|
||||
readIdleTimeout time.Duration
|
||||
pingTimeout time.Duration
|
||||
readIdleTimer timer // nil if unused
|
||||
|
||||
// Owned by the writeFrameAsync goroutine:
|
||||
headerWriteBuf bytes.Buffer
|
||||
@@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 {
|
||||
if n <= 0 {
|
||||
n = http.DefaultMaxHeaderBytes
|
||||
}
|
||||
// http2's count is in a slightly different unit and includes 32 bytes per pair.
|
||||
// So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
|
||||
const perFieldOverhead = 32 // per http2 spec
|
||||
const typicalHeaders = 10 // conservative
|
||||
return uint32(n + typicalHeaders*perFieldOverhead)
|
||||
return uint32(adjustHTTP1MaxHeaderSize(int64(n)))
|
||||
}
|
||||
|
||||
func (sc *serverConn) curOpenStreams() uint32 {
|
||||
@@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *serverConn) serve() {
|
||||
func (sc *serverConn) serve(conf http2Config) {
|
||||
sc.serveG.check()
|
||||
defer sc.notePanic()
|
||||
defer sc.conn.Close()
|
||||
@@ -937,18 +915,18 @@ func (sc *serverConn) serve() {
|
||||
|
||||
sc.writeFrame(FrameWriteRequest{
|
||||
write: writeSettings{
|
||||
{SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
|
||||
{SettingMaxFrameSize, conf.MaxReadFrameSize},
|
||||
{SettingMaxConcurrentStreams, sc.advMaxStreams},
|
||||
{SettingMaxHeaderListSize, sc.maxHeaderListSize()},
|
||||
{SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()},
|
||||
{SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())},
|
||||
{SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
|
||||
{SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
|
||||
},
|
||||
})
|
||||
sc.unackedSettings++
|
||||
|
||||
// Each connection starts with initialWindowSize inflow tokens.
|
||||
// If a higher value is configured, we add more tokens.
|
||||
if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 {
|
||||
if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 {
|
||||
sc.sendWindowUpdate(nil, int(diff))
|
||||
}
|
||||
|
||||
@@ -968,11 +946,18 @@ func (sc *serverConn) serve() {
|
||||
defer sc.idleTimer.Stop()
|
||||
}
|
||||
|
||||
if conf.SendPingTimeout > 0 {
|
||||
sc.readIdleTimeout = conf.SendPingTimeout
|
||||
sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
|
||||
defer sc.readIdleTimer.Stop()
|
||||
}
|
||||
|
||||
go sc.readFrames() // closed by defer sc.conn.Close above
|
||||
|
||||
settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
|
||||
defer settingsTimer.Stop()
|
||||
|
||||
lastFrameTime := sc.srv.now()
|
||||
loopNum := 0
|
||||
for {
|
||||
loopNum++
|
||||
@@ -986,6 +971,7 @@ func (sc *serverConn) serve() {
|
||||
case res := <-sc.wroteFrameCh:
|
||||
sc.wroteFrame(res)
|
||||
case res := <-sc.readFrameCh:
|
||||
lastFrameTime = sc.srv.now()
|
||||
// Process any written frames before reading new frames from the client since a
|
||||
// written frame could have triggered a new stream to be started.
|
||||
if sc.writingFrameAsync {
|
||||
@@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() {
|
||||
case idleTimerMsg:
|
||||
sc.vlogf("connection is idle")
|
||||
sc.goAway(ErrCodeNo)
|
||||
case readIdleTimerMsg:
|
||||
sc.handlePingTimer(lastFrameTime)
|
||||
case shutdownTimerMsg:
|
||||
sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
|
||||
return
|
||||
@@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() {
|
||||
// If the peer is causing us to generate a lot of control frames,
|
||||
// but not reading them from us, assume they are trying to make us
|
||||
// run out of memory.
|
||||
if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
|
||||
if sc.queuedControlFrames > maxQueuedControlFrames {
|
||||
sc.vlogf("http2: too many control frames in send queue, closing connection")
|
||||
return
|
||||
}
|
||||
@@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
|
||||
if sc.pingSent {
|
||||
sc.vlogf("timeout waiting for PING response")
|
||||
sc.conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
|
||||
now := sc.srv.now()
|
||||
if pingAt.After(now) {
|
||||
// We received frames since arming the ping timer.
|
||||
// Reset it for the next possible timeout.
|
||||
sc.readIdleTimer.Reset(pingAt.Sub(now))
|
||||
return
|
||||
}
|
||||
|
||||
sc.pingSent = true
|
||||
// Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does
|
||||
// is we send a PING frame containing 0s.
|
||||
_, _ = rand.Read(sc.sentPingData[:])
|
||||
sc.writeFrame(FrameWriteRequest{
|
||||
write: &writePing{data: sc.sentPingData},
|
||||
})
|
||||
sc.readIdleTimer.Reset(sc.pingTimeout)
|
||||
}
|
||||
|
||||
type serverMessage int
|
||||
|
||||
// Message values sent to serveMsgCh.
|
||||
var (
|
||||
settingsTimerMsg = new(serverMessage)
|
||||
idleTimerMsg = new(serverMessage)
|
||||
readIdleTimerMsg = new(serverMessage)
|
||||
shutdownTimerMsg = new(serverMessage)
|
||||
gracefulShutdownMsg = new(serverMessage)
|
||||
handlerDoneMsg = new(serverMessage)
|
||||
@@ -1068,6 +1083,7 @@ var (
|
||||
|
||||
func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) }
|
||||
func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) }
|
||||
func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) }
|
||||
func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) }
|
||||
|
||||
func (sc *serverConn) sendServeMsg(msg interface{}) {
|
||||
@@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
|
||||
sc.writingFrame = false
|
||||
sc.writingFrameAsync = false
|
||||
|
||||
if res.err != nil {
|
||||
sc.conn.Close()
|
||||
}
|
||||
|
||||
wr := res.wr
|
||||
|
||||
if writeEndsStream(wr.write) {
|
||||
@@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error {
|
||||
func (sc *serverConn) processPing(f *PingFrame) error {
|
||||
sc.serveG.check()
|
||||
if f.IsAck() {
|
||||
if sc.pingSent && sc.sentPingData == f.Data {
|
||||
// This is a response to a PING we sent.
|
||||
sc.pingSent = false
|
||||
sc.readIdleTimer.Reset(sc.readIdleTimeout)
|
||||
}
|
||||
// 6.7 PING: " An endpoint MUST NOT respond to PING frames
|
||||
// containing this flag."
|
||||
return nil
|
||||
@@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
|
||||
st.cw.Init()
|
||||
st.flow.conn = &sc.flow // link to conn-level counter
|
||||
st.flow.add(sc.initialStreamSendWindowSize)
|
||||
st.inflow.init(sc.srv.initialStreamRecvWindowSize())
|
||||
st.inflow.init(sc.initialStreamRecvWindowSize)
|
||||
if sc.hs.WriteTimeout > 0 {
|
||||
st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
|
||||
}
|
||||
@@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error {
|
||||
if sc == nil || sc.srv == nil {
|
||||
return err
|
||||
}
|
||||
f := sc.srv.CountError
|
||||
f := sc.countErrorFunc
|
||||
if f == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
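The server.go hunks above replace the old per-field getters (s.maxReadFrameSize(), s.maxDecoderHeaderTableSize(), s.initialStreamRecvWindowSize(), ...) with a single http2Config value, conf, resolved once per connection, and add server-initiated PINGs driven by conf.SendPingTimeout. As a rough, hedged sketch only (not part of this diff), this is how an application might set the public http2.Server knobs those conf fields are derived from; cert.pem/key.pem and all the values are placeholders.

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	h2 := &http2.Server{
		MaxReadFrameSize:             1 << 20, // feeds conf.MaxReadFrameSize
		MaxDecoderHeaderTableSize:    4096,    // feeds conf.MaxDecoderHeaderTableSize
		MaxUploadBufferPerConnection: 1 << 20, // feeds conf.MaxUploadBufferPerConnection
		PermitProhibitedCipherSuites: false,   // checked against conf in serveConn
		CountError: func(errType string) { // becomes sc.countErrorFunc / conf.CountError
			log.Printf("http2 conn error: %s", errType)
		},
	}
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```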
vendor/golang.org/x/net/http2/transport.go (141 changes, generated, vendored)
@@ -25,7 +25,6 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptrace"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co
|
||||
}
|
||||
|
||||
func (t *Transport) maxHeaderListSize() uint32 {
|
||||
if t.MaxHeaderListSize == 0 {
|
||||
n := int64(t.MaxHeaderListSize)
|
||||
if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 {
|
||||
n = t.t1.MaxResponseHeaderBytes
|
||||
if n > 0 {
|
||||
n = adjustHTTP1MaxHeaderSize(n)
|
||||
}
|
||||
}
|
||||
if n <= 0 {
|
||||
return 10 << 20
|
||||
}
|
||||
if t.MaxHeaderListSize == 0xffffffff {
|
||||
if n >= 0xffffffff {
|
||||
return 0
|
||||
}
|
||||
return t.MaxHeaderListSize
|
||||
}
|
||||
|
||||
func (t *Transport) maxFrameReadSize() uint32 {
|
||||
if t.MaxReadFrameSize == 0 {
|
||||
return 0 // use the default provided by the peer
|
||||
}
|
||||
if t.MaxReadFrameSize < minMaxFrameSize {
|
||||
return minMaxFrameSize
|
||||
}
|
||||
if t.MaxReadFrameSize > maxFrameSize {
|
||||
return maxFrameSize
|
||||
}
|
||||
return t.MaxReadFrameSize
|
||||
return uint32(n)
|
||||
}
|
||||
|
||||
func (t *Transport) disableCompression() bool {
|
||||
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
|
||||
}
|
||||
|
||||
func (t *Transport) pingTimeout() time.Duration {
|
||||
if t.PingTimeout == 0 {
|
||||
return 15 * time.Second
|
||||
}
|
||||
return t.PingTimeout
|
||||
|
||||
}
|
||||
|
||||
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
|
||||
// It returns an error if t1 has already been HTTP/2-enabled.
|
||||
//
|
||||
@@ -370,11 +355,14 @@ type ClientConn struct {
|
||||
lastActive time.Time
|
||||
lastIdle time.Time // time last idle
|
||||
// Settings from peer: (also guarded by wmu)
|
||||
maxFrameSize uint32
|
||||
maxConcurrentStreams uint32
|
||||
peerMaxHeaderListSize uint64
|
||||
peerMaxHeaderTableSize uint32
|
||||
initialWindowSize uint32
|
||||
maxFrameSize uint32
|
||||
maxConcurrentStreams uint32
|
||||
peerMaxHeaderListSize uint64
|
||||
peerMaxHeaderTableSize uint32
|
||||
initialWindowSize uint32
|
||||
initialStreamRecvWindowSize int32
|
||||
readIdleTimeout time.Duration
|
||||
pingTimeout time.Duration
|
||||
|
||||
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
|
||||
// Write to reqHeaderMu to lock it, read from it to unlock.
|
||||
@@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() {
|
||||
}
|
||||
|
||||
type stickyErrWriter struct {
|
||||
group synctestGroupInterface
|
||||
conn net.Conn
|
||||
timeout time.Duration
|
||||
err *error
|
||||
@@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
|
||||
if *sew.err != nil {
|
||||
return 0, *sew.err
|
||||
}
|
||||
for {
|
||||
if sew.timeout != 0 {
|
||||
sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout))
|
||||
}
|
||||
nn, err := sew.conn.Write(p[n:])
|
||||
n += nn
|
||||
if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) {
|
||||
// Keep extending the deadline so long as we're making progress.
|
||||
continue
|
||||
}
|
||||
if sew.timeout != 0 {
|
||||
sew.conn.SetWriteDeadline(time.Time{})
|
||||
}
|
||||
*sew.err = err
|
||||
return n, err
|
||||
}
|
||||
n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
|
||||
*sew.err = err
|
||||
return n, err
|
||||
}
|
||||
|
||||
// noCachedConnError is the concrete type of ErrNoCachedConn, which
|
||||
@@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration {
|
||||
return t.t1.ExpectContinueTimeout
|
||||
}
|
||||
|
||||
func (t *Transport) maxDecoderHeaderTableSize() uint32 {
|
||||
if v := t.MaxDecoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
func (t *Transport) maxEncoderHeaderTableSize() uint32 {
|
||||
if v := t.MaxEncoderHeaderTableSize; v > 0 {
|
||||
return v
|
||||
}
|
||||
return initialHeaderTableSize
|
||||
}
|
||||
|
||||
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
|
||||
return t.newClientConn(c, t.disableKeepAlives())
|
||||
}
|
||||
|
||||
func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
|
||||
conf := configFromTransport(t)
|
||||
cc := &ClientConn{
|
||||
t: t,
|
||||
tconn: c,
|
||||
readerDone: make(chan struct{}),
|
||||
nextStreamID: 1,
|
||||
maxFrameSize: 16 << 10, // spec default
|
||||
initialWindowSize: 65535, // spec default
|
||||
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
|
||||
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
|
||||
streams: make(map[uint32]*clientStream),
|
||||
singleUse: singleUse,
|
||||
wantSettingsAck: true,
|
||||
pings: make(map[[8]byte]chan struct{}),
|
||||
reqHeaderMu: make(chan struct{}, 1),
|
||||
t: t,
|
||||
tconn: c,
|
||||
readerDone: make(chan struct{}),
|
||||
nextStreamID: 1,
|
||||
maxFrameSize: 16 << 10, // spec default
|
||||
initialWindowSize: 65535, // spec default
|
||||
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
|
||||
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
|
||||
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
|
||||
streams: make(map[uint32]*clientStream),
|
||||
singleUse: singleUse,
|
||||
wantSettingsAck: true,
|
||||
readIdleTimeout: conf.SendPingTimeout,
|
||||
pingTimeout: conf.PingTimeout,
|
||||
pings: make(map[[8]byte]chan struct{}),
|
||||
reqHeaderMu: make(chan struct{}, 1),
|
||||
}
|
||||
var group synctestGroupInterface
|
||||
if t.transportTestHooks != nil {
|
||||
t.markNewGoroutine()
|
||||
t.transportTestHooks.newclientconn(cc)
|
||||
c = cc.tconn
|
||||
group = t.group
|
||||
}
|
||||
if VerboseLogs {
|
||||
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
|
||||
@@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
// TODO: adjust this writer size to account for frame size +
|
||||
// MTU + crypto/tls record padding.
|
||||
cc.bw = bufio.NewWriter(stickyErrWriter{
|
||||
group: group,
|
||||
conn: c,
|
||||
timeout: t.WriteByteTimeout,
|
||||
timeout: conf.WriteByteTimeout,
|
||||
err: &cc.werr,
|
||||
})
|
||||
cc.br = bufio.NewReader(c)
|
||||
cc.fr = NewFramer(cc.bw, cc.br)
|
||||
if t.maxFrameReadSize() != 0 {
|
||||
cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize())
|
||||
}
|
||||
cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize)
|
||||
if t.CountError != nil {
|
||||
cc.fr.countError = t.CountError
|
||||
}
|
||||
maxHeaderTableSize := t.maxDecoderHeaderTableSize()
|
||||
maxHeaderTableSize := conf.MaxDecoderHeaderTableSize
|
||||
cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil)
|
||||
cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
|
||||
|
||||
cc.henc = hpack.NewEncoder(&cc.hbuf)
|
||||
cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
|
||||
cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize)
|
||||
cc.peerMaxHeaderTableSize = initialHeaderTableSize
|
||||
|
||||
if cs, ok := c.(connectionStater); ok {
|
||||
@@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
|
||||
initialSettings := []Setting{
|
||||
{ID: SettingEnablePush, Val: 0},
|
||||
{ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
|
||||
}
|
||||
if max := t.maxFrameReadSize(); max != 0 {
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max})
|
||||
{ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)},
|
||||
}
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize})
|
||||
if max := t.maxHeaderListSize(); max != 0 {
|
||||
initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
|
||||
}
|
||||
@@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
|
||||
cc.bw.Write(clientPreface)
|
||||
cc.fr.WriteSettings(initialSettings...)
|
||||
cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
|
||||
cc.inflow.init(transportDefaultConnFlow + initialWindowSize)
|
||||
cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection))
|
||||
cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize)
|
||||
cc.bw.Flush()
|
||||
if cc.werr != nil {
|
||||
cc.Close()
|
||||
@@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
}
|
||||
|
||||
func (cc *ClientConn) healthCheck() {
|
||||
pingTimeout := cc.t.pingTimeout()
|
||||
pingTimeout := cc.pingTimeout
|
||||
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
|
||||
// trigger the healthCheck again if there is no frame received.
|
||||
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
|
||||
@@ -2199,7 +2164,7 @@ type resAndError struct {
|
||||
func (cc *ClientConn) addStreamLocked(cs *clientStream) {
|
||||
cs.flow.add(int32(cc.initialWindowSize))
|
||||
cs.flow.setConnFlow(&cc.flow)
|
||||
cs.inflow.init(transportDefaultStreamFlow)
|
||||
cs.inflow.init(cc.initialStreamRecvWindowSize)
|
||||
cs.ID = cc.nextStreamID
|
||||
cc.nextStreamID += 2
|
||||
cc.streams[cs.ID] = cs
|
||||
@@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) {
|
||||
func (rl *clientConnReadLoop) run() error {
|
||||
cc := rl.cc
|
||||
gotSettings := false
|
||||
readIdleTimeout := cc.t.ReadIdleTimeout
|
||||
readIdleTimeout := cc.readIdleTimeout
|
||||
var t timer
|
||||
if readIdleTimeout != 0 {
|
||||
t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
|
||||
|
||||
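The transport.go hunks apply the same refactor on the client side: configFromTransport(t) replaces maxHeaderListSize()/maxFrameReadSize()/maxDecoderHeaderTableSize() and friends, the derived values (readIdleTimeout, pingTimeout, initialStreamRecvWindowSize) are cached on the ClientConn, and stickyErrWriter now delegates to writeWithByteTimeout. Below is a hedged sketch of the public Transport fields those values come from; the URL and durations are placeholders, not taken from the diff.

```go
package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t2 := &http2.Transport{
		MaxReadFrameSize:  1 << 20,          // conf.MaxReadFrameSize
		MaxHeaderListSize: 10 << 20,         // advertised SETTINGS_MAX_HEADER_LIST_SIZE
		ReadIdleTimeout:   30 * time.Second, // conf.SendPingTimeout: ping after 30s of silence
		PingTimeout:       15 * time.Second, // conf.PingTimeout: close if no PING ack arrives
		WriteByteTimeout:  10 * time.Second, // conf.WriteByteTimeout, used via writeWithByteTimeout
	}
	client := &http.Client{Transport: t2}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println(resp.Proto, resp.Status)
}
```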
vendor/golang.org/x/net/http2/write.go (10 changes, generated, vendored)
@@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error {
|
||||
|
||||
func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max }
|
||||
|
||||
type writePing struct {
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (w writePing) writeFrame(ctx writeContext) error {
|
||||
return ctx.Framer().WritePing(false, w.data)
|
||||
}
|
||||
|
||||
func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max }
|
||||
|
||||
type writePingAck struct{ pf *PingFrame }
|
||||
|
||||
func (w writePingAck) writeFrame(ctx writeContext) error {
|
||||
|
||||
vendor/golang.org/x/time/rate/rate.go (17 changes, generated, vendored)
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
|
||||
// bursts of at most b tokens.
|
||||
func NewLimiter(r Limit, b int) *Limiter {
|
||||
return &Limiter{
|
||||
limit: r,
|
||||
burst: b,
|
||||
limit: r,
|
||||
burst: b,
|
||||
tokens: float64(b),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
|
||||
tokens: n,
|
||||
timeToAct: t,
|
||||
}
|
||||
} else if lim.limit == 0 {
|
||||
var ok bool
|
||||
if lim.burst >= n {
|
||||
ok = true
|
||||
lim.burst -= n
|
||||
}
|
||||
return Reservation{
|
||||
ok: ok,
|
||||
lim: lim,
|
||||
tokens: lim.burst,
|
||||
timeToAct: t,
|
||||
}
|
||||
}
|
||||
|
||||
t, tokens := lim.advance(t)
|
||||
|
||||
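The rate.go hunk makes a fresh limiter's bucket explicitly full (tokens: float64(b)) and drops the special-case branch for limit == 0 in reserveN. A small illustration (not from the diff) of the resulting behavior:

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 1 token per second, burst of 3; after this change tokens starts at 3 explicitly.
	lim := rate.NewLimiter(rate.Limit(1), 3)
	for i := 0; i < 5; i++ {
		fmt.Println(i, lim.Allow()) // the first three calls succeed immediately
	}
}
```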
vendor/gopkg.in/evanphx/json-patch.v4/.gitignore (6 changes, generated, vendored, new file)
@@ -0,0 +1,6 @@
|
||||
# editor and IDE paraphernalia
|
||||
.idea
|
||||
.vscode
|
||||
|
||||
# macOS paraphernalia
|
||||
.DS_Store
|
||||
vendor/gopkg.in/evanphx/json-patch.v4/LICENSE (25 changes, generated, vendored, new file)
@@ -0,0 +1,25 @@
|
||||
Copyright (c) 2014, Evan Phoenix
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice,
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of the Evan Phoenix nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
vendor/gopkg.in/evanphx/json-patch.v4/README.md (317 changes, generated, vendored, new file)
@@ -0,0 +1,317 @@
|
||||
# JSON-Patch
|
||||
`jsonpatch` is a library which provides functionality for both applying
|
||||
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
|
||||
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
|
||||
|
||||
[](http://godoc.org/github.com/evanphx/json-patch)
|
||||
[](https://travis-ci.org/evanphx/json-patch)
|
||||
[](https://goreportcard.com/report/github.com/evanphx/json-patch)
|
||||
|
||||
# Get It!
|
||||
|
||||
**Latest and greatest**:
|
||||
```bash
|
||||
go get -u github.com/evanphx/json-patch/v5
|
||||
```
|
||||
|
||||
**Stable Versions**:
|
||||
* Version 5: `go get -u gopkg.in/evanphx/json-patch.v5`
|
||||
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
|
||||
|
||||
(previous versions below `v3` are unavailable)
|
||||
|
||||
# Use It!
|
||||
* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
|
||||
* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
|
||||
* [Comparing JSON documents](#comparing-json-documents)
|
||||
* [Combine merge patches](#combine-merge-patches)
|
||||
|
||||
|
||||
# Configuration
|
||||
|
||||
* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
|
||||
This defaults to `true` and enables the non-standard practice of allowing
|
||||
negative indices to mean indices starting at the end of an array. This
|
||||
functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
|
||||
false`.
|
||||
|
||||
* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
|
||||
which limits the total size increase in bytes caused by "copy" operations in a
|
||||
patch. It defaults to 0, which means there is no limit.
|
||||
|
||||
These global variables control the behavior of `jsonpatch.Apply`.
|
||||
|
||||
An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior
|
||||
is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`.
|
||||
|
||||
Structure `jsonpatch.ApplyOptions` includes the configuration options above
|
||||
and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`.
|
||||
|
||||
When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore
|
||||
`remove` operations whose `path` points to a non-existent location in the JSON document.
|
||||
`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions`
|
||||
returning an error when hitting a missing `path` on `remove`.
|
||||
|
||||
When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure
|
||||
that `add` operations produce all the `path` elements that are missing from the target object.
|
||||
|
||||
Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions`
|
||||
whose values are populated from the global configuration variables.
|
||||
|
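A small, hedged illustration of the configuration knobs described above, using only the v4 API vendored here (the package-level variables plus `DecodePatch`/`Apply`; `ApplyWithOptions` may not be available in this v4 copy). The document and patch contents are made up for illustration.

```go
package main

import (
	"fmt"

	jsonpatch "gopkg.in/evanphx/json-patch.v4"
)

func main() {
	// Allow negative indices (the default) and cap growth from "copy" operations.
	jsonpatch.SupportNegativeIndices = true
	jsonpatch.AccumulatedCopySizeLimit = 1 << 20 // bytes; 0 means no limit

	doc := []byte(`{"items": ["a", "b", "c"]}`)
	patchJSON := []byte(`[{"op": "remove", "path": "/items/-1"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // {"items":["a","b"]}
}
```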
||||
## Create and apply a merge patch
|
||||
Given both an original JSON document and a modified JSON document, you can create
|
||||
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
|
||||
|
||||
It can describe the changes needed to convert from the original to the
|
||||
modified JSON document.
|
||||
|
||||
Once you have a merge patch, you can apply it to other JSON documents using the
|
||||
`jsonpatch.MergePatch(document, patch)` function.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Let's create a merge patch from these two documents...
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
target := []byte(`{"name": "Jane", "age": 24}`)
|
||||
|
||||
patch, err := jsonpatch.CreateMergePatch(original, target)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Now lets apply the patch against a different JSON document...
|
||||
|
||||
alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
|
||||
modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
|
||||
|
||||
fmt.Printf("patch document: %s\n", patch)
|
||||
fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
|
||||
```bash
|
||||
$ go run main.go
|
||||
patch document: {"height":null,"name":"Jane"}
|
||||
updated alternative doc: {"age":28,"name":"Jane"}
|
||||
```
|
||||
|
||||
## Create and apply a JSON Patch
|
||||
You can create patch objects using `DecodePatch([]byte)`, which can then
|
||||
be applied against JSON documents.
|
||||
|
||||
The following is an example of creating a patch from two operations, and
|
||||
applying it against a JSON document.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
patchJSON := []byte(`[
|
||||
{"op": "replace", "path": "/name", "value": "Jane"},
|
||||
{"op": "remove", "path": "/height"}
|
||||
]`)
|
||||
|
||||
patch, err := jsonpatch.DecodePatch(patchJSON)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
modified, err := patch.Apply(original)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("Original document: %s\n", original)
|
||||
fmt.Printf("Modified document: %s\n", modified)
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
|
||||
```bash
|
||||
$ go run main.go
|
||||
Original document: {"name": "John", "age": 24, "height": 3.21}
|
||||
Modified document: {"age":24,"name":"Jane"}
|
||||
```
|
||||
|
||||
## Comparing JSON documents
|
||||
Due to potential whitespace and ordering differences, one cannot simply compare
|
||||
JSON strings or byte-arrays directly.
|
||||
|
||||
As such, you can instead use `jsonpatch.Equal(document1, document2)` to
|
||||
determine if two JSON documents are _structurally_ equal. This ignores
|
||||
whitespace differences, and key-value ordering.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
similar := []byte(`
|
||||
{
|
||||
"age": 24,
|
||||
"height": 3.21,
|
||||
"name": "John"
|
||||
}
|
||||
`)
|
||||
different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
|
||||
|
||||
if jsonpatch.Equal(original, similar) {
|
||||
fmt.Println(`"original" is structurally equal to "similar"`)
|
||||
}
|
||||
|
||||
if !jsonpatch.Equal(original, different) {
|
||||
fmt.Println(`"original" is _not_ structurally equal to "different"`)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
```bash
|
||||
$ go run main.go
|
||||
"original" is structurally equal to "similar"
|
||||
"original" is _not_ structurally equal to "different"
|
||||
```
|
||||
|
||||
## Combine merge patches
|
||||
Given two JSON merge patch documents, it is possible to combine them into a
|
||||
single merge patch which can describe both sets of changes.
|
||||
|
||||
The resulting merge patch can be used such that applying it results in a
|
||||
document structurally similar to the result of merging each merge patch into the document
|
||||
in succession.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
|
||||
nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
|
||||
ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
|
||||
|
||||
// Let's combine these merge patch documents...
|
||||
combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
	// Apply each patch individually against the original document
|
||||
withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Apply the combined patch against the original document
|
||||
|
||||
withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Do both result in the same thing? They should!
|
||||
if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
|
||||
fmt.Println("Both JSON documents are structurally the same!")
|
||||
}
|
||||
|
||||
fmt.Printf("combined merge patch: %s", combinedPatch)
|
||||
}
|
||||
```
|
||||
|
||||
When run, you get the following output:
|
||||
```bash
|
||||
$ go run main.go
|
||||
Both JSON documents are structurally the same!
|
||||
combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
|
||||
```
|
||||
|
||||
# CLI for comparing JSON documents
|
||||
You can install the commandline program `json-patch`.
|
||||
|
||||
This program can take multiple JSON patch documents as arguments,
|
||||
and be fed a JSON document from `stdin`. It will apply the patch(es) against
|
||||
the document and output the modified doc.
|
||||
|
||||
**patch.1.json**
|
||||
```json
|
||||
[
|
||||
{"op": "replace", "path": "/name", "value": "Jane"},
|
||||
{"op": "remove", "path": "/height"}
|
||||
]
|
||||
```
|
||||
|
||||
**patch.2.json**
|
||||
```json
|
||||
[
|
||||
{"op": "add", "path": "/address", "value": "123 Main St"},
|
||||
{"op": "replace", "path": "/age", "value": "21"}
|
||||
]
|
||||
```
|
||||
|
||||
**document.json**
|
||||
```json
|
||||
{
|
||||
"name": "John",
|
||||
"age": 24,
|
||||
"height": 3.21
|
||||
}
|
||||
```
|
||||
|
||||
You can then run:
|
||||
|
||||
```bash
|
||||
$ go install github.com/evanphx/json-patch/cmd/json-patch
|
||||
$ cat document.json | json-patch -p patch.1.json -p patch.2.json
|
||||
{"address":"123 Main St","age":"21","name":"Jane"}
|
||||
```
|
||||
|
||||
# Help It!
|
||||
Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
|
||||
or [create a PR](https://github.com/evanphx/json-patch/compare).
|
||||
|
||||
|
||||
Before creating a pull request, we'd ask that you make sure tests are passing
|
||||
and that you have added new tests when applicable.
|
||||
|
||||
Contributors can run tests using:
|
||||
|
||||
```bash
|
||||
go test -cover ./...
|
||||
```
|
||||
|
||||
Builds for pull requests are tested automatically
|
||||
using [TravisCI](https://travis-ci.org/evanphx/json-patch).
|
||||
vendor/gopkg.in/evanphx/json-patch.v4/errors.go (38 changes, generated, vendored, new file)
@@ -0,0 +1,38 @@
|
||||
package jsonpatch
|
||||
|
||||
import "fmt"
|
||||
|
||||
// AccumulatedCopySizeError is an error type returned when the accumulated size
|
||||
// increase caused by copy operations in a patch operation has exceeded the
|
||||
// limit.
|
||||
type AccumulatedCopySizeError struct {
|
||||
limit int64
|
||||
accumulated int64
|
||||
}
|
||||
|
||||
// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
|
||||
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
|
||||
return &AccumulatedCopySizeError{limit: l, accumulated: a}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (a *AccumulatedCopySizeError) Error() string {
|
||||
return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
|
||||
}
|
||||
|
||||
// ArraySizeError is an error type returned when the array size has exceeded
|
||||
// the limit.
|
||||
type ArraySizeError struct {
|
||||
limit int
|
||||
size int
|
||||
}
|
||||
|
||||
// NewArraySizeError returns an ArraySizeError.
|
||||
func NewArraySizeError(l, s int) *ArraySizeError {
|
||||
return &ArraySizeError{limit: l, size: s}
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (a *ArraySizeError) Error() string {
|
||||
return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
|
||||
}
|
||||
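For reference, a tiny hedged example (not part of the vendored file) showing how these error values render; in practice they surface as errors returned from applying a patch, for example when AccumulatedCopySizeLimit is exceeded. The numbers are arbitrary.

```go
package main

import (
	"fmt"

	jsonpatch "gopkg.in/evanphx/json-patch.v4"
)

func main() {
	copyErr := jsonpatch.NewAccumulatedCopySizeError(1024, 4096)
	fmt.Println(copyErr) // accumulated copy size 4096 exceeds the 1024-byte limit

	sizeErr := jsonpatch.NewArraySizeError(10, 250)
	fmt.Println(sizeErr) // array of size 250 exceeds the limit of 10
}
```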
vendor/gopkg.in/evanphx/json-patch.v4/merge.go (389 changes, generated, vendored, new file)
@@ -0,0 +1,389 @@
|
||||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
|
||||
curDoc, err := cur.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
pruneNulls(patch)
|
||||
return patch
|
||||
}
|
||||
|
||||
patchDoc, err := patch.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return patch
|
||||
}
|
||||
|
||||
mergeDocs(curDoc, patchDoc, mergeMerge)
|
||||
|
||||
return cur
|
||||
}
|
||||
|
||||
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
|
||||
for k, v := range *patch {
|
||||
if v == nil {
|
||||
if mergeMerge {
|
||||
(*doc)[k] = nil
|
||||
} else {
|
||||
delete(*doc, k)
|
||||
}
|
||||
} else {
|
||||
cur, ok := (*doc)[k]
|
||||
|
||||
if !ok || cur == nil {
|
||||
if !mergeMerge {
|
||||
pruneNulls(v)
|
||||
}
|
||||
|
||||
(*doc)[k] = v
|
||||
} else {
|
||||
(*doc)[k] = merge(cur, v, mergeMerge)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneNulls(n *lazyNode) {
|
||||
sub, err := n.intoDoc()
|
||||
|
||||
if err == nil {
|
||||
pruneDocNulls(sub)
|
||||
} else {
|
||||
ary, err := n.intoAry()
|
||||
|
||||
if err == nil {
|
||||
pruneAryNulls(ary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneDocNulls(doc *partialDoc) *partialDoc {
|
||||
for k, v := range *doc {
|
||||
if v == nil {
|
||||
delete(*doc, k)
|
||||
} else {
|
||||
pruneNulls(v)
|
||||
}
|
||||
}
|
||||
|
||||
return doc
|
||||
}
|
||||
|
||||
func pruneAryNulls(ary *partialArray) *partialArray {
|
||||
newAry := []*lazyNode{}
|
||||
|
||||
for _, v := range *ary {
|
||||
if v != nil {
|
||||
pruneNulls(v)
|
||||
}
|
||||
newAry = append(newAry, v)
|
||||
}
|
||||
|
||||
*ary = newAry
|
||||
|
||||
return ary
|
||||
}
|
||||
|
||||
var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document")
|
||||
var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
|
||||
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
|
||||
|
||||
// MergeMergePatches merges two merge patches together, such that
|
||||
// applying this resulting merged merge patch to a document yields the same
|
||||
// as merging each merge patch to the document in succession.
|
||||
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
|
||||
return doMergePatch(patch1Data, patch2Data, true)
|
||||
}
|
||||
|
||||
// MergePatch merges the patchData into the docData.
|
||||
func MergePatch(docData, patchData []byte) ([]byte, error) {
|
||||
return doMergePatch(docData, patchData, false)
|
||||
}
|
||||
|
||||
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
|
||||
doc := &partialDoc{}
|
||||
|
||||
docErr := json.Unmarshal(docData, doc)
|
||||
|
||||
patch := &partialDoc{}
|
||||
|
||||
patchErr := json.Unmarshal(patchData, patch)
|
||||
|
||||
if _, ok := docErr.(*json.SyntaxError); ok {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
if _, ok := patchErr.(*json.SyntaxError); ok {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr == nil && *doc == nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
if patchErr == nil && *patch == nil {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr != nil || patchErr != nil {
|
||||
// Not an error, just not a doc, so we turn straight into the patch
|
||||
if patchErr == nil {
|
||||
if mergeMerge {
|
||||
doc = patch
|
||||
} else {
|
||||
doc = pruneDocNulls(patch)
|
||||
}
|
||||
} else {
|
||||
patchAry := &partialArray{}
|
||||
patchErr = json.Unmarshal(patchData, patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
pruneAryNulls(patchAry)
|
||||
|
||||
out, patchErr := json.Marshal(patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, ErrBadJSONPatch
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
} else {
|
||||
mergeDocs(doc, patch, mergeMerge)
|
||||
}
|
||||
|
||||
return json.Marshal(doc)
|
||||
}
|
||||
|
||||
// resemblesJSONArray indicates whether the byte-slice "appears" to be
|
||||
// a JSON array or not.
|
||||
// False-positives are possible, as this function does not check the internal
|
||||
// structure of the array. It only checks that the outer syntax is present and
|
||||
// correct.
|
||||
func resemblesJSONArray(input []byte) bool {
|
||||
input = bytes.TrimSpace(input)
|
||||
|
||||
hasPrefix := bytes.HasPrefix(input, []byte("["))
|
||||
hasSuffix := bytes.HasSuffix(input, []byte("]"))
|
||||
|
||||
return hasPrefix && hasSuffix
|
||||
}
|
||||
|
||||
// CreateMergePatch will return a merge patch document capable of converting
|
||||
// the original document(s) to the modified document(s).
|
||||
// The parameters can be bytes of either two JSON Documents, or two arrays of
|
||||
// JSON documents.
|
||||
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
|
||||
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalResemblesArray := resemblesJSONArray(originalJSON)
|
||||
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
|
||||
|
||||
// Do both byte-slices seem like JSON arrays?
|
||||
if originalResemblesArray && modifiedResemblesArray {
|
||||
return createArrayMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
	// Are both byte-slices not arrays? Then they are likely JSON objects...
|
||||
if !originalResemblesArray && !modifiedResemblesArray {
|
||||
return createObjectMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// None of the above? Then return an error because of mismatched types.
|
||||
return nil, errBadMergeTypes
|
||||
}
|
||||
|
||||
// createObjectMergePatch will return a merge-patch document capable of
|
||||
// converting the original document to the modified document.
|
||||
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDoc := map[string]interface{}{}
|
||||
modifiedDoc := map[string]interface{}{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDoc)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
dest, err := getDiff(originalDoc, modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return json.Marshal(dest)
|
||||
}
|
||||
|
||||
// createArrayMergePatch will return an array of merge-patch documents capable
|
||||
// of converting the original document to the modified document for each
|
||||
// pair of JSON documents provided in the arrays.
|
||||
// Arrays of mismatched sizes will result in an error.
|
||||
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDocs := []json.RawMessage{}
|
||||
modifiedDocs := []json.RawMessage{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDocs)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
|
||||
if err != nil {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
total := len(originalDocs)
|
||||
if len(modifiedDocs) != total {
|
||||
return nil, ErrBadJSONDoc
|
||||
}
|
||||
|
||||
result := []json.RawMessage{}
|
||||
for i := 0; i < len(originalDocs); i++ {
|
||||
original := originalDocs[i]
|
||||
modified := modifiedDocs[i]
|
||||
|
||||
patch, err := createObjectMergePatch(original, modified)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, json.RawMessage(patch))
|
||||
}
|
||||
|
||||
return json.Marshal(result)
|
||||
}
|
||||
|
||||
// Returns true if the arrays match (must be json types).
|
||||
// As is idiomatic for go, an empty array is not the same as a nil array.
|
||||
func matchesArray(a, b []interface{}) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
if (a == nil && b != nil) || (a != nil && b == nil) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if !matchesValue(a[i], b[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the values match (must be json types)
|
||||
// The types of the values must match, otherwise it will always return false
|
||||
// If two map[string]interface{} are given, all elements must match.
|
||||
func matchesValue(av, bv interface{}) bool {
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
return false
|
||||
}
|
||||
switch at := av.(type) {
|
||||
case string:
|
||||
bt := bv.(string)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
bt := bv.(float64)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case bool:
|
||||
bt := bv.(bool)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
return true
|
||||
case map[string]interface{}:
|
||||
bt := bv.(map[string]interface{})
|
||||
if len(bt) != len(at) {
|
||||
return false
|
||||
}
|
||||
for key := range bt {
|
||||
av, aOK := at[key]
|
||||
bv, bOK := bt[key]
|
||||
if aOK != bOK {
|
||||
return false
|
||||
}
|
||||
if !matchesValue(av, bv) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
return matchesArray(at, bt)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
|
||||
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
||||
into := map[string]interface{}{}
|
||||
for key, bv := range b {
|
||||
av, ok := a[key]
|
||||
// value was added
|
||||
if !ok {
|
||||
into[key] = bv
|
||||
continue
|
||||
}
|
||||
// If types have changed, replace completely
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
into[key] = bv
|
||||
continue
|
||||
}
|
||||
// Types are the same, compare values
|
||||
switch at := av.(type) {
|
||||
case map[string]interface{}:
|
||||
bt := bv.(map[string]interface{})
|
||||
dst := make(map[string]interface{}, len(bt))
|
||||
dst, err := getDiff(at, bt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) > 0 {
|
||||
into[key] = dst
|
||||
}
|
||||
case string, float64, bool:
|
||||
if !matchesValue(av, bv) {
|
||||
into[key] = bv
|
||||
}
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
if !matchesArray(at, bt) {
|
||||
into[key] = bv
|
||||
}
|
||||
case nil:
|
||||
switch bv.(type) {
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
default:
|
||||
into[key] = bv
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
|
||||
}
|
||||
}
|
||||
// Now add all deleted values as nil
|
||||
for key := range a {
|
||||
_, found := b[key]
|
||||
if !found {
|
||||
into[key] = nil
|
||||
}
|
||||
}
|
||||
return into, nil
|
||||
}
|
||||
vendor/gopkg.in/evanphx/json-patch.v4/patch.go (851 changes, generated, vendored, new file)
@@ -0,0 +1,851 @@
|
||||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
eRaw = iota
|
||||
eDoc
|
||||
eAry
|
||||
)
|
||||
|
||||
var (
|
||||
// SupportNegativeIndices decides whether to support non-standard practice of
|
||||
// allowing negative indices to mean indices starting at the end of an array.
|
||||
// Default to true.
|
||||
SupportNegativeIndices bool = true
|
||||
// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
|
||||
// "copy" operations in a patch.
|
||||
AccumulatedCopySizeLimit int64 = 0
|
||||
)
|
||||
|
||||
var (
|
||||
ErrTestFailed = errors.New("test failed")
|
||||
ErrMissing = errors.New("missing value")
|
||||
ErrUnknownType = errors.New("unknown object type")
|
||||
ErrInvalid = errors.New("invalid state detected")
|
||||
ErrInvalidIndex = errors.New("invalid index referenced")
|
||||
)
|
||||
|
||||
type lazyNode struct {
|
||||
raw *json.RawMessage
|
||||
doc partialDoc
|
||||
ary partialArray
|
||||
which int
|
||||
}
|
||||
|
||||
// Operation is a single JSON-Patch step, such as a single 'add' operation.
|
||||
type Operation map[string]*json.RawMessage
|
||||
|
||||
// Patch is an ordered collection of Operations.
|
||||
type Patch []Operation
|
||||
|
||||
type partialDoc map[string]*lazyNode
|
||||
type partialArray []*lazyNode
|
||||
|
||||
type container interface {
|
||||
get(key string) (*lazyNode, error)
|
||||
set(key string, val *lazyNode) error
|
||||
add(key string, val *lazyNode) error
|
||||
remove(key string) error
|
||||
}
|
||||
|
||||
func newLazyNode(raw *json.RawMessage) *lazyNode {
|
||||
return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
|
||||
}
|
||||
|
||||
func (n *lazyNode) MarshalJSON() ([]byte, error) {
|
||||
switch n.which {
|
||||
case eRaw:
|
||||
return json.Marshal(n.raw)
|
||||
case eDoc:
|
||||
return json.Marshal(n.doc)
|
||||
case eAry:
|
||||
return json.Marshal(n.ary)
|
||||
default:
|
||||
return nil, ErrUnknownType
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lazyNode) UnmarshalJSON(data []byte) error {
|
||||
dest := make(json.RawMessage, len(data))
|
||||
copy(dest, data)
|
||||
n.raw = &dest
|
||||
n.which = eRaw
|
||||
return nil
|
||||
}
|
||||
|
||||
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
|
||||
if src == nil {
|
||||
return nil, 0, nil
|
||||
}
|
||||
a, err := src.MarshalJSON()
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
sz := len(a)
|
||||
ra := make(json.RawMessage, sz)
|
||||
copy(ra, a)
|
||||
return newLazyNode(&ra), sz, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) intoDoc() (*partialDoc, error) {
|
||||
if n.which == eDoc {
|
||||
return &n.doc, nil
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.which = eDoc
|
||||
return &n.doc, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) intoAry() (*partialArray, error) {
|
||||
if n.which == eAry {
|
||||
return &n.ary, nil
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, ErrInvalid
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.which = eAry
|
||||
return &n.ary, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) compact() []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := json.Compact(buf, *n.raw)
|
||||
|
||||
if err != nil {
|
||||
return *n.raw
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (n *lazyNode) tryDoc() bool {
|
||||
if n.raw == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.which = eDoc
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lazyNode) tryAry() bool {
|
||||
if n.raw == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.which = eAry
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lazyNode) equal(o *lazyNode) bool {
|
||||
if n.which == eRaw {
|
||||
if !n.tryDoc() && !n.tryAry() {
|
||||
if o.which != eRaw {
|
||||
return false
|
||||
}
|
||||
|
||||
return bytes.Equal(n.compact(), o.compact())
|
||||
}
|
||||
}
|
||||
|
||||
if n.which == eDoc {
|
||||
if o.which == eRaw {
|
||||
if !o.tryDoc() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if o.which != eDoc {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(n.doc) != len(o.doc) {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range n.doc {
|
||||
ov, ok := o.doc[k]
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if (v == nil) != (ov == nil) {
|
||||
return false
|
||||
}
|
||||
|
||||
if v == nil && ov == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !v.equal(ov) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
if o.which != eAry && !o.tryAry() {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(n.ary) != len(o.ary) {
|
||||
return false
|
||||
}
|
||||
|
||||
for idx, val := range n.ary {
|
||||
if !val.equal(o.ary[idx]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Kind reads the "op" field of the Operation.
|
||||
func (o Operation) Kind() string {
|
||||
if obj, ok := o["op"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return op
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// Path reads the "path" field of the Operation.
|
||||
func (o Operation) Path() (string, error) {
|
||||
if obj, ok := o["path"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown", err
|
||||
}
|
||||
|
||||
return op, nil
|
||||
}
|
||||
|
||||
return "unknown", errors.Wrapf(ErrMissing, "operation missing path field")
|
||||
}
|
||||
|
||||
// From reads the "from" field of the Operation.
|
||||
func (o Operation) From() (string, error) {
|
||||
if obj, ok := o["from"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown", err
|
||||
}
|
||||
|
||||
return op, nil
|
||||
}
|
||||
|
||||
return "unknown", errors.Wrapf(ErrMissing, "operation, missing from field")
|
||||
}
|
||||
|
||||
func (o Operation) value() *lazyNode {
|
||||
if obj, ok := o["value"]; ok {
|
||||
return newLazyNode(obj)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValueInterface decodes the operation value into an interface.
|
||||
func (o Operation) ValueInterface() (interface{}, error) {
|
||||
if obj, ok := o["value"]; ok && obj != nil {
|
||||
var v interface{}
|
||||
|
||||
err := json.Unmarshal(*obj, &v)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(ErrMissing, "operation, missing value field")
|
||||
}
|
||||
|
||||
func isArray(buf []byte) bool {
|
||||
Loop:
|
||||
for _, c := range buf {
|
||||
switch c {
|
||||
case ' ':
|
||||
case '\n':
|
||||
case '\t':
|
||||
continue
|
||||
case '[':
|
||||
return true
|
||||
default:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func findObject(pd *container, path string) (container, string) {
|
||||
doc := *pd
|
||||
|
||||
split := strings.Split(path, "/")
|
||||
|
||||
if len(split) < 2 {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
parts := split[1 : len(split)-1]
|
||||
|
||||
key := split[len(split)-1]
|
||||
|
||||
var err error
|
||||
|
||||
for _, part := range parts {
|
||||
|
||||
next, ok := doc.get(decodePatchKey(part))
|
||||
|
||||
if next == nil || ok != nil {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
if isArray(*next.raw) {
|
||||
doc, err = next.intoAry()
|
||||
|
||||
if err != nil {
|
||||
return nil, ""
|
||||
}
|
||||
} else {
|
||||
doc, err = next.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return nil, ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return doc, decodePatchKey(key)
|
||||
}
|
||||
|
||||
func (d *partialDoc) set(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) add(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) get(key string) (*lazyNode, error) {
|
||||
return (*d)[key], nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) remove(key string) error {
|
||||
_, ok := (*d)[key]
|
||||
if !ok {
|
||||
return errors.Wrapf(ErrMissing, "Unable to remove nonexistent key: %s", key)
|
||||
}
|
||||
|
||||
delete(*d, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// set should only be used to implement the "replace" operation, so "key" must
|
||||
// be an already existing index in "d".
|
||||
func (d *partialArray) set(key string, val *lazyNode) error {
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(*d) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(*d)
|
||||
}
|
||||
|
||||
(*d)[idx] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) add(key string, val *lazyNode) error {
|
||||
if key == "-" {
|
||||
*d = append(*d, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "value was not a proper array index: '%s'", key)
|
||||
}
|
||||
|
||||
sz := len(*d) + 1
|
||||
|
||||
ary := make([]*lazyNode, sz)
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(ary) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(ary) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(ary)
|
||||
}
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
ary[idx] = val
|
||||
copy(ary[idx+1:], cur[idx:])
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) get(key string) (*lazyNode, error) {
|
||||
idx, err := strconv.Atoi(key)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(*d) {
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(*d)
|
||||
}
|
||||
|
||||
if idx >= len(*d) {
|
||||
return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
return (*d)[idx], nil
|
||||
}
|
||||
|
||||
func (d *partialArray) remove(key string) error {
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(cur) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
if !SupportNegativeIndices {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
if idx < -len(cur) {
|
||||
return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx)
|
||||
}
|
||||
idx += len(cur)
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, len(cur)-1)
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
copy(ary[idx:], cur[idx+1:])
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
|
||||
}
|
||||
|
func (p Patch) add(doc *container, op Operation) error {
    path, err := op.Path()
    if err != nil {
        return errors.Wrapf(ErrMissing, "add operation failed to decode path")
    }

    con, key := findObject(doc, path)

    if con == nil {
        return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path)
    }

    err = con.add(key, op.value())
    if err != nil {
        return errors.Wrapf(err, "error in add for path: '%s'", path)
    }

    return nil
}

func (p Patch) remove(doc *container, op Operation) error {
    path, err := op.Path()
    if err != nil {
        return errors.Wrapf(ErrMissing, "remove operation failed to decode path")
    }

    con, key := findObject(doc, path)

    if con == nil {
        return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path)
    }

    err = con.remove(key)
    if err != nil {
        return errors.Wrapf(err, "error in remove for path: '%s'", path)
    }

    return nil
}

func (p Patch) replace(doc *container, op Operation) error {
    path, err := op.Path()
    if err != nil {
        return errors.Wrapf(err, "replace operation failed to decode path")
    }

    if path == "" {
        val := op.value()

        if val.which == eRaw {
            if !val.tryDoc() {
                if !val.tryAry() {
                    return errors.Wrapf(err, "replace operation value must be object or array")
                }
            }
        }

        switch val.which {
        case eAry:
            *doc = &val.ary
        case eDoc:
            *doc = &val.doc
        case eRaw:
            return errors.Wrapf(err, "replace operation hit impossible case")
        }

        return nil
    }

    con, key := findObject(doc, path)

    if con == nil {
        return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path)
    }

    _, ok := con.get(key)
    if ok != nil {
        return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing key: %s", path)
    }

    err = con.set(key, op.value())
    if err != nil {
        return errors.Wrapf(err, "error in remove for path: '%s'", path)
    }

    return nil
}

func (p Patch) move(doc *container, op Operation) error {
    from, err := op.From()
    if err != nil {
        return errors.Wrapf(err, "move operation failed to decode from")
    }

    con, key := findObject(doc, from)

    if con == nil {
        return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from)
    }

    val, err := con.get(key)
    if err != nil {
        return errors.Wrapf(err, "error in move for path: '%s'", key)
    }

    err = con.remove(key)
    if err != nil {
        return errors.Wrapf(err, "error in move for path: '%s'", key)
    }

    path, err := op.Path()
    if err != nil {
        return errors.Wrapf(err, "move operation failed to decode path")
    }

    con, key = findObject(doc, path)

    if con == nil {
        return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path)
    }

    err = con.add(key, val)
    if err != nil {
        return errors.Wrapf(err, "error in move for path: '%s'", path)
    }

    return nil
}

func (p Patch) test(doc *container, op Operation) error {
    path, err := op.Path()
    if err != nil {
        return errors.Wrapf(err, "test operation failed to decode path")
    }

    if path == "" {
        var self lazyNode

        switch sv := (*doc).(type) {
        case *partialDoc:
            self.doc = *sv
            self.which = eDoc
        case *partialArray:
            self.ary = *sv
            self.which = eAry
        }

        if self.equal(op.value()) {
            return nil
        }

        return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    }

    con, key := findObject(doc, path)

    if con == nil {
        return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path)
    }

    val, err := con.get(key)
    if err != nil {
        return errors.Wrapf(err, "error in test for path: '%s'", path)
    }

    if val == nil {
        if op.value().raw == nil {
            return nil
        }
        return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    } else if op.value() == nil {
        return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
    }

    if val.equal(op.value()) {
        return nil
    }

    return errors.Wrapf(ErrTestFailed, "testing value %s failed", path)
}

func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64) error {
    from, err := op.From()
    if err != nil {
        return errors.Wrapf(err, "copy operation failed to decode from")
    }

    con, key := findObject(doc, from)

    if con == nil {
        return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from)
    }

    val, err := con.get(key)
    if err != nil {
        return errors.Wrapf(err, "error in copy for from: '%s'", from)
    }

    path, err := op.Path()
    if err != nil {
        return errors.Wrapf(ErrMissing, "copy operation failed to decode path")
    }

    con, key = findObject(doc, path)

    if con == nil {
        return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path)
    }

    valCopy, sz, err := deepCopy(val)
    if err != nil {
        return errors.Wrapf(err, "error while performing deep copy")
    }

    (*accumulatedCopySize) += int64(sz)
    if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
        return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
    }

    err = con.add(key, valCopy)
    if err != nil {
        return errors.Wrapf(err, "error while adding value during copy")
    }

    return nil
}

// Equal indicates if 2 JSON documents have the same structural equality.
func Equal(a, b []byte) bool {
    ra := make(json.RawMessage, len(a))
    copy(ra, a)
    la := newLazyNode(&ra)

    rb := make(json.RawMessage, len(b))
    copy(rb, b)
    lb := newLazyNode(&rb)

    return la.equal(lb)
}

// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
    var p Patch

    err := json.Unmarshal(buf, &p)

    if err != nil {
        return nil, err
    }

    return p, nil
}

// Apply mutates a JSON document according to the patch, and returns the new
// document.
func (p Patch) Apply(doc []byte) ([]byte, error) {
    return p.ApplyIndent(doc, "")
}

// ApplyIndent mutates a JSON document according to the patch, and returns the new
// document indented.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
    if len(doc) == 0 {
        return doc, nil
    }

    var pd container
    if doc[0] == '[' {
        pd = &partialArray{}
    } else {
        pd = &partialDoc{}
    }

    err := json.Unmarshal(doc, pd)

    if err != nil {
        return nil, err
    }

    err = nil

    var accumulatedCopySize int64

    for _, op := range p {
        switch op.Kind() {
        case "add":
            err = p.add(&pd, op)
        case "remove":
            err = p.remove(&pd, op)
        case "replace":
            err = p.replace(&pd, op)
        case "move":
            err = p.move(&pd, op)
        case "test":
            err = p.test(&pd, op)
        case "copy":
            err = p.copy(&pd, op, &accumulatedCopySize)
        default:
            err = fmt.Errorf("Unexpected kind: %s", op.Kind())
        }

        if err != nil {
            return nil, err
        }
    }

    if indent != "" {
        return json.MarshalIndent(pd, "", indent)
    }

    return json.Marshal(pd)
}

// From http://tools.ietf.org/html/rfc6901#section-4 :
//
// Evaluation of each reference token begins by decoding any escaped
// character sequence. This is performed by first transforming any
// occurrence of the sequence '~1' to '/', and then transforming any
// occurrence of the sequence '~0' to '~'.

var (
    rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
)

func decodePatchKey(k string) string {
    return rfc6901Decoder.Replace(k)
}
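
The exported surface shown above (DecodePatch, Apply, ApplyIndent, Equal, plus the SupportNegativeIndices and AccumulatedCopySizeLimit knobs) is all a caller normally touches. Below is a minimal usage sketch, not part of the vendored file; it assumes this file belongs to the github.com/evanphx/json-patch module (the import path is an assumption), and the document and patch values are made up for illustration.

package main

import (
    "fmt"

    jsonpatch "github.com/evanphx/json-patch"
)

func main() {
    original := []byte(`{"name": "alpine", "labels": {}, "tags": ["3.19"]}`)

    // "~1" in a JSON Pointer decodes to "/" (see decodePatchKey above), so the
    // last op targets a key literally named "app/name".
    patchJSON := []byte(`[
        {"op": "replace", "path": "/name", "value": "alpine-edge"},
        {"op": "add", "path": "/tags/-", "value": "edge"},
        {"op": "add", "path": "/labels/app~1name", "value": "diun"}
    ]`)

    patch, err := jsonpatch.DecodePatch(patchJSON)
    if err != nil {
        panic(err)
    }

    modified, err := patch.Apply(original)
    if err != nil {
        panic(err)
    }

    fmt.Println(string(modified))
    fmt.Println(jsonpatch.Equal(original, modified)) // false: the documents differ structurally
}
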
1 vendor/k8s.io/api/admissionregistration/v1/doc.go generated vendored
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
// +groupName=admissionregistration.k8s.io

// Package v1 is the v1 version of the API.

4407 vendor/k8s.io/api/admissionregistration/v1/generated.pb.go generated vendored
File diff suppressed because it is too large
583 vendor/k8s.io/api/admissionregistration/v1/generated.proto generated vendored
@@ -28,6 +28,56 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/admissionregistration/v1";

// AuditAnnotation describes how to produce an audit annotation for an API request.
message AuditAnnotation {
  // key specifies the audit annotation key. The audit annotation keys of
  // a ValidatingAdmissionPolicy must be unique. The key must be a qualified
  // name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
  //
  // The key is combined with the resource name of the
  // ValidatingAdmissionPolicy to construct an audit annotation key:
  // "{ValidatingAdmissionPolicy name}/{key}".
  //
  // If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
  // and the same audit annotation key, the annotation key will be identical.
  // In this case, the first annotation written with the key will be included
  // in the audit event and all subsequent annotations with the same key
  // will be discarded.
  //
  // Required.
  optional string key = 1;

  // valueExpression represents the expression which is evaluated by CEL to
  // produce an audit annotation value. The expression must evaluate to either
  // a string or null value. If the expression evaluates to a string, the
  // audit annotation is included with the string value. If the expression
  // evaluates to null or empty string the audit annotation will be omitted.
  // The valueExpression may be no longer than 5kb in length.
  // If the result of the valueExpression is more than 10kb in length, it
  // will be truncated to 10kb.
  //
  // If multiple ValidatingAdmissionPolicyBinding resources match an
  // API request, then the valueExpression will be evaluated for
  // each binding. All unique values produced by the valueExpressions
  // will be joined together in a comma-separated list.
  //
  // Required.
  optional string valueExpression = 2;
}

// ExpressionWarning is a warning information that targets a specific expression.
message ExpressionWarning {
  // The path to the field that refers the expression.
  // For example, the reference to the expression of the first item of
  // validations is "spec.validations[0].expression"
  optional string fieldRef = 2;

  // The content of type checking information in a human-readable form.
  // Each line of the warning contains the type that the expression is checked
  // against, followed by the type check error from the compiler.
  optional string warning = 3;
}

// MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.
message MatchCondition {
  // Name is an identifier for this match condition, used for strategic merging of MatchConditions,
@@ -57,6 +107,101 @@ message MatchCondition {
  optional string expression = 2;
}

// MatchResources decides whether to run the admission control policy on an object based
// on whether it meets the match criteria.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
// +structType=atomic
message MatchResources {
  // NamespaceSelector decides whether to run the admission control policy on an object based
  // on whether the namespace for that object matches the selector. If the
  // object itself is a namespace, the matching is performed on
  // object.metadata.labels. If the object is another cluster scoped resource,
  // it never skips the policy.
  //
  // For example, to run the webhook on any objects whose namespace is not
  // associated with "runlevel" of "0" or "1"; you will set the selector as
  // follows:
  // "namespaceSelector": {
  //   "matchExpressions": [
  //     {
  //       "key": "runlevel",
  //       "operator": "NotIn",
  //       "values": [
  //         "0",
  //         "1"
  //       ]
  //     }
  //   ]
  // }
  //
  // If instead you want to only run the policy on any objects whose
  // namespace is associated with the "environment" of "prod" or "staging";
  // you will set the selector as follows:
  // "namespaceSelector": {
  //   "matchExpressions": [
  //     {
  //       "key": "environment",
  //       "operator": "In",
  //       "values": [
  //         "prod",
  //         "staging"
  //       ]
  //     }
  //   ]
  // }
  //
  // See
  // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
  // for more examples of label selectors.
  //
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;

  // ObjectSelector decides whether to run the validation based on if the
  // object has matching labels. objectSelector is evaluated against both
  // the oldObject and newObject that would be sent to the cel validation, and
  // is considered to match if either object matches the selector. A null
  // object (oldObject in the case of create, or newObject in the case of
  // delete) or an object that cannot have labels (like a
  // DeploymentRollback or a PodProxyOptions object) is not considered to
  // match.
  // Use the object selector only if the webhook is opt-in, because end
  // users may skip the admission webhook by setting the labels.
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;

  // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
  // The policy cares about an operation if it matches _any_ Rule.
  // +listType=atomic
  // +optional
  repeated NamedRuleWithOperations resourceRules = 3;

  // ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
  // The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
  // +listType=atomic
  // +optional
  repeated NamedRuleWithOperations excludeResourceRules = 4;

  // matchPolicy defines how the "MatchResources" list is used to match incoming requests.
  // Allowed values are "Exact" or "Equivalent".
  //
  // - Exact: match a request only if it exactly matches a specified rule.
  // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
  // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
  // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
  //
  // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version.
  // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
  // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
  // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
  //
  // Defaults to "Equivalent"
  // +optional
  optional string matchPolicy = 7;
}

// MutatingWebhook describes an admission webhook and the resources and operations it applies to.
message MutatingWebhook {
  // The name of the admission webhook.
@@ -76,6 +221,7 @@ message MutatingWebhook {
  // from putting the cluster in a state which cannot be recovered from without completely
  // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
  // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
  // +listType=atomic
  repeated RuleWithOperations rules = 3;

  // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
@@ -144,7 +290,7 @@ message MutatingWebhook {
  //
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;

  // ObjectSelector decides whether to run the webhook based on if the
  // object has matching labels. objectSelector is evaluated against both
@@ -158,7 +304,7 @@ message MutatingWebhook {
  // users may skip the admission webhook by setting the labels.
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;

  // SideEffects states whether this webhook has side effects.
  // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown).
@@ -183,6 +329,7 @@ message MutatingWebhook {
  // If a persisted webhook configuration specifies allowed versions and does not
  // include any versions known to the API Server, calls to the webhook will fail
  // and be subject to the failure policy.
  // +listType=atomic
  repeated string admissionReviewVersions = 8;

  // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
@@ -215,13 +362,10 @@ message MutatingWebhook {
  // - If failurePolicy=Fail, reject the request
  // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
  //
  // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
  //
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  // +featureGate=AdmissionWebhookMatchConditions
  // +optional
  repeated MatchCondition matchConditions = 12;
}
@@ -230,12 +374,14 @@ message MutatingWebhook {
message MutatingWebhookConfiguration {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Webhooks is a list of webhooks and the affected resources and operations.
  // +optional
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  repeated MutatingWebhook Webhooks = 2;
}

@@ -244,12 +390,94 @@ message MutatingWebhookConfigurationList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of MutatingWebhookConfiguration.
  repeated MutatingWebhookConfiguration items = 2;
}

// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
// +structType=atomic
message NamedRuleWithOperations {
  // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
  // +listType=atomic
  // +optional
  repeated string resourceNames = 1;

  // RuleWithOperations is a tuple of Operations and Resources.
  optional RuleWithOperations ruleWithOperations = 2;
}

// ParamKind is a tuple of Group Kind and Version.
// +structType=atomic
message ParamKind {
  // APIVersion is the API group version the resources belong to.
  // In format of "group/version".
  // Required.
  optional string apiVersion = 1;

  // Kind is the API kind the resources belong to.
  // Required.
  optional string kind = 2;
}

// ParamRef describes how to locate the params to be used as input to
// expressions of rules applied by a policy binding.
// +structType=atomic
message ParamRef {
  // name is the name of the resource being referenced.
  //
  // One of `name` or `selector` must be set, but `name` and `selector` are
  // mutually exclusive properties. If one is set, the other must be unset.
  //
  // A single parameter used for all admission requests can be configured
  // by setting the `name` field, leaving `selector` blank, and setting namespace
  // if `paramKind` is namespace-scoped.
  optional string name = 1;

  // namespace is the namespace of the referenced resource. Allows limiting
  // the search for params to a specific namespace. Applies to both `name` and
  // `selector` fields.
  //
  // A per-namespace parameter may be used by specifying a namespace-scoped
  // `paramKind` in the policy and leaving this field empty.
  //
  // - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
  // field results in a configuration error.
  //
  // - If `paramKind` is namespace-scoped, the namespace of the object being
  // evaluated for admission will be used when this field is left unset. Take
  // care that if this is left empty the binding must not match any cluster-scoped
  // resources, which will result in an error.
  //
  // +optional
  optional string namespace = 2;

  // selector can be used to match multiple param objects based on their labels.
  // Supply selector: {} to match all resources of the ParamKind.
  //
  // If multiple params are found, they are all evaluated with the policy expressions
  // and the results are ANDed together.
  //
  // One of `name` or `selector` must be set, but `name` and `selector` are
  // mutually exclusive properties. If one is set, the other must be unset.
  //
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;

  // `parameterNotFoundAction` controls the behavior of the binding when the resource
  // exists, and name or selector is valid, but there are no parameters
  // matched by the binding. If the value is set to `Allow`, then no
  // matched parameters will be treated as successful validation by the binding.
  // If set to `Deny`, then no matched parameters will be subject to the
  // `failurePolicy` of the policy.
  //
  // Allowed values are `Allow` or `Deny`
  //
  // Required
  optional string parameterNotFoundAction = 4;
}

// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended
// to make sure that all the tuple expansions are valid.
message Rule {
@@ -333,6 +561,241 @@ message ServiceReference {
  optional int32 port = 4;
}

// TypeChecking contains results of type checking the expressions in the
// ValidatingAdmissionPolicy
message TypeChecking {
  // The type checking warnings for each expression.
  // +optional
  // +listType=atomic
  repeated ExpressionWarning expressionWarnings = 1;
}

// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
message ValidatingAdmissionPolicy {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Specification of the desired behavior of the ValidatingAdmissionPolicy.
  optional ValidatingAdmissionPolicySpec spec = 2;

  // The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
  // behaves in the expected way.
  // Populated by the system.
  // Read-only.
  // +optional
  optional ValidatingAdmissionPolicyStatus status = 3;
}

// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources.
// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
//
// For a given admission request, each binding will cause its policy to be
// evaluated N times, where N is 1 for policies/bindings that don't use
// params, otherwise N is the number of parameters selected by the binding.
//
// The CEL expressions of a policy must have a computed CEL cost below the maximum
// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
// Adding/removing policies, bindings, or params can not affect whether a
// given (policy, binding, param) combination is within its own CEL budget.
message ValidatingAdmissionPolicyBinding {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
  optional ValidatingAdmissionPolicyBindingSpec spec = 2;
}

// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.
message ValidatingAdmissionPolicyBindingList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of PolicyBinding.
  repeated ValidatingAdmissionPolicyBinding items = 2;
}

// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
message ValidatingAdmissionPolicyBindingSpec {
  // PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
  // If the referenced resource does not exist, this binding is considered invalid and will be ignored
  // Required.
  optional string policyName = 1;

  // paramRef specifies the parameter resource used to configure the admission control policy.
  // It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
  // If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.
  // If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
  // +optional
  optional ParamRef paramRef = 2;

  // MatchResources declares what resources match this binding and will be validated by it.
  // Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
  // If this is unset, all resources matched by the policy are validated by this binding
  // When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
  // Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
  // +optional
  optional MatchResources matchResources = 3;

  // validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
  // If a validation evaluates to false it is always enforced according to these actions.
  //
  // Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
  // to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
  // ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
  //
  // validationActions is declared as a set of action values. Order does
  // not matter. validationActions may not contain duplicates of the same action.
  //
  // The supported actions values are:
  //
  // "Deny" specifies that a validation failure results in a denied request.
  //
  // "Warn" specifies that a validation failure is reported to the request client
  // in HTTP Warning headers, with a warning code of 299. Warnings can be sent
  // both for allowed or denied admission responses.
  //
  // "Audit" specifies that a validation failure is included in the published
  // audit event for the request. The audit event will contain a
  // `validation.policy.admission.k8s.io/validation_failure` audit annotation
  // with a value containing the details of the validation failures, formatted as
  // a JSON list of objects, each with the following fields:
  // - message: The validation failure message string
  // - policy: The resource name of the ValidatingAdmissionPolicy
  // - binding: The resource name of the ValidatingAdmissionPolicyBinding
  // - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
  // - validationActions: The enforcement actions enacted for the validation failure
  // Example audit annotation:
  // `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]"`
  //
  // Clients should expect to handle additional values by ignoring
  // any values not recognized.
  //
  // "Deny" and "Warn" may not be used together since this combination
  // needlessly duplicates the validation failure both in the
  // API response body and the HTTP warning headers.
  //
  // Required.
  // +listType=set
  repeated string validationActions = 4;
}

// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
message ValidatingAdmissionPolicyList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of ValidatingAdmissionPolicy.
  repeated ValidatingAdmissionPolicy items = 2;
}

// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
message ValidatingAdmissionPolicySpec {
  // ParamKind specifies the kind of resources used to parameterize this policy.
  // If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
  // If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
  // If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
  // +optional
  optional ParamKind paramKind = 1;

  // MatchConstraints specifies what resources this policy is designed to validate.
  // The AdmissionPolicy cares about a request if it matches _all_ Constraints.
  // However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
  // ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
  // Required.
  optional MatchResources matchConstraints = 2;

  // Validations contain CEL expressions which is used to apply the validation.
  // Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is
  // required.
  // +listType=atomic
  // +optional
  repeated Validation validations = 3;

  // failurePolicy defines how to handle failures for the admission policy. Failures can
  // occur from CEL expression parse errors, type check errors, runtime errors and invalid
  // or mis-configured policy definitions or bindings.
  //
  // A policy is invalid if spec.paramKind refers to a non-existent Kind.
  // A binding is invalid if spec.paramRef.name refers to a non-existent resource.
  //
  // failurePolicy does not define how validations that evaluate to false are handled.
  //
  // When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
  // define how failures are enforced.
  //
  // Allowed values are Ignore or Fail. Defaults to Fail.
  // +optional
  optional string failurePolicy = 4;

  // auditAnnotations contains CEL expressions which are used to produce audit
  // annotations for the audit event of the API request.
  // validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is
  // required.
  // +listType=atomic
  // +optional
  repeated AuditAnnotation auditAnnotations = 5;

  // MatchConditions is a list of conditions that must be met for a request to be validated.
  // Match conditions filter requests that have already been matched by the rules,
  // namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
  // There are a maximum of 64 match conditions allowed.
  //
  // If a parameter object is provided, it can be accessed via the `params` handle in the same
  // manner as validation expressions.
  //
  // The exact matching logic is (in order):
  //   1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
  //   2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
  //   3. If any matchCondition evaluates to an error (but none are FALSE):
  //      - If failurePolicy=Fail, reject the request
  //      - If failurePolicy=Ignore, the policy is skipped
  //
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  // +optional
  repeated MatchCondition matchConditions = 6;

  // Variables contain definitions of variables that can be used in composition of other expressions.
  // Each variable is defined as a named CEL expression.
  // The variables defined here will be available under `variables` in other expressions of the policy
  // except MatchConditions because MatchConditions are evaluated before the rest of the policy.
  //
  // The expression of a variable can refer to other variables defined earlier in the list but not those after.
  // Thus, Variables must be sorted by the order of first appearance and acyclic.
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  // +optional
  repeated Variable variables = 7;
}

// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
message ValidatingAdmissionPolicyStatus {
  // The generation observed by the controller.
  // +optional
  optional int64 observedGeneration = 1;

  // The results of type checking for each expression.
  // Presence of this field indicates the completion of the type checking.
  // +optional
  optional TypeChecking typeChecking = 2;

  // The conditions represent the latest available observations of a policy's current state.
  // +optional
  // +listType=map
  // +listMapKey=type
  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
}

// ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
message ValidatingWebhook {
  // The name of the admission webhook.
@@ -352,6 +815,7 @@ message ValidatingWebhook {
  // from putting the cluster in a state which cannot be recovered from without completely
  // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
  // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
  // +listType=atomic
  repeated RuleWithOperations rules = 3;

  // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
@@ -420,7 +884,7 @@ message ValidatingWebhook {
  //
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;

  // ObjectSelector decides whether to run the webhook based on if the
  // object has matching labels. objectSelector is evaluated against both
@@ -434,7 +898,7 @@ message ValidatingWebhook {
  // users may skip the admission webhook by setting the labels.
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;

  // SideEffects states whether this webhook has side effects.
  // Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown).
@@ -459,6 +923,7 @@ message ValidatingWebhook {
  // If a persisted webhook configuration specifies allowed versions and does not
  // include any versions known to the API Server, calls to the webhook will fail
  // and be subject to the failure policy.
  // +listType=atomic
  repeated string admissionReviewVersions = 8;

  // MatchConditions is a list of conditions that must be met for a request to be sent to this
@@ -473,13 +938,10 @@ message ValidatingWebhook {
  // - If failurePolicy=Fail, reject the request
  // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
  //
  // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
  //
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  // +featureGate=AdmissionWebhookMatchConditions
  // +optional
  repeated MatchCondition matchConditions = 11;
}
@@ -488,12 +950,14 @@ message ValidatingWebhook {
message ValidatingWebhookConfiguration {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Webhooks is a list of webhooks and the affected resources and operations.
  // +optional
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  repeated ValidatingWebhook Webhooks = 2;
}

@@ -502,12 +966,103 @@ message ValidatingWebhookConfigurationList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of ValidatingWebhookConfiguration.
  repeated ValidatingWebhookConfiguration items = 2;
}

// Validation specifies the CEL expression which is used to apply the validation.
message Validation {
  // Expression represents the expression which will be evaluated by CEL.
  // ref: https://github.com/google/cel-spec
  // CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
  //
  // - 'object' - The object from the incoming request. The value is null for DELETE requests.
  // - 'oldObject' - The existing object. The value is null for CREATE requests.
  // - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
  // - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
  // - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
  // - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
  //   For example, a variable named 'foo' can be accessed as 'variables.foo'.
  // - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
  //   See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
  // - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
  //   request resource.
  //
  // The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
  // object. No other metadata properties are accessible.
  //
  // Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
  // Accessible property names are escaped according to the following rules when accessed in the expression:
  // - '__' escapes to '__underscores__'
  // - '.' escapes to '__dot__'
  // - '-' escapes to '__dash__'
  // - '/' escapes to '__slash__'
  // - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
  //   "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
  //   "import", "let", "loop", "package", "namespace", "return".
  // Examples:
  //   - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
  //   - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
  //   - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
  //
  // Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
  // Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:
  //   - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
  //     non-intersecting elements in `Y` are appended, retaining their partial order.
  //   - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
  //     are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
  //     non-intersecting keys are appended, retaining their partial order.
  // Required.
  optional string Expression = 1;

  // Message represents the message displayed when validation fails. The message is required if the Expression contains
  // line breaks. The message must not contain line breaks.
  // If unset, the message is "failed rule: {Rule}".
  // e.g. "must be a URL with the host matching spec.host"
  // If the Expression contains line breaks. Message is required.
  // The message must not contain line breaks.
  // If unset, the message is "failed Expression: {Expression}".
  // +optional
  optional string message = 2;

  // Reason represents a machine-readable description of why this validation failed.
  // If this is the first validation in the list to fail, this reason, as well as the
  // corresponding HTTP response code, are used in the
  // HTTP response to the client.
  // The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
  // If not set, StatusReasonInvalid is used in the response to the client.
  // +optional
  optional string reason = 3;

  // messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
  // Since messageExpression is used as a failure message, it must evaluate to a string.
  // If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
  // If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
  // as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
  // that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
  // the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
  // messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
  // Example:
  // "object.x must be less than max ("+string(params.max)+")"
  // +optional
  optional string messageExpression = 4;
}

// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
// +structType=atomic
message Variable {
  // Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
  // The variable can be accessed in other expressions through `variables`
  // For example, if name is "foo", the variable will be available as `variables.foo`
  optional string Name = 1;

  // Expression is the expression that will be evaluated as the value of the variable.
  // The CEL expression has access to the same identifiers as the CEL expressions in Validation.
  optional string Expression = 2;
}

// WebhookClientConfig contains the information to make a TLS
// connection with the webhook
message WebhookClientConfig {

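For orientation, here is a minimal sketch of how the generated Go counterparts of the messages above are typically populated. It assumes the usual mirroring between generated.proto and the Go types in k8s.io/api/admissionregistration/v1 (see types.go below); the policy name, rule, and CEL expression are illustrative only and do not come from this repository.

package main

import (
    admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePolicy builds a ValidatingAdmissionPolicy that watches apps/v1
// Deployments and rejects any spec with more than 5 replicas.
func examplePolicy() admissionregistrationv1.ValidatingAdmissionPolicy {
    failurePolicy := admissionregistrationv1.Fail

    return admissionregistrationv1.ValidatingAdmissionPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "replica-limit.example.com"}, // hypothetical name
        Spec: admissionregistrationv1.ValidatingAdmissionPolicySpec{
            FailurePolicy: &failurePolicy,
            // matchConstraints selects which requests the policy looks at.
            MatchConstraints: &admissionregistrationv1.MatchResources{
                ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{{
                    RuleWithOperations: admissionregistrationv1.RuleWithOperations{
                        Operations: []admissionregistrationv1.OperationType{
                            admissionregistrationv1.Create,
                            admissionregistrationv1.Update,
                        },
                        Rule: admissionregistrationv1.Rule{
                            APIGroups:   []string{"apps"},
                            APIVersions: []string{"v1"},
                            Resources:   []string{"deployments"},
                        },
                    },
                }},
            },
            // validations are CEL expressions evaluated against the request.
            Validations: []admissionregistrationv1.Validation{{
                Expression: "object.spec.replicas <= 5",
                Message:    "deployments may not run more than 5 replicas",
            }},
        },
    }
}
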
4 vendor/k8s.io/api/admissionregistration/v1/register.go generated vendored
@@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
        &ValidatingWebhookConfigurationList{},
        &MutatingWebhookConfiguration{},
        &MutatingWebhookConfigurationList{},
        &ValidatingAdmissionPolicy{},
        &ValidatingAdmissionPolicyList{},
        &ValidatingAdmissionPolicyBinding{},
        &ValidatingAdmissionPolicyBindingList{},
    )
    metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
    return nil

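A minimal sketch of how these registrations are consumed, assuming the package's generated AddToScheme helper (built from addKnownTypes above) and runtime.NewScheme from k8s.io/apimachinery; this is generic scheme-registration usage, not code from this repository.

package main

import (
    admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

func main() {
    scheme := runtime.NewScheme()
    if err := admissionregistrationv1.AddToScheme(scheme); err != nil {
        panic(err)
    }
    // The scheme now knows ValidatingAdmissionPolicy{,List} and
    // ValidatingAdmissionPolicyBinding{,List} in addition to the webhook
    // configuration types, so generic codecs and clients can round-trip them.
}
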
610 vendor/k8s.io/api/admissionregistration/v1/types.go generated vendored
@@ -91,6 +91,18 @@ const (
    Fail FailurePolicyType = "Fail"
)

// ParameterNotFoundActionType specifies a failure policy that defines how a binding
// is evaluated when the param referred by its perNamespaceParamRef is not found.
type ParameterNotFoundActionType string

const (
    // Allow means all requests will be admitted if no param resources
    // could be found.
    AllowAction ParameterNotFoundActionType = "Allow"
    // Deny means all requests will be denied if no param resources are found.
    DenyAction ParameterNotFoundActionType = "Deny"
)

// MatchPolicyType specifies the type of match policy.
// +enum
type MatchPolicyType string
@@ -120,9 +132,590 @@ const (
|
||||
SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.30
|
||||
|
||||
// ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.
|
||||
type ValidatingAdmissionPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// Specification of the desired behavior of the ValidatingAdmissionPolicy.
|
||||
Spec ValidatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
// The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy
|
||||
// behaves in the expected way.
|
||||
// Populated by the system.
|
||||
// Read-only.
|
||||
// +optional
|
||||
Status ValidatingAdmissionPolicyStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.
|
||||
type ValidatingAdmissionPolicyStatus struct {
|
||||
// The generation observed by the controller.
|
||||
// +optional
|
||||
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"`
|
||||
// The results of type checking for each expression.
|
||||
// Presence of this field indicates the completion of the type checking.
|
||||
// +optional
|
||||
TypeChecking *TypeChecking `json:"typeChecking,omitempty" protobuf:"bytes,2,opt,name=typeChecking"`
|
||||
// The conditions represent the latest available observations of a policy's current state.
|
||||
// +optional
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
|
||||
}
|
||||
|
||||
// ValidatingAdmissionPolicyConditionType is the condition type of admission validation policy.
|
||||
type ValidatingAdmissionPolicyConditionType string
|
||||
|
||||
// TypeChecking contains results of type checking the expressions in the
|
||||
// ValidatingAdmissionPolicy
|
||||
type TypeChecking struct {
|
||||
// The type checking warnings for each expression.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
ExpressionWarnings []ExpressionWarning `json:"expressionWarnings,omitempty" protobuf:"bytes,1,rep,name=expressionWarnings"`
|
||||
}
|
||||
|
||||
// ExpressionWarning is a warning information that targets a specific expression.
|
||||
type ExpressionWarning struct {
|
||||
// The path to the field that refers the expression.
|
||||
// For example, the reference to the expression of the first item of
|
||||
// validations is "spec.validations[0].expression"
|
||||
FieldRef string `json:"fieldRef" protobuf:"bytes,2,opt,name=fieldRef"`
|
||||
// The content of type checking information in a human-readable form.
|
||||
// Each line of the warning contains the type that the expression is checked
|
||||
// against, followed by the type check error from the compiler.
|
||||
Warning string `json:"warning" protobuf:"bytes,3,opt,name=warning"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.30
|
||||
|
||||
// ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.
|
||||
type ValidatingAdmissionPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of ValidatingAdmissionPolicy.
|
||||
Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}

// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
type ValidatingAdmissionPolicySpec struct {
// ParamKind specifies the kind of resources used to parameterize this policy.
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
// If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
// If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.
// +optional
ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`

// MatchConstraints specifies what resources this policy is designed to validate.
// The AdmissionPolicy cares about a request if it matches _all_ Constraints.
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
// ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
// Required.
MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`

// Validations contain CEL expressions which are used to apply the validation.
// Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is
// required.
// +listType=atomic
// +optional
Validations []Validation `json:"validations,omitempty" protobuf:"bytes,3,rep,name=validations"`

// failurePolicy defines how to handle failures for the admission policy. Failures can
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
// or mis-configured policy definitions or bindings.
//
// A policy is invalid if spec.paramKind refers to a non-existent Kind.
// A binding is invalid if spec.paramRef.name refers to a non-existent resource.
//
// failurePolicy does not define how validations that evaluate to false are handled.
//
// When failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions
// define how failures are enforced.
//
// Allowed values are Ignore or Fail. Defaults to Fail.
// +optional
FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"`

// auditAnnotations contains CEL expressions which are used to produce audit
// annotations for the audit event of the API request.
// validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is
// required.
// +listType=atomic
// +optional
AuditAnnotations []AuditAnnotation `json:"auditAnnotations,omitempty" protobuf:"bytes,5,rep,name=auditAnnotations"`

// MatchConditions is a list of conditions that must be met for a request to be validated.
// Match conditions filter requests that have already been matched by the rules,
// namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests.
// There are a maximum of 64 match conditions allowed.
//
// If a parameter object is provided, it can be accessed via the `params` handle in the same
// manner as validation expressions.
//
// The exact matching logic is (in order):
// 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
// 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
// 3. If any matchCondition evaluates to an error (but none are FALSE):
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the policy is skipped
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`

// Variables contain definitions of variables that can be used in composition of other expressions.
// Each variable is defined as a named CEL expression.
// The variables defined here will be available under `variables` in other expressions of the policy
// except MatchConditions because MatchConditions are evaluated before the rest of the policy.
//
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
// Thus, Variables must be sorted by the order of first appearance and acyclic.
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +optional
Variables []Variable `json:"variables,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=variables"`
}
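
The spec above is easiest to read next to a concrete value. The following is an illustrative, editor-added sketch (not part of the vendored file) of how the fields compose, assuming this vendored k8s.io/api/admissionregistration/v1 package; the policy name and the replica limit are hypothetical. MatchConditions and Variables are optional and omitted.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Fail is the default failurePolicy; set explicitly here for clarity.
	fail := admissionregistrationv1.Fail

	policy := admissionregistrationv1.ValidatingAdmissionPolicy{
		// "demo-replica-limit" is a hypothetical policy name.
		ObjectMeta: metav1.ObjectMeta{Name: "demo-replica-limit"},
		Spec: admissionregistrationv1.ValidatingAdmissionPolicySpec{
			FailurePolicy: &fail,
			// MatchConstraints is required; this policy targets apps/v1 Deployments.
			MatchConstraints: &admissionregistrationv1.MatchResources{
				ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{{
					RuleWithOperations: admissionregistrationv1.RuleWithOperations{
						Operations: []admissionregistrationv1.OperationType{
							admissionregistrationv1.Create,
							admissionregistrationv1.Update,
						},
						Rule: admissionregistrationv1.Rule{
							APIGroups:   []string{"apps"},
							APIVersions: []string{"v1"},
							Resources:   []string{"deployments"},
						},
					},
				}},
			},
			// At least one of Validations or AuditAnnotations must be present.
			Validations: []admissionregistrationv1.Validation{{
				Expression: "object.spec.replicas <= 5",
				Message:    "replicas must be no more than 5",
			}},
		},
	}

	fmt.Println(policy.Name, len(policy.Spec.Validations))
}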

// ParamKind is a tuple of Group Kind and Version.
// +structType=atomic
type ParamKind struct {
// APIVersion is the API group version the resources belong to.
// In format of "group/version".
// Required.
APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,rep,name=apiVersion"`

// Kind is the API kind the resources belong to.
// Required.
Kind string `json:"kind,omitempty" protobuf:"bytes,2,rep,name=kind"`
}

// Validation specifies the CEL expression which is used to apply the validation.
type Validation struct {
// Expression represents the expression which will be evaluated by CEL.
// ref: https://github.com/google/cel-spec
// CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:
//
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
// - 'oldObject' - The existing object. The value is null for CREATE requests.
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
// request resource.
//
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
// object. No other metadata properties are accessible.
//
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
// Accessible property names are escaped according to the following rules when accessed in the expression:
// - '__' escapes to '__underscores__'
// - '.' escapes to '__dot__'
// - '-' escapes to '__dash__'
// - '/' escapes to '__slash__'
// - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:
// "true", "false", "null", "in", "as", "break", "const", "continue", "else", "for", "function", "if",
// "import", "let", "loop", "package", "namespace", "return".
// Examples:
// - Expression accessing a property named "namespace": {"Expression": "object.__namespace__ > 0"}
// - Expression accessing a property named "x-prop": {"Expression": "object.x__dash__prop > 0"}
// - Expression accessing a property named "redact__d": {"Expression": "object.redact__underscores__d > 0"}
//
// Equality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1].
// Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:
// - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and
// non-intersecting elements in `Y` are appended, retaining their partial order.
// - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values
// are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with
// non-intersecting keys are appended, retaining their partial order.
// Required.
Expression string `json:"expression" protobuf:"bytes,1,opt,name=Expression"`
// Message represents the message displayed when validation fails. The message is required if the Expression contains
// line breaks. The message must not contain line breaks.
// If unset, the message is "failed rule: {Rule}".
// e.g. "must be a URL with the host matching spec.host"
// If the Expression contains line breaks, Message is required.
// The message must not contain line breaks.
// If unset, the message is "failed Expression: {Expression}".
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
// Reason represents a machine-readable description of why this validation failed.
// If this is the first validation in the list to fail, this reason, as well as the
// corresponding HTTP response code, are used in the
// HTTP response to the client.
// The currently supported reasons are: "Unauthorized", "Forbidden", "Invalid", "RequestEntityTooLarge".
// If not set, StatusReasonInvalid is used in the response to the client.
// +optional
Reason *metav1.StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
// messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails.
// Since messageExpression is used as a failure message, it must evaluate to a string.
// If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
// If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced
// as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string
// that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and
// the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
// messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'.
// Example:
// "object.x must be less than max ("+string(params.max)+")"
// +optional
MessageExpression string `json:"messageExpression,omitempty" protobuf:"bytes,4,opt,name=messageExpression"`
}
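
An illustrative, editor-added sketch (not part of the vendored file) of a single Validation, showing the '__dash__' escaping and messageExpression described above. It assumes a hypothetical parameter kind whose spec has a "max-replicas" field, referenced through the policy's paramKind.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reason := metav1.StatusReasonInvalid

	v := admissionregistrationv1.Validation{
		// "max-replicas" contains a dash, so it is written with the __dash__ escape in CEL.
		Expression: "object.spec.replicas <= params.spec.max__dash__replicas",
		// messageExpression may use the same variables as Expression (except 'authorizer').
		MessageExpression: "'replicas must be at most ' + string(params.spec.max__dash__replicas)",
		Reason:            &reason,
	}

	fmt.Println(v.Expression)
}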

// Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.
// +structType=atomic
type Variable struct {
// Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables.
// The variable can be accessed in other expressions through `variables`
// For example, if name is "foo", the variable will be available as `variables.foo`
Name string `json:"name" protobuf:"bytes,1,opt,name=Name"`

// Expression is the expression that will be evaluated as the value of the variable.
// The CEL expression has access to the same identifiers as the CEL expressions in Validation.
Expression string `json:"expression" protobuf:"bytes,2,opt,name=Expression"`
}
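
For illustration only (editor-added, not vendored code): a composited variable consumed by a later validation through `variables`. The spec is partial; matchConstraints is omitted for brevity, and the limit of 10 is hypothetical.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	spec := admissionregistrationv1.ValidatingAdmissionPolicySpec{
		// Variables are evaluated lazily and may only refer to variables defined earlier in the list.
		Variables: []admissionregistrationv1.Variable{{
			Name:       "replicas",
			Expression: "object.spec.replicas",
		}},
		Validations: []admissionregistrationv1.Validation{{
			// The variable defined above is reachable as variables.replicas.
			Expression: "variables.replicas <= 10",
			Message:    "replicas must be at most 10",
		}},
	}

	fmt.Println(len(spec.Variables), len(spec.Validations))
}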

// AuditAnnotation describes how to produce an audit annotation for an API request.
type AuditAnnotation struct {
// key specifies the audit annotation key. The audit annotation keys of
// a ValidatingAdmissionPolicy must be unique. The key must be a qualified
// name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.
//
// The key is combined with the resource name of the
// ValidatingAdmissionPolicy to construct an audit annotation key:
// "{ValidatingAdmissionPolicy name}/{key}".
//
// If an admission webhook uses the same resource name as this ValidatingAdmissionPolicy
// and the same audit annotation key, the annotation key will be identical.
// In this case, the first annotation written with the key will be included
// in the audit event and all subsequent annotations with the same key
// will be discarded.
//
// Required.
Key string `json:"key" protobuf:"bytes,1,opt,name=key"`

// valueExpression represents the expression which is evaluated by CEL to
// produce an audit annotation value. The expression must evaluate to either
// a string or null value. If the expression evaluates to a string, the
// audit annotation is included with the string value. If the expression
// evaluates to null or empty string the audit annotation will be omitted.
// The valueExpression may be no longer than 5kb in length.
// If the result of the valueExpression is more than 10kb in length, it
// will be truncated to 10kb.
//
// If multiple ValidatingAdmissionPolicyBinding resources match an
// API request, then the valueExpression will be evaluated for
// each binding. All unique values produced by the valueExpressions
// will be joined together in a comma-separated list.
//
// Required.
ValueExpression string `json:"valueExpression" protobuf:"bytes,2,opt,name=valueExpression"`
}
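
An editor-added sketch (not part of the vendored file) of a single AuditAnnotation; the key and the replica threshold are hypothetical.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	a := admissionregistrationv1.AuditAnnotation{
		// Recorded on the audit event as "{policy name}/high-replica-count".
		Key: "high-replica-count",
		// A null result omits the annotation; a string result records it.
		ValueExpression: "object.spec.replicas > 50 ? 'replicas=' + string(object.spec.replicas) : null",
	}

	fmt.Println(a.Key)
}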

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30

// ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources.
// ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.
//
// For a given admission request, each binding will cause its policy to be
// evaluated N times, where N is 1 for policies/bindings that don't use
// params, otherwise N is the number of parameters selected by the binding.
//
// The CEL expressions of a policy must have a computed CEL cost below the maximum
// CEL budget. Each evaluation of the policy is given an independent CEL cost budget.
// Adding/removing policies, bindings, or params can not affect whether a
// given (policy, binding, param) combination is within its own CEL budget.
type ValidatingAdmissionPolicyBinding struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
Spec ValidatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30

// ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of PolicyBinding.
Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
type ValidatingAdmissionPolicyBindingSpec struct {
// PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
// If the referenced resource does not exist, this binding is considered invalid and will be ignored.
// Required.
PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`

// paramRef specifies the parameter resource used to configure the admission control policy.
// It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy.
// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied.
// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
// +optional
ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`

// MatchResources declares what resources match this binding and will be validated by it.
// Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this.
// If this is unset, all resources matched by the policy are validated by this binding.
// When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated.
// Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.
// +optional
MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`

// validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced.
// If a validation evaluates to false it is always enforced according to these actions.
//
// Failures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according
// to these actions only if the FailurePolicy is set to Fail, otherwise the failures are
// ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.
//
// validationActions is declared as a set of action values. Order does
// not matter. validationActions may not contain duplicates of the same action.
//
// The supported action values are:
//
// "Deny" specifies that a validation failure results in a denied request.
//
// "Warn" specifies that a validation failure is reported to the request client
// in HTTP Warning headers, with a warning code of 299. Warnings can be sent
// both for allowed or denied admission responses.
//
// "Audit" specifies that a validation failure is included in the published
// audit event for the request. The audit event will contain a
// `validation.policy.admission.k8s.io/validation_failure` audit annotation
// with a value containing the details of the validation failures, formatted as
// a JSON list of objects, each with the following fields:
// - message: The validation failure message string
// - policy: The resource name of the ValidatingAdmissionPolicy
// - binding: The resource name of the ValidatingAdmissionPolicyBinding
// - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy
// - validationActions: The enforcement actions enacted for the validation failure
// Example audit annotation:
// `"validation.policy.admission.k8s.io/validation_failure": "[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]"`
//
// Clients should expect to handle additional values by ignoring
// any values not recognized.
//
// "Deny" and "Warn" may not be used together since this combination
// needlessly duplicates the validation failure both in the
// API response body and the HTTP warning headers.
//
// Required.
// +listType=set
ValidationActions []ValidationAction `json:"validationActions,omitempty" protobuf:"bytes,4,rep,name=validationActions"`
}
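
An editor-added sketch (not part of the vendored file) of a binding spec: the policy name, param object name and namespace are hypothetical, and ParameterNotFoundAction is constructed from its string value rather than a named constant.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	denyNotFound := admissionregistrationv1.ParameterNotFoundActionType("Deny")

	binding := admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec{
		// Must reference an existing ValidatingAdmissionPolicy.
		PolicyName: "demo-replica-limit",
		ParamRef: &admissionregistrationv1.ParamRef{
			Name:                    "replica-limit-prod", // a single named param object
			Namespace:               "policy-params",
			ParameterNotFoundAction: &denyNotFound,
		},
		// Deny and Audit may be combined; Deny and Warn may not.
		ValidationActions: []admissionregistrationv1.ValidationAction{
			admissionregistrationv1.Deny,
			admissionregistrationv1.Audit,
		},
	}

	fmt.Println(binding.PolicyName, binding.ValidationActions)
}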

// ParamRef describes how to locate the params to be used as input to
// expressions of rules applied by a policy binding.
// +structType=atomic
type ParamRef struct {
// name is the name of the resource being referenced.
//
// One of `name` or `selector` must be set, but `name` and `selector` are
// mutually exclusive properties. If one is set, the other must be unset.
//
// A single parameter used for all admission requests can be configured
// by setting the `name` field, leaving `selector` blank, and setting namespace
// if `paramKind` is namespace-scoped.
//
Name string `json:"name,omitempty" protobuf:"bytes,1,rep,name=name"`

// namespace is the namespace of the referenced resource. Allows limiting
// the search for params to a specific namespace. Applies to both `name` and
// `selector` fields.
//
// A per-namespace parameter may be used by specifying a namespace-scoped
// `paramKind` in the policy and leaving this field empty.
//
// - If `paramKind` is cluster-scoped, this field MUST be unset. Setting this
// field results in a configuration error.
//
// - If `paramKind` is namespace-scoped, the namespace of the object being
// evaluated for admission will be used when this field is left unset. Take
// care that if this is left empty the binding must not match any cluster-scoped
// resources, which will result in an error.
//
// +optional
Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,rep,name=namespace"`

// selector can be used to match multiple param objects based on their labels.
// Supply selector: {} to match all resources of the ParamKind.
//
// If multiple params are found, they are all evaluated with the policy expressions
// and the results are ANDed together.
//
// One of `name` or `selector` must be set, but `name` and `selector` are
// mutually exclusive properties. If one is set, the other must be unset.
//
// +optional
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,rep,name=selector"`

// `parameterNotFoundAction` controls the behavior of the binding when the resource
// exists, and name or selector is valid, but there are no parameters
// matched by the binding. If the value is set to `Allow`, then no
// matched parameters will be treated as successful validation by the binding.
// If set to `Deny`, then no matched parameters will be subject to the
// `failurePolicy` of the policy.
//
// Allowed values are `Allow` or `Deny`
//
// Required
ParameterNotFoundAction *ParameterNotFoundActionType `json:"parameterNotFoundAction,omitempty" protobuf:"bytes,4,rep,name=parameterNotFoundAction"`
}
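
For illustration only (editor-added): the selector form of ParamRef, where name stays empty because name and selector are mutually exclusive. The label key/value is hypothetical.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	allowNotFound := admissionregistrationv1.ParameterNotFoundActionType("Allow")

	ref := admissionregistrationv1.ParamRef{
		// All param objects labeled environment=prod are selected and ANDed together.
		Selector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"environment": "prod"},
		},
		ParameterNotFoundAction: &allowNotFound,
	}

	fmt.Println(ref.Selector.MatchLabels)
}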

// MatchResources decides whether to run the admission control policy on an object based
// on whether it meets the match criteria.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
// +structType=atomic
type MatchResources struct {
// NamespaceSelector decides whether to run the admission control policy on an object based
// on whether the namespace for that object matches the selector. If the
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the policy.
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1"; you will set the selector as
// follows:
// "namespaceSelector": {
// "matchExpressions": [
// {
// "key": "runlevel",
// "operator": "NotIn",
// "values": [
// "0",
// "1"
// ]
// }
// ]
// }
//
// If instead you want to only run the policy on any objects whose
// namespace is associated with the "environment" of "prod" or "staging";
// you will set the selector as follows:
// "namespaceSelector": {
// "matchExpressions": [
// {
// "key": "environment",
// "operator": "In",
// "values": [
// "prod",
// "staging"
// ]
// }
// ]
// }
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
// Default to the empty LabelSelector, which matches everything.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,1,opt,name=namespaceSelector"`
// ObjectSelector decides whether to run the validation based on if the
// object has matching labels. objectSelector is evaluated against both
// the oldObject and newObject that would be sent to the cel validation, and
// is considered to match if either object matches the selector. A null
// object (oldObject in the case of create, or newObject in the case of
// delete) or an object that cannot have labels (like a
// DeploymentRollback or a PodProxyOptions object) is not considered to
// match.
// Use the object selector only if the webhook is opt-in, because end
// users may skip the admission webhook by setting the labels.
// Default to the empty LabelSelector, which matches everything.
// +optional
ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,2,opt,name=objectSelector"`
// ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
// The policy cares about an operation if it matches _any_ Rule.
// +listType=atomic
// +optional
ResourceRules []NamedRuleWithOperations `json:"resourceRules,omitempty" protobuf:"bytes,3,rep,name=resourceRules"`
// ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
// The exclude rules take precedence over include rules (if a resource matches both, it is excluded)
// +listType=atomic
// +optional
ExcludeResourceRules []NamedRuleWithOperations `json:"excludeResourceRules,omitempty" protobuf:"bytes,4,rep,name=excludeResourceRules"`
// matchPolicy defines how the "MatchResources" list is used to match incoming requests.
// Allowed values are "Exact" or "Equivalent".
//
// - Exact: match a request only if it exactly matches a specified rule.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.
//
// - Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version.
// For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1,
// and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`,
// a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.
//
// Defaults to "Equivalent"
// +optional
MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,7,opt,name=matchPolicy,casttype=MatchPolicyType"`
}
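
An editor-added sketch (not part of the vendored file) expressing the "runlevel" NotIn selector from the comment above as a Go literal; the label key and values mirror that example.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	equivalent := admissionregistrationv1.Equivalent

	mr := admissionregistrationv1.MatchResources{
		// Skip namespaces labeled runlevel=0 or runlevel=1.
		NamespaceSelector: &metav1.LabelSelector{
			MatchExpressions: []metav1.LabelSelectorRequirement{{
				Key:      "runlevel",
				Operator: metav1.LabelSelectorOpNotIn,
				Values:   []string{"0", "1"},
			}},
		},
		MatchPolicy: &equivalent,
	}

	fmt.Println(mr.NamespaceSelector)
}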

// ValidationAction specifies a policy enforcement action.
// +enum
type ValidationAction string

const (
// Deny specifies that a validation failure results in a denied request.
Deny ValidationAction = "Deny"
// Warn specifies that a validation failure is reported to the request client
// in HTTP Warning headers, with a warning code of 299. Warnings can be sent
// both for allowed or denied admission responses.
Warn ValidationAction = "Warn"
// Audit specifies that a validation failure is included in the published
// audit event for the request. The audit event will contain a
// `validation.policy.admission.k8s.io/validation_failure` audit annotation
// with a value containing the details of the validation failure.
Audit ValidationAction = "Audit"
)

// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
// +structType=atomic
type NamedRuleWithOperations struct {
// ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.
// +listType=atomic
// +optional
ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,1,rep,name=resourceNames"`
// RuleWithOperations is a tuple of Operations and Resources.
RuleWithOperations `json:",inline" protobuf:"bytes,2,opt,name=ruleWithOperations"`
}
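
An editor-added sketch (not part of the vendored file) of a named rule scoped to two specific ConfigMap names; the names are hypothetical, and an empty ResourceNames list would match all names.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	rule := admissionregistrationv1.NamedRuleWithOperations{
		ResourceNames: []string{"cluster-config", "feature-flags"},
		RuleWithOperations: admissionregistrationv1.RuleWithOperations{
			Operations: []admissionregistrationv1.OperationType{admissionregistrationv1.Update},
			Rule: admissionregistrationv1.Rule{
				APIGroups:   []string{""},
				APIVersions: []string{"v1"},
				Resources:   []string{"configmaps"},
			},
		},
	}

	fmt.Println(rule.ResourceNames)
}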

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.16

// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
type ValidatingWebhookConfiguration struct {
@@ -134,10 +727,13 @@ type ValidatingWebhookConfiguration struct {
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.16

// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.
type ValidatingWebhookConfigurationList struct {
@@ -153,6 +749,7 @@ type ValidatingWebhookConfigurationList struct {
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.16

// MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object and may change it.
type MutatingWebhookConfiguration struct {
@@ -164,10 +761,13 @@ type MutatingWebhookConfiguration struct {
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.16

// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.
type MutatingWebhookConfigurationList struct {
@@ -199,6 +799,7 @@ type ValidatingWebhook struct {
// from putting the cluster in a state which cannot be recovered from without completely
// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
// +listType=atomic
Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`

// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
@@ -306,6 +907,7 @@ type ValidatingWebhook struct {
// If a persisted webhook configuration specifies allowed versions and does not
// include any versions known to the API Server, calls to the webhook will fail
// and be subject to the failure policy.
// +listType=atomic
AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"`

// MatchConditions is a list of conditions that must be met for a request to be sent to this
@@ -320,13 +922,10 @@ type ValidatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
// This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +featureGate=AdmissionWebhookMatchConditions
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,opt,name=matchConditions"`
}
@@ -350,6 +949,7 @@ type MutatingWebhook struct {
// from putting the cluster in a state which cannot be recovered from without completely
// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
// +listType=atomic
Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`

// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
@@ -457,6 +1057,7 @@ type MutatingWebhook struct {
// If a persisted webhook configuration specifies allowed versions and does not
// include any versions known to the API Server, calls to the webhook will fail
// and be subject to the failure policy.
// +listType=atomic
AdmissionReviewVersions []string `json:"admissionReviewVersions" protobuf:"bytes,8,rep,name=admissionReviewVersions"`

// reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
@@ -489,13 +1090,10 @@ type MutatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
// This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +featureGate=AdmissionWebhookMatchConditions
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,opt,name=matchConditions"`
}

178
vendor/k8s.io/api/admissionregistration/v1/types_swagger_doc_generated.go
generated
vendored
@@ -27,6 +27,26 @@ package v1
// Those methods can be generated by using hack/update-codegen.sh

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AuditAnnotation = map[string]string{
"": "AuditAnnotation describes how to produce an audit annotation for an API request.",
"key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
"valueExpression": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.",
}

func (AuditAnnotation) SwaggerDoc() map[string]string {
return map_AuditAnnotation
}

var map_ExpressionWarning = map[string]string{
"": "ExpressionWarning is warning information that targets a specific expression.",
"fieldRef": "The path to the field that refers to the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"",
"warning": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.",
}

func (ExpressionWarning) SwaggerDoc() map[string]string {
return map_ExpressionWarning
}

var map_MatchCondition = map[string]string{
"": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.",
"name": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.",
@@ -37,6 +57,19 @@ func (MatchCondition) SwaggerDoc() map[string]string {
return map_MatchCondition
}

var map_MatchResources = map[string]string{
"": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
"namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
"objectSelector": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.",
"resourceRules": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.",
"excludeResourceRules": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
"matchPolicy": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"",
}

func (MatchResources) SwaggerDoc() map[string]string {
return map_MatchResources
}

var map_MutatingWebhook = map[string]string{
"": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@@ -50,7 +83,7 @@ var map_MutatingWebhook = map[string]string{
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.",
"reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped",
}

func (MutatingWebhook) SwaggerDoc() map[string]string {
@@ -77,6 +110,37 @@ func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_MutatingWebhookConfigurationList
}

var map_NamedRuleWithOperations = map[string]string{
"": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
}

func (NamedRuleWithOperations) SwaggerDoc() map[string]string {
return map_NamedRuleWithOperations
}

var map_ParamKind = map[string]string{
"": "ParamKind is a tuple of Group Kind and Version.",
"apiVersion": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.",
"kind": "Kind is the API kind the resources belong to. Required.",
}

func (ParamKind) SwaggerDoc() map[string]string {
return map_ParamKind
}

var map_ParamRef = map[string]string{
"": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.",
"name": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.",
"namespace": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.",
"selector": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.",
"parameterNotFoundAction": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired",
}

func (ParamRef) SwaggerDoc() map[string]string {
return map_ParamRef
}

var map_Rule = map[string]string{
"": "Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended to make sure that all the tuple expansions are valid.",
"apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
@@ -110,6 +174,94 @@ func (ServiceReference) SwaggerDoc() map[string]string {
return map_ServiceReference
}

var map_TypeChecking = map[string]string{
"": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy",
"expressionWarnings": "The type checking warnings for each expression.",
}

func (TypeChecking) SwaggerDoc() map[string]string {
return map_TypeChecking
}

var map_ValidatingAdmissionPolicy = map[string]string{
"": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.",
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
"spec": "Specification of the desired behavior of the ValidatingAdmissionPolicy.",
"status": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.",
}

func (ValidatingAdmissionPolicy) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicy
}

var map_ValidatingAdmissionPolicyBinding = map[string]string{
"": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
"spec": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.",
}

func (ValidatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicyBinding
}

var map_ValidatingAdmissionPolicyBindingList = map[string]string{
"": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "List of PolicyBinding.",
}

func (ValidatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicyBindingList
}
|
||||
|
||||
var map_ValidatingAdmissionPolicyBindingSpec = map[string]string{
|
||||
"": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.",
|
||||
"policyName": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
|
||||
"paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
|
||||
"matchResources": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.",
|
||||
"validationActions": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.",
}
func (ValidatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicyBindingSpec
}
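As a quick orientation for readers of this vendored API (not part of the diff itself), the following minimal Go sketch shows how the fields documented in map_ValidatingAdmissionPolicyBindingSpec fit together; the policy name and namespace label are placeholder values.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	spec := admissionregistrationv1.ValidatingAdmissionPolicyBindingSpec{
		// policyName must reference an existing ValidatingAdmissionPolicy,
		// otherwise the binding is ignored (see the field docs above).
		PolicyName: "replica-limit.example.com",
		// "Deny" and "Audit" may be combined; "Deny" and "Warn" may not.
		ValidationActions: []admissionregistrationv1.ValidationAction{
			admissionregistrationv1.ValidationAction("Deny"),
			admissionregistrationv1.ValidationAction("Audit"),
		},
		// matchResources is intersected with the policy's matchConstraints.
		MatchResources: &admissionregistrationv1.MatchResources{
			NamespaceSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"environment": "test"},
			},
		},
	}
	fmt.Println(spec.PolicyName, spec.ValidationActions)
}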
var map_ValidatingAdmissionPolicyList = map[string]string{
"": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
"items": "List of ValidatingAdmissionPolicy.",
}
func (ValidatingAdmissionPolicyList) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicyList
}
var map_ValidatingAdmissionPolicySpec = map[string]string{
"": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.",
"paramKind": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.",
"matchConstraints": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.",
"validations": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.",
"failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
"auditAnnotations": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
"variables": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.",
}
func (ValidatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicySpec
}
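A hedged sketch assembling a ValidatingAdmissionPolicySpec from the fields documented above; the API group, resource names and CEL expressions are illustrative only.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	fail := admissionregistrationv1.Fail

	spec := admissionregistrationv1.ValidatingAdmissionPolicySpec{
		FailurePolicy: &fail,
		// matchConstraints is required and, unlike a binding's matchResources,
		// must set resourceRules.
		MatchConstraints: &admissionregistrationv1.MatchResources{
			ResourceRules: []admissionregistrationv1.NamedRuleWithOperations{{
				RuleWithOperations: admissionregistrationv1.RuleWithOperations{
					Operations: []admissionregistrationv1.OperationType{
						admissionregistrationv1.Create,
						admissionregistrationv1.Update,
					},
					Rule: admissionregistrationv1.Rule{
						APIGroups:   []string{"apps"},
						APIVersions: []string{"v1"},
						Resources:   []string{"deployments"},
					},
				},
			}},
		},
		// Variables are evaluated lazily and exposed to later expressions
		// under `variables`, except in matchConditions.
		Variables: []admissionregistrationv1.Variable{{
			Name:       "replicas",
			Expression: "object.spec.replicas",
		}},
		Validations: []admissionregistrationv1.Validation{{
			Expression: "variables.replicas <= 10",
			Message:    "replicas must be no more than 10",
		}},
	}
	fmt.Println(len(spec.Validations))
}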
var map_ValidatingAdmissionPolicyStatus = map[string]string{
"": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.",
"observedGeneration": "The generation observed by the controller.",
"typeChecking": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.",
"conditions": "The conditions represent the latest available observations of a policy's current state.",
}
func (ValidatingAdmissionPolicyStatus) SwaggerDoc() map[string]string {
return map_ValidatingAdmissionPolicyStatus
}
var map_ValidatingWebhook = map[string]string{
"": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
@@ -122,7 +274,7 @@ var map_ValidatingWebhook = map[string]string{
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped",
}
func (ValidatingWebhook) SwaggerDoc() map[string]string {
@@ -149,6 +301,28 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string {
return map_ValidatingWebhookConfigurationList
}
var map_Validation = map[string]string{
"": "Validation specifies the CEL expression which is used to apply the validation.",
"expression": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ > 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop > 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d > 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.",
"message": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".",
"reason": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.",
"messageExpression": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"",
}
func (Validation) SwaggerDoc() map[string]string {
return map_Validation
}
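To make the expression/message/messageExpression/reason relationship above concrete, here is a short illustrative Validation; the params.maxReplicas attribute is a hypothetical field of whatever parameter resource a binding would supply.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reason := metav1.StatusReasonForbidden
	v := admissionregistrationv1.Validation{
		// Evaluated by CEL against the admitted object and bound params.
		Expression: "object.spec.replicas <= params.maxReplicas",
		// Used only when the expression evaluates to false.
		MessageExpression: `"replicas must not exceed " + string(params.maxReplicas)`,
		// Maps to the HTTP response; defaults to StatusReasonInvalid if unset.
		Reason: &reason,
	}
	fmt.Println(v.Expression)
}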
var map_Variable = map[string]string{
"": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.",
"name": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`",
"expression": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.",
}
func (Variable) SwaggerDoc() map[string]string {
return map_Variable
}
var map_WebhookClientConfig = map[string]string{
"": "WebhookClientConfig contains the information to make a TLS connection with the webhook",
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
432 vendor/k8s.io/api/admissionregistration/v1/zz_generated.deepcopy.go (generated, vendored)
@@ -26,6 +26,38 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditAnnotation.
|
||||
func (in *AuditAnnotation) DeepCopy() *AuditAnnotation {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuditAnnotation)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ExpressionWarning) DeepCopyInto(out *ExpressionWarning) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExpressionWarning.
|
||||
func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ExpressionWarning)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
|
||||
*out = *in
|
||||
@@ -42,6 +74,51 @@ func (in *MatchCondition) DeepCopy() *MatchCondition {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MatchResources) DeepCopyInto(out *MatchResources) {
|
||||
*out = *in
|
||||
if in.NamespaceSelector != nil {
|
||||
in, out := &in.NamespaceSelector, &out.NamespaceSelector
|
||||
*out = new(metav1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ObjectSelector != nil {
|
||||
in, out := &in.ObjectSelector, &out.ObjectSelector
|
||||
*out = new(metav1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ResourceRules != nil {
|
||||
in, out := &in.ResourceRules, &out.ResourceRules
|
||||
*out = make([]NamedRuleWithOperations, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.ExcludeResourceRules != nil {
|
||||
in, out := &in.ExcludeResourceRules, &out.ExcludeResourceRules
|
||||
*out = make([]NamedRuleWithOperations, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.MatchPolicy != nil {
|
||||
in, out := &in.MatchPolicy, &out.MatchPolicy
|
||||
*out = new(MatchPolicyType)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources.
|
||||
func (in *MatchResources) DeepCopy() *MatchResources {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MatchResources)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
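The generated DeepCopyInto above allocates fresh selectors and slices, so a copy shares no mutable state with its source. A small sketch of that contract (the label values are placeholders):

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &admissionregistrationv1.MatchResources{
		NamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"team": "platform"},
		},
	}

	copied := orig.DeepCopy()
	copied.NamespaceSelector.MatchLabels["team"] = "changed"

	// The original is untouched because the copy received its own
	// LabelSelector and MatchLabels map.
	fmt.Println(orig.NamespaceSelector.MatchLabels["team"])   // platform
	fmt.Println(copied.NamespaceSelector.MatchLabels["team"]) // changed
}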
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) {
|
||||
*out = *in
|
||||
@@ -177,6 +254,70 @@ func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
|
||||
*out = *in
|
||||
if in.ResourceNames != nil {
|
||||
in, out := &in.ResourceNames, &out.ResourceNames
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
in.RuleWithOperations.DeepCopyInto(&out.RuleWithOperations)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRuleWithOperations.
|
||||
func (in *NamedRuleWithOperations) DeepCopy() *NamedRuleWithOperations {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NamedRuleWithOperations)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ParamKind) DeepCopyInto(out *ParamKind) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamKind.
|
||||
func (in *ParamKind) DeepCopy() *ParamKind {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ParamKind)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ParamRef) DeepCopyInto(out *ParamRef) {
|
||||
*out = *in
|
||||
if in.Selector != nil {
|
||||
in, out := &in.Selector, &out.Selector
|
||||
*out = new(metav1.LabelSelector)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ParameterNotFoundAction != nil {
|
||||
in, out := &in.ParameterNotFoundAction, &out.ParameterNotFoundAction
|
||||
*out = new(ParameterNotFoundActionType)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParamRef.
|
||||
func (in *ParamRef) DeepCopy() *ParamRef {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ParamRef)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Rule) DeepCopyInto(out *Rule) {
|
||||
*out = *in
|
||||
@@ -261,6 +402,260 @@ func (in *ServiceReference) DeepCopy() *ServiceReference {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *TypeChecking) DeepCopyInto(out *TypeChecking) {
|
||||
*out = *in
|
||||
if in.ExpressionWarnings != nil {
|
||||
in, out := &in.ExpressionWarnings, &out.ExpressionWarnings
|
||||
*out = make([]ExpressionWarning, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypeChecking.
|
||||
func (in *TypeChecking) DeepCopy() *TypeChecking {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(TypeChecking)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicy) DeepCopyInto(out *ValidatingAdmissionPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicy.
|
||||
func (in *ValidatingAdmissionPolicy) DeepCopy() *ValidatingAdmissionPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ValidatingAdmissionPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicyBinding) DeepCopyInto(out *ValidatingAdmissionPolicyBinding) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBinding.
|
||||
func (in *ValidatingAdmissionPolicyBinding) DeepCopy() *ValidatingAdmissionPolicyBinding {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicyBinding)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ValidatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicyBindingList) DeepCopyInto(out *ValidatingAdmissionPolicyBindingList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ValidatingAdmissionPolicyBinding, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingList.
|
||||
func (in *ValidatingAdmissionPolicyBindingList) DeepCopy() *ValidatingAdmissionPolicyBindingList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicyBindingList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ValidatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopyInto(out *ValidatingAdmissionPolicyBindingSpec) {
|
||||
*out = *in
|
||||
if in.ParamRef != nil {
|
||||
in, out := &in.ParamRef, &out.ParamRef
|
||||
*out = new(ParamRef)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.MatchResources != nil {
|
||||
in, out := &in.MatchResources, &out.MatchResources
|
||||
*out = new(MatchResources)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.ValidationActions != nil {
|
||||
in, out := &in.ValidationActions, &out.ValidationActions
|
||||
*out = make([]ValidationAction, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyBindingSpec.
|
||||
func (in *ValidatingAdmissionPolicyBindingSpec) DeepCopy() *ValidatingAdmissionPolicyBindingSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicyBindingSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicyList) DeepCopyInto(out *ValidatingAdmissionPolicyList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ValidatingAdmissionPolicy, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyList.
|
||||
func (in *ValidatingAdmissionPolicyList) DeepCopy() *ValidatingAdmissionPolicyList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicyList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ValidatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicySpec) DeepCopyInto(out *ValidatingAdmissionPolicySpec) {
|
||||
*out = *in
|
||||
if in.ParamKind != nil {
|
||||
in, out := &in.ParamKind, &out.ParamKind
|
||||
*out = new(ParamKind)
|
||||
**out = **in
|
||||
}
|
||||
if in.MatchConstraints != nil {
|
||||
in, out := &in.MatchConstraints, &out.MatchConstraints
|
||||
*out = new(MatchResources)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Validations != nil {
|
||||
in, out := &in.Validations, &out.Validations
|
||||
*out = make([]Validation, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
if in.FailurePolicy != nil {
|
||||
in, out := &in.FailurePolicy, &out.FailurePolicy
|
||||
*out = new(FailurePolicyType)
|
||||
**out = **in
|
||||
}
|
||||
if in.AuditAnnotations != nil {
|
||||
in, out := &in.AuditAnnotations, &out.AuditAnnotations
|
||||
*out = make([]AuditAnnotation, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.MatchConditions != nil {
|
||||
in, out := &in.MatchConditions, &out.MatchConditions
|
||||
*out = make([]MatchCondition, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Variables != nil {
|
||||
in, out := &in.Variables, &out.Variables
|
||||
*out = make([]Variable, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicySpec.
|
||||
func (in *ValidatingAdmissionPolicySpec) DeepCopy() *ValidatingAdmissionPolicySpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicySpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingAdmissionPolicyStatus) DeepCopyInto(out *ValidatingAdmissionPolicyStatus) {
|
||||
*out = *in
|
||||
if in.TypeChecking != nil {
|
||||
in, out := &in.TypeChecking, &out.TypeChecking
|
||||
*out = new(TypeChecking)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]metav1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingAdmissionPolicyStatus.
|
||||
func (in *ValidatingAdmissionPolicyStatus) DeepCopy() *ValidatingAdmissionPolicyStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ValidatingAdmissionPolicyStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) {
|
||||
*out = *in
|
||||
@@ -391,6 +786,43 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object {
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Validation) DeepCopyInto(out *Validation) {
|
||||
*out = *in
|
||||
if in.Reason != nil {
|
||||
in, out := &in.Reason, &out.Reason
|
||||
*out = new(metav1.StatusReason)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation.
|
||||
func (in *Validation) DeepCopy() *Validation {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Validation)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Variable) DeepCopyInto(out *Variable) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Variable.
|
||||
func (in *Variable) DeepCopy() *Variable {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Variable)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
|
||||
*out = *in
70 vendor/k8s.io/api/admissionregistration/v1/zz_generated.prerelease-lifecycle.go (generated, vendored, new file)
@@ -0,0 +1,70 @@
|
||||
//go:build !ignore_autogenerated
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *MutatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *MutatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ValidatingWebhookConfiguration) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 16
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ValidatingWebhookConfigurationList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 16
|
||||
}
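A minimal sketch of how the APILifecycleIntroduced methods generated in this new file can be used; the returned pairs simply echo the lifecycle tags shown above (1.30 for the policy types, 1.16 for the webhook configurations), which callers may compare against a discovered server version.

package main

import (
	"fmt"

	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
)

func main() {
	var policy admissionregistrationv1.ValidatingAdmissionPolicy
	var webhookCfg admissionregistrationv1.ValidatingWebhookConfiguration

	pMajor, pMinor := policy.APILifecycleIntroduced()     // 1, 30
	wMajor, wMinor := webhookCfg.APILifecycleIntroduced() // 1, 16

	fmt.Printf("ValidatingAdmissionPolicy since %d.%d\n", pMajor, pMinor)
	fmt.Printf("ValidatingWebhookConfiguration since %d.%d\n", wMajor, wMinor)
}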
1 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go (generated, vendored)
@@ -17,6 +17,7 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
// +groupName=admissionregistration.k8s.io
// Package v1alpha1 is the v1alpha1 version of the API.
2469 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go (generated, vendored)
File diff suppressed because it is too large
319 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto (generated, vendored)
@@ -29,6 +29,51 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "k8s.io/api/admissionregistration/v1alpha1";
|
||||
|
||||
// ApplyConfiguration defines the desired configuration values of an object.
|
||||
message ApplyConfiguration {
|
||||
// expression will be evaluated by CEL to create an apply configuration.
|
||||
// ref: https://github.com/google/cel-spec
|
||||
//
|
||||
// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
|
||||
// returns an apply configuration to set a single field:
|
||||
//
|
||||
// Object{
|
||||
// spec: Object.spec{
|
||||
// serviceAccountName: "example"
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
|
||||
// values not included in the apply configuration.
|
||||
//
|
||||
// CEL expressions have access to the object types needed to create apply configurations:
|
||||
//
|
||||
// - 'Object' - CEL type of the resource object.
|
||||
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
|
||||
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')
|
||||
//
|
||||
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
|
||||
//
|
||||
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
|
||||
// - 'oldObject' - The existing object. The value is null for CREATE requests.
|
||||
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
|
||||
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
|
||||
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
|
||||
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
|
||||
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
|
||||
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
|
||||
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
|
||||
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
|
||||
// request resource.
|
||||
//
|
||||
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
|
||||
// object. No other metadata properties are accessible.
|
||||
//
|
||||
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
|
||||
// Required.
|
||||
optional string expression = 1;
|
||||
}
|
||||
|
||||
// AuditAnnotation describes how to produce an audit annotation for an API request.
|
||||
message AuditAnnotation {
|
||||
// key specifies the audit annotation key. The audit annotation keys of
|
||||
@@ -79,6 +124,75 @@ message ExpressionWarning {
|
||||
optional string warning = 3;
|
||||
}
|
||||
|
||||
// JSONPatch defines a JSON Patch.
|
||||
message JSONPatch {
|
||||
// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
|
||||
// ref: https://github.com/google/cel-spec
|
||||
//
|
||||
// expression must return an array of JSONPatch values.
|
||||
//
|
||||
// For example, this CEL expression returns a JSON patch to conditionally modify a value:
|
||||
//
|
||||
// [
|
||||
// JSONPatch{op: "test", path: "/spec/example", value: "Red"},
|
||||
// JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
|
||||
// ]
|
||||
//
|
||||
// To define an object for the patch value, use Object types. For example:
|
||||
//
|
||||
// [
|
||||
// JSONPatch{
|
||||
// op: "add",
|
||||
// path: "/spec/selector",
|
||||
// value: Object.spec.selector{matchLabels: {"environment": "test"}}
|
||||
// }
|
||||
// ]
|
||||
//
|
||||
// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
|
||||
//
|
||||
// [
|
||||
// JSONPatch{
|
||||
// op: "add",
|
||||
// path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
|
||||
// value: "test"
|
||||
// },
|
||||
// ]
|
||||
//
|
||||
// CEL expressions have access to the types needed to create JSON patches and objects:
|
||||
//
|
||||
// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
|
||||
// See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
|
||||
// integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
|
||||
// [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
|
||||
// function may be used to escape path keys containing '/' and '~'.
|
||||
// - 'Object' - CEL type of the resource object.
|
||||
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
|
||||
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')
|
||||
//
|
||||
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
|
||||
//
|
||||
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
|
||||
// - 'oldObject' - The existing object. The value is null for CREATE requests.
|
||||
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
|
||||
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
|
||||
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
|
||||
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
|
||||
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
|
||||
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
|
||||
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
|
||||
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
|
||||
// request resource.
|
||||
//
|
||||
// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
|
||||
// as well as:
|
||||
//
|
||||
// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).
|
||||
//
|
||||
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
|
||||
// Required.
|
||||
optional string expression = 1;
|
||||
}
|
||||
|
||||
message MatchCondition {
|
||||
// Name is an identifier for this match condition, used for strategic merging of MatchConditions,
|
||||
// as well as providing an identifier for logging purposes. A good name should be descriptive of
|
||||
@@ -156,7 +270,7 @@ message MatchResources {
|
||||
//
|
||||
// Default to the empty LabelSelector, which matches everything.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
|
||||
|
||||
// ObjectSelector decides whether to run the validation based on if the
|
||||
// object has matching labels. objectSelector is evaluated against both
|
||||
@@ -170,7 +284,7 @@ message MatchResources {
|
||||
// users may skip the admission webhook by setting the labels.
|
||||
// Default to the empty LabelSelector, which matches everything.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
|
||||
|
||||
// ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
|
||||
// The policy cares about an operation if it matches _any_ Rule.
|
||||
@@ -202,6 +316,193 @@ message MatchResources {
|
||||
optional string matchPolicy = 7;
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
|
||||
message MutatingAdmissionPolicy {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Specification of the desired behavior of the MutatingAdmissionPolicy.
|
||||
optional MutatingAdmissionPolicySpec spec = 2;
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
|
||||
// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
|
||||
// configure policies for clusters.
|
||||
//
|
||||
// For a given admission request, each binding will cause its policy to be
|
||||
// evaluated N times, where N is 1 for policies/bindings that don't use
|
||||
// params, otherwise N is the number of parameters selected by the binding.
|
||||
// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
|
||||
//
|
||||
// Adding/removing policies, bindings, or params can not affect whether a
|
||||
// given (policy, binding, param) combination is within its own CEL budget.
|
||||
message MutatingAdmissionPolicyBinding {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
|
||||
optional MutatingAdmissionPolicyBindingSpec spec = 2;
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
|
||||
message MutatingAdmissionPolicyBindingList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of PolicyBinding.
|
||||
repeated MutatingAdmissionPolicyBinding items = 2;
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
|
||||
message MutatingAdmissionPolicyBindingSpec {
|
||||
// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
|
||||
// If the referenced resource does not exist, this binding is considered invalid and will be ignored
|
||||
// Required.
|
||||
optional string policyName = 1;
|
||||
|
||||
// paramRef specifies the parameter resource used to configure the admission control policy.
|
||||
// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
|
||||
// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied.
|
||||
// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
|
||||
// +optional
|
||||
optional ParamRef paramRef = 2;
|
||||
|
||||
// matchResources limits what resources match this binding and may be mutated by it.
|
||||
// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
|
||||
// matchConditions before the resource may be mutated.
|
||||
// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
|
||||
// and matchConditions must match for the resource to be mutated.
|
||||
// Additionally, matchResources.resourceRules are optional and do not constraint matching when unset.
|
||||
// Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
|
||||
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
|
||||
// '*' matches CREATE, UPDATE and CONNECT.
|
||||
// +optional
|
||||
optional MatchResources matchResources = 3;
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
|
||||
message MutatingAdmissionPolicyList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of ValidatingAdmissionPolicy.
|
||||
repeated MutatingAdmissionPolicy items = 2;
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
|
||||
message MutatingAdmissionPolicySpec {
|
||||
// paramKind specifies the kind of resources used to parameterize this policy.
|
||||
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
|
||||
// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
|
||||
// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
|
||||
// +optional
|
||||
optional ParamKind paramKind = 1;
|
||||
|
||||
// matchConstraints specifies what resources this policy is designed to validate.
|
||||
// The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
|
||||
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
|
||||
// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
|
||||
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
|
||||
// '*' matches CREATE, UPDATE and CONNECT.
|
||||
// Required.
|
||||
optional MatchResources matchConstraints = 2;
|
||||
|
||||
// variables contain definitions of variables that can be used in composition of other expressions.
|
||||
// Each variable is defined as a named CEL expression.
|
||||
// The variables defined here will be available under `variables` in other expressions of the policy
|
||||
// except matchConditions because matchConditions are evaluated before the rest of the policy.
|
||||
//
|
||||
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
|
||||
// Thus, variables must be sorted by the order of first appearance and acyclic.
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
repeated Variable variables = 3;
|
||||
|
||||
// mutations contain operations to perform on matching objects.
|
||||
// mutations may not be empty; a minimum of one mutation is required.
|
||||
// mutations are evaluated in order, and are reinvoked according to
|
||||
// the reinvocationPolicy.
|
||||
// The mutations of a policy are invoked for each binding of this policy
|
||||
// and reinvocation of mutations occurs on a per binding basis.
|
||||
//
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
repeated Mutation mutations = 4;
|
||||
|
||||
// failurePolicy defines how to handle failures for the admission policy. Failures can
|
||||
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
|
||||
// or mis-configured policy definitions or bindings.
|
||||
//
|
||||
// A policy is invalid if paramKind refers to a non-existent Kind.
|
||||
// A binding is invalid if paramRef.name refers to a non-existent resource.
|
||||
//
|
||||
// failurePolicy does not define how validations that evaluate to false are handled.
|
||||
//
|
||||
// Allowed values are Ignore or Fail. Defaults to Fail.
|
||||
// +optional
|
||||
optional string failurePolicy = 5;
|
||||
|
||||
// matchConditions is a list of conditions that must be met for a request to be validated.
|
||||
// Match conditions filter requests that have already been matched by the matchConstraints.
|
||||
// An empty list of matchConditions matches all requests.
|
||||
// There are a maximum of 64 match conditions allowed.
|
||||
//
|
||||
// If a parameter object is provided, it can be accessed via the `params` handle in the same
|
||||
// manner as validation expressions.
|
||||
//
|
||||
// The exact matching logic is (in order):
|
||||
// 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
|
||||
// 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
|
||||
// 3. If any matchCondition evaluates to an error (but none are FALSE):
|
||||
// - If failurePolicy=Fail, reject the request
|
||||
// - If failurePolicy=Ignore, the policy is skipped
|
||||
//
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +optional
|
||||
repeated MatchCondition matchConditions = 6;
|
||||
|
||||
// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
|
||||
// as part of a single admission evaluation.
|
||||
// Allowed values are "Never" and "IfNeeded".
|
||||
//
|
||||
// Never: These mutations will not be called more than once per binding in a single admission evaluation.
|
||||
//
|
||||
// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
|
||||
// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
|
||||
// reinvoked when mutations change the object after this mutation is invoked.
|
||||
// Required.
|
||||
optional string reinvocationPolicy = 7;
|
||||
}
|
||||
|
||||
// Mutation specifies the CEL expression which is used to apply the Mutation.
|
||||
message Mutation {
|
||||
// patchType indicates the patch strategy used.
|
||||
// Allowed values are "ApplyConfiguration" and "JSONPatch".
|
||||
// Required.
|
||||
//
|
||||
// +unionDiscriminator
|
||||
optional string patchType = 2;
|
||||
|
||||
// applyConfiguration defines the desired configuration values of an object.
|
||||
// The configuration is applied to the admission object using
|
||||
// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
|
||||
// A CEL expression is used to create apply configuration.
|
||||
optional ApplyConfiguration applyConfiguration = 3;
|
||||
|
||||
// jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
|
||||
// A CEL expression is used to create the JSON patch.
|
||||
optional JSONPatch jsonPatch = 4;
|
||||
}
|
||||
|
||||
// NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.
|
||||
// +structType=atomic
|
||||
message NamedRuleWithOperations {
|
||||
@@ -211,7 +512,7 @@ message NamedRuleWithOperations {
|
||||
repeated string resourceNames = 1;
|
||||
|
||||
// RuleWithOperations is a tuple of Operations and Resources.
|
||||
optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
|
||||
optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
|
||||
}
|
||||
|
||||
// ParamKind is a tuple of Group Kind and Version.
|
||||
@@ -267,7 +568,7 @@ message ParamRef {
|
||||
// mutually exclusive properties. If one is set, the other must be unset.
|
||||
//
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
|
||||
|
||||
// `parameterNotFoundAction` controls the behavior of the binding when the resource
|
||||
// exists, and name or selector is valid, but there are no parameters
|
||||
@@ -295,7 +596,7 @@ message TypeChecking {
|
||||
message ValidatingAdmissionPolicy {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Specification of the desired behavior of the ValidatingAdmissionPolicy.
|
||||
optional ValidatingAdmissionPolicySpec spec = 2;
|
||||
@@ -322,7 +623,7 @@ message ValidatingAdmissionPolicy {
|
||||
message ValidatingAdmissionPolicyBinding {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
|
||||
optional ValidatingAdmissionPolicyBindingSpec spec = 2;
|
||||
@@ -333,7 +634,7 @@ message ValidatingAdmissionPolicyBindingList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of PolicyBinding.
|
||||
repeated ValidatingAdmissionPolicyBinding items = 2;
|
||||
@@ -409,7 +710,7 @@ message ValidatingAdmissionPolicyList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of ValidatingAdmissionPolicy.
|
||||
repeated ValidatingAdmissionPolicy items = 2;
|
||||
@@ -514,7 +815,7 @@ message ValidatingAdmissionPolicyStatus {
|
||||
// +optional
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
|
||||
repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
|
||||
}
|
||||
|
||||
// Validation specifies the CEL expression which is used to apply the validation.
|
||||
|
||||
4  vendor/k8s.io/api/admissionregistration/v1alpha1/register.go  (generated, vendored)
@@ -50,6 +50,10 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
&ValidatingAdmissionPolicyList{},
|
||||
&ValidatingAdmissionPolicyBinding{},
|
||||
&ValidatingAdmissionPolicyBindingList{},
|
||||
&MutatingAdmissionPolicy{},
|
||||
&MutatingAdmissionPolicyList{},
|
||||
&MutatingAdmissionPolicyBinding{},
|
||||
&MutatingAdmissionPolicyBindingList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
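For orientation (not part of the vendored diff): a minimal sketch of what the four new registrations above enable, assuming the package's usual SchemeBuilder/AddToScheme helpers from register.go.

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build a scheme and register the v1alpha1 admissionregistration kinds,
	// including the MutatingAdmissionPolicy types added in the hunk above.
	scheme := runtime.NewScheme()
	if err := admissionregistrationv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve the GroupVersionKind of the new type,
	// which is what generic encoders/decoders in client-go rely on.
	gvks, _, err := scheme.ObjectKinds(&admissionregistrationv1alpha1.MutatingAdmissionPolicy{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks)
}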
347  vendor/k8s.io/api/admissionregistration/v1alpha1/types.go  (generated, vendored)
@@ -142,7 +142,7 @@ type ValidatingAdmissionPolicyList struct {
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of ValidatingAdmissionPolicy.
|
||||
Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
|
||||
Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
|
||||
@@ -404,7 +404,7 @@ type ValidatingAdmissionPolicyBindingList struct {
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of PolicyBinding.
|
||||
Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
|
||||
Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
|
||||
@@ -663,3 +663,346 @@ const (
|
||||
Delete OperationType = v1.Delete
|
||||
Connect OperationType = v1.Connect
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.32
|
||||
|
||||
// MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.
|
||||
type MutatingAdmissionPolicy struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// Specification of the desired behavior of the MutatingAdmissionPolicy.
|
||||
Spec MutatingAdmissionPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.32
|
||||
|
||||
// MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.
|
||||
type MutatingAdmissionPolicyList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of ValidatingAdmissionPolicy.
|
||||
Items []MutatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.
|
||||
type MutatingAdmissionPolicySpec struct {
|
||||
// paramKind specifies the kind of resources used to parameterize this policy.
|
||||
// If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions.
|
||||
// If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied.
|
||||
// If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.
|
||||
// +optional
|
||||
ParamKind *ParamKind `json:"paramKind,omitempty" protobuf:"bytes,1,rep,name=paramKind"`
|
||||
|
||||
// matchConstraints specifies what resources this policy is designed to validate.
|
||||
// The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints.
|
||||
// However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API
|
||||
// MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding.
|
||||
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
|
||||
// '*' matches CREATE, UPDATE and CONNECT.
|
||||
// Required.
|
||||
MatchConstraints *MatchResources `json:"matchConstraints,omitempty" protobuf:"bytes,2,rep,name=matchConstraints"`
|
||||
|
||||
// variables contain definitions of variables that can be used in composition of other expressions.
|
||||
// Each variable is defined as a named CEL expression.
|
||||
// The variables defined here will be available under `variables` in other expressions of the policy
|
||||
// except matchConditions because matchConditions are evaluated before the rest of the policy.
|
||||
//
|
||||
// The expression of a variable can refer to other variables defined earlier in the list but not those after.
|
||||
// Thus, variables must be sorted by the order of first appearance and acyclic.
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
Variables []Variable `json:"variables,omitempty" protobuf:"bytes,3,rep,name=variables"`
|
||||
|
||||
// mutations contain operations to perform on matching objects.
|
||||
// mutations may not be empty; a minimum of one mutation is required.
|
||||
// mutations are evaluated in order, and are reinvoked according to
|
||||
// the reinvocationPolicy.
|
||||
// The mutations of a policy are invoked for each binding of this policy
|
||||
// and reinvocation of mutations occurs on a per binding basis.
|
||||
//
|
||||
// +listType=atomic
|
||||
// +optional
|
||||
Mutations []Mutation `json:"mutations,omitempty" protobuf:"bytes,4,rep,name=mutations"`
|
||||
|
||||
// failurePolicy defines how to handle failures for the admission policy. Failures can
|
||||
// occur from CEL expression parse errors, type check errors, runtime errors and invalid
|
||||
// or mis-configured policy definitions or bindings.
|
||||
//
|
||||
// A policy is invalid if paramKind refers to a non-existent Kind.
|
||||
// A binding is invalid if paramRef.name refers to a non-existent resource.
|
||||
//
|
||||
// failurePolicy does not define how validations that evaluate to false are handled.
|
||||
//
|
||||
// Allowed values are Ignore or Fail. Defaults to Fail.
|
||||
// +optional
|
||||
FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,5,opt,name=failurePolicy,casttype=FailurePolicyType"`
|
||||
|
||||
// matchConditions is a list of conditions that must be met for a request to be validated.
|
||||
// Match conditions filter requests that have already been matched by the matchConstraints.
|
||||
// An empty list of matchConditions matches all requests.
|
||||
// There are a maximum of 64 match conditions allowed.
|
||||
//
|
||||
// If a parameter object is provided, it can be accessed via the `params` handle in the same
|
||||
// manner as validation expressions.
|
||||
//
|
||||
// The exact matching logic is (in order):
|
||||
// 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.
|
||||
// 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.
|
||||
// 3. If any matchCondition evaluates to an error (but none are FALSE):
|
||||
// - If failurePolicy=Fail, reject the request
|
||||
// - If failurePolicy=Ignore, the policy is skipped
|
||||
//
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +optional
|
||||
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,6,rep,name=matchConditions"`
|
||||
|
||||
// reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding
|
||||
// as part of a single admission evaluation.
|
||||
// Allowed values are "Never" and "IfNeeded".
|
||||
//
|
||||
// Never: These mutations will not be called more than once per binding in a single admission evaluation.
|
||||
//
|
||||
// IfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of
|
||||
// order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only
|
||||
// reinvoked when mutations change the object after this mutation is invoked.
|
||||
// Required.
|
||||
ReinvocationPolicy ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,7,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"`
|
||||
}
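For illustration (not part of the vendored diff): a hedged sketch of populating MutatingAdmissionPolicySpec from Go. The policy name is made up, the serviceAccountName expression is reused from the ApplyConfiguration documentation below, and the Fail constant is assumed to be the FailurePolicyType value declared elsewhere in this package.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePolicy assembles a MutatingAdmissionPolicy using the fields defined above.
func examplePolicy() *admissionregistrationv1alpha1.MutatingAdmissionPolicy {
	failurePolicy := admissionregistrationv1alpha1.Fail // allowed values are Ignore or Fail
	return &admissionregistrationv1alpha1.MutatingAdmissionPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "example-mutating-policy"},
		Spec: admissionregistrationv1alpha1.MutatingAdmissionPolicySpec{
			// matchConstraints is required; a real policy must also set its
			// resource rules (see MatchResources), omitted here for brevity.
			MatchConstraints: &admissionregistrationv1alpha1.MatchResources{},
			Mutations: []admissionregistrationv1alpha1.Mutation{{
				PatchType: admissionregistrationv1alpha1.PatchTypeApplyConfiguration,
				ApplyConfiguration: &admissionregistrationv1alpha1.ApplyConfiguration{
					// Expression taken from the ApplyConfiguration documentation below.
					Expression: `Object{
						spec: Object.spec{
							serviceAccountName: "example"
						}
					}`,
				},
			}},
			FailurePolicy:      &failurePolicy,
			ReinvocationPolicy: admissionregistrationv1alpha1.NeverReinvocationPolicy,
		},
	}
}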
// Mutation specifies the CEL expression which is used to apply the Mutation.
|
||||
type Mutation struct {
|
||||
// patchType indicates the patch strategy used.
|
||||
// Allowed values are "ApplyConfiguration" and "JSONPatch".
|
||||
// Required.
|
||||
//
|
||||
// +unionDiscriminator
|
||||
PatchType PatchType `json:"patchType" protobuf:"bytes,2,opt,name=patchType,casttype=PatchType"`
|
||||
|
||||
// applyConfiguration defines the desired configuration values of an object.
|
||||
// The configuration is applied to the admission object using
|
||||
// [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff).
|
||||
// A CEL expression is used to create apply configuration.
|
||||
ApplyConfiguration *ApplyConfiguration `json:"applyConfiguration,omitempty" protobuf:"bytes,3,opt,name=applyConfiguration"`
|
||||
|
||||
// jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object.
|
||||
// A CEL expression is used to create the JSON patch.
|
||||
JSONPatch *JSONPatch `json:"jsonPatch,omitempty" protobuf:"bytes,4,opt,name=jsonPatch"`
|
||||
}
|
||||
|
||||
// PatchType specifies the type of patch operation for a mutation.
|
||||
// +enum
|
||||
type PatchType string
|
||||
|
||||
const (
|
||||
// ApplyConfiguration indicates that the mutation is using apply configuration to mutate the object.
|
||||
PatchTypeApplyConfiguration PatchType = "ApplyConfiguration"
|
||||
// JSONPatch indicates that the object is mutated through JSON Patch.
|
||||
PatchTypeJSONPatch PatchType = "JSONPatch"
|
||||
)
|
||||
|
||||
// ApplyConfiguration defines the desired configuration values of an object.
|
||||
type ApplyConfiguration struct {
|
||||
// expression will be evaluated by CEL to create an apply configuration.
|
||||
// ref: https://github.com/google/cel-spec
|
||||
//
|
||||
// Apply configurations are declared in CEL using object initialization. For example, this CEL expression
|
||||
// returns an apply configuration to set a single field:
|
||||
//
|
||||
// Object{
|
||||
// spec: Object.spec{
|
||||
// serviceAccountName: "example"
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Apply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of
|
||||
// values not included in the apply configuration.
|
||||
//
|
||||
// CEL expressions have access to the object types needed to create apply configurations:
|
||||
//
|
||||
// - 'Object' - CEL type of the resource object.
|
||||
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
|
||||
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
|
||||
//
|
||||
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
|
||||
//
|
||||
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
|
||||
// - 'oldObject' - The existing object. The value is null for CREATE requests.
|
||||
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
|
||||
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
|
||||
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
|
||||
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
|
||||
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
|
||||
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
|
||||
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
|
||||
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
|
||||
// request resource.
|
||||
//
|
||||
// The `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the
|
||||
// object. No other metadata properties are accessible.
|
||||
//
|
||||
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
|
||||
// Required.
|
||||
Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
|
||||
}
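A further illustrative sketch (not from the diff): an ApplyConfiguration expression that uses the 'params' variable described above. The param field name 'defaultServiceAccount' is hypothetical and depends entirely on the policy's paramKind.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

// exampleApply defaults spec.serviceAccountName from a bound parameter when one
// exists; params may be null when the binding sets no paramRef.
var exampleApply = admissionregistrationv1alpha1.ApplyConfiguration{
	Expression: `Object{
		spec: Object.spec{
			serviceAccountName: params != null ? params.defaultServiceAccount : "example"
		}
	}`,
}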
// JSONPatch defines a JSON Patch.
|
||||
type JSONPatch struct {
|
||||
// expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/).
|
||||
// ref: https://github.com/google/cel-spec
|
||||
//
|
||||
// expression must return an array of JSONPatch values.
|
||||
//
|
||||
// For example, this CEL expression returns a JSON patch to conditionally modify a value:
|
||||
//
|
||||
// [
|
||||
// JSONPatch{op: "test", path: "/spec/example", value: "Red"},
|
||||
// JSONPatch{op: "replace", path: "/spec/example", value: "Green"}
|
||||
// ]
|
||||
//
|
||||
// To define an object for the patch value, use Object types. For example:
|
||||
//
|
||||
// [
|
||||
// JSONPatch{
|
||||
// op: "add",
|
||||
// path: "/spec/selector",
|
||||
// value: Object.spec.selector{matchLabels: {"environment": "test"}}
|
||||
// }
|
||||
// ]
|
||||
//
|
||||
// To use strings containing '/' and '~' as JSONPatch path keys, use "jsonpatch.escapeKey". For example:
|
||||
//
|
||||
// [
|
||||
// JSONPatch{
|
||||
// op: "add",
|
||||
// path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
|
||||
// value: "test"
|
||||
// },
|
||||
// ]
|
||||
//
|
||||
// CEL expressions have access to the types needed to create JSON patches and objects:
|
||||
//
|
||||
// - 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.
|
||||
// See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,
|
||||
// integer, array, map or object. If set, the 'path' and 'from' fields must be set to a
|
||||
// [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL
|
||||
// function may be used to escape path keys containing '/' and '~'.
|
||||
// - 'Object' - CEL type of the resource object.
|
||||
// - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec')
|
||||
// - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>' - CEL type of nested field (such as 'Object.spec.containers')
|
||||
//
|
||||
// CEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:
|
||||
//
|
||||
// - 'object' - The object from the incoming request. The value is null for DELETE requests.
|
||||
// - 'oldObject' - The existing object. The value is null for CREATE requests.
|
||||
// - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)).
|
||||
// - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind.
|
||||
// - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources.
|
||||
// - 'variables' - Map of composited variables, from its name to its lazily evaluated value.
|
||||
// For example, a variable named 'foo' can be accessed as 'variables.foo'.
|
||||
// - 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.
|
||||
// See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz
|
||||
// - 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the
|
||||
// request resource.
|
||||
//
|
||||
// CEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries)
|
||||
// as well as:
|
||||
//
|
||||
// - 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and '~1' respectively.
|
||||
//
|
||||
//
|
||||
// Only property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible.
|
||||
// Required.
|
||||
Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"`
|
||||
}
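For illustration (not from the diff): the escapeKey example from the documentation above, wrapped in a Mutation value as the Go types define it.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

// exampleJSONPatchMutation adds a label whose key contains '/', so the key is
// escaped with jsonpatch.escapeKey as documented above.
func exampleJSONPatchMutation() admissionregistrationv1alpha1.Mutation {
	return admissionregistrationv1alpha1.Mutation{
		PatchType: admissionregistrationv1alpha1.PatchTypeJSONPatch,
		JSONPatch: &admissionregistrationv1alpha1.JSONPatch{
			Expression: `[
				JSONPatch{
					op: "add",
					path: "/metadata/labels/" + jsonpatch.escapeKey("example.com/environment"),
					value: "test"
				}
			]`,
		},
	}
}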
// ReinvocationPolicyType specifies what type of policy the admission mutation uses.
|
||||
// +enum
|
||||
type ReinvocationPolicyType = v1.ReinvocationPolicyType
|
||||
|
||||
const (
|
||||
// NeverReinvocationPolicy indicates that the mutation must not be called more than once in a
|
||||
// single admission evaluation.
|
||||
NeverReinvocationPolicy ReinvocationPolicyType = v1.NeverReinvocationPolicy
|
||||
// IfNeededReinvocationPolicy indicates that the mutation may be called at least one
|
||||
// additional time as part of the admission evaluation if the object being admitted is
|
||||
// modified by other admission plugins after the initial mutation call.
|
||||
IfNeededReinvocationPolicy ReinvocationPolicyType = v1.IfNeededReinvocationPolicy
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.32
|
||||
|
||||
// MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources.
|
||||
// MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators
|
||||
// configure policies for clusters.
|
||||
//
|
||||
// For a given admission request, each binding will cause its policy to be
|
||||
// evaluated N times, where N is 1 for policies/bindings that don't use
|
||||
// params, otherwise N is the number of parameters selected by the binding.
|
||||
// Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).
|
||||
//
|
||||
// Adding/removing policies, bindings, or params can not affect whether a
|
||||
// given (policy, binding, param) combination is within its own CEL budget.
|
||||
type MutatingAdmissionPolicyBinding struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// Specification of the desired behavior of the MutatingAdmissionPolicyBinding.
|
||||
Spec MutatingAdmissionPolicyBindingSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.32
|
||||
|
||||
// MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.
|
||||
type MutatingAdmissionPolicyBindingList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// List of PolicyBinding.
|
||||
Items []MutatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.
|
||||
type MutatingAdmissionPolicyBindingSpec struct {
|
||||
// policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to.
|
||||
// If the referenced resource does not exist, this binding is considered invalid and will be ignored
|
||||
// Required.
|
||||
PolicyName string `json:"policyName,omitempty" protobuf:"bytes,1,rep,name=policyName"`
|
||||
|
||||
// paramRef specifies the parameter resource used to configure the admission control policy.
|
||||
// It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy.
|
||||
// If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy is applied.
|
||||
// If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.
|
||||
// +optional
|
||||
ParamRef *ParamRef `json:"paramRef,omitempty" protobuf:"bytes,2,rep,name=paramRef"`
|
||||
|
||||
// matchResources limits what resources match this binding and may be mutated by it.
|
||||
// Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and
|
||||
// matchConditions before the resource may be mutated.
|
||||
// When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints
|
||||
// and matchConditions must match for the resource to be mutated.
|
||||
// Additionally, matchResources.resourceRules are optional and do not constrain matching when unset.
|
||||
// Note that this differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required.
|
||||
// The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched.
|
||||
// '*' matches CREATE, UPDATE and CONNECT.
|
||||
// +optional
|
||||
MatchResources *MatchResources `json:"matchResources,omitempty" protobuf:"bytes,3,rep,name=matchResources"`
|
||||
}
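For illustration (not from the diff): a binding for the hypothetical 'example-mutating-policy' sketched earlier; paramRef and matchResources are left unset, so only the policy's own matchConstraints and matchConditions select resources.

package example

import (
	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleBinding binds the policy cluster-wide with no parameters.
func exampleBinding() *admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding {
	return &admissionregistrationv1alpha1.MutatingAdmissionPolicyBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "example-mutating-policy-binding"},
		Spec: admissionregistrationv1alpha1.MutatingAdmissionPolicyBindingSpec{
			PolicyName: "example-mutating-policy",
			// ParamRef and MatchResources deliberately left nil; see the field
			// documentation above for the resulting matching behavior.
		},
	}
}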
95  vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go  (generated, vendored)
@@ -27,6 +27,15 @@ package v1alpha1
|
||||
// Those methods can be generated by using hack/update-codegen.sh
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
|
||||
var map_ApplyConfiguration = map[string]string{
|
||||
"": "ApplyConfiguration defines the desired configuration values of an object.",
|
||||
"expression": "expression will be evaluated by CEL to create an apply configuration. ref: https://github.com/google/cel-spec\n\nApply configurations are declared in CEL using object initialization. For example, this CEL expression returns an apply configuration to set a single field:\n\n\tObject{\n\t spec: Object.spec{\n\t serviceAccountName: \"example\"\n\t }\n\t}\n\nApply configurations may not modify atomic structs, maps or arrays due to the risk of accidental deletion of values not included in the apply configuration.\n\nCEL expressions have access to the object types needed to create apply configurations:\n\n- 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
|
||||
}
|
||||
|
||||
func (ApplyConfiguration) SwaggerDoc() map[string]string {
|
||||
return map_ApplyConfiguration
|
||||
}
|
||||
|
||||
var map_AuditAnnotation = map[string]string{
|
||||
"": "AuditAnnotation describes how to produce an audit annotation for an API request.",
|
||||
"key": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.",
|
||||
@@ -47,6 +56,15 @@ func (ExpressionWarning) SwaggerDoc() map[string]string {
|
||||
return map_ExpressionWarning
|
||||
}
|
||||
|
||||
var map_JSONPatch = map[string]string{
|
||||
"": "JSONPatch defines a JSON Patch.",
|
||||
"expression": "expression will be evaluated by CEL to create a [JSON patch](https://jsonpatch.com/). ref: https://github.com/google/cel-spec\n\nexpression must return an array of JSONPatch values.\n\nFor example, this CEL expression returns a JSON patch to conditionally modify a value:\n\n\t [\n\t JSONPatch{op: \"test\", path: \"/spec/example\", value: \"Red\"},\n\t JSONPatch{op: \"replace\", path: \"/spec/example\", value: \"Green\"}\n\t ]\n\nTo define an object for the patch value, use Object types. For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/spec/selector\",\n\t value: Object.spec.selector{matchLabels: {\"environment\": \"test\"}}\n\t }\n\t ]\n\nTo use strings containing '/' and '~' as JSONPatch path keys, use \"jsonpatch.escapeKey\". For example:\n\n\t [\n\t JSONPatch{\n\t op: \"add\",\n\t path: \"/metadata/labels/\" + jsonpatch.escapeKey(\"example.com/environment\"),\n\t value: \"test\"\n\t },\n\t ]\n\nCEL expressions have access to the types needed to create JSON patches and objects:\n\n- 'JSONPatch' - CEL type of JSON Patch operations. JSONPatch has the fields 'op', 'from', 'path' and 'value'.\n See [JSON patch](https://jsonpatch.com/) for more details. The 'value' field may be set to any of: string,\n integer, array, map or object. If set, the 'path' and 'from' fields must be set to a\n [JSON pointer](https://datatracker.ietf.org/doc/html/rfc6901/) string, where the 'jsonpatch.escapeKey()' CEL\n function may be used to escape path keys containing '/' and '~'.\n- 'Object' - CEL type of the resource object. - 'Object.<fieldName>' - CEL type of object field (such as 'Object.spec') - 'Object.<fieldName1>.<fieldName2>...<fieldNameN>` - CEL type of nested field (such as 'Object.spec.containers')\n\nCEL expressions have access to the contents of the API request, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nCEL expressions have access to [Kubernetes CEL function libraries](https://kubernetes.io/docs/reference/using-api/cel/#cel-options-language-features-and-libraries) as well as:\n\n- 'jsonpatch.escapeKey' - Performs JSONPatch key escaping. '~' and '/' are escaped as '~0' and `~1' respectively).\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Required.",
|
||||
}
|
||||
|
||||
func (JSONPatch) SwaggerDoc() map[string]string {
|
||||
return map_JSONPatch
|
||||
}
|
||||
|
||||
var map_MatchResources = map[string]string{
|
||||
"": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)",
|
||||
"namespaceSelector": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.",
|
||||
@@ -60,6 +78,83 @@ func (MatchResources) SwaggerDoc() map[string]string {
|
||||
return map_MatchResources
|
||||
}
|
||||
|
||||
var map_MutatingAdmissionPolicy = map[string]string{
|
||||
"": "MutatingAdmissionPolicy describes the definition of an admission mutation policy that mutates the object coming into admission chain.",
|
||||
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
|
||||
"spec": "Specification of the desired behavior of the MutatingAdmissionPolicy.",
|
||||
}
|
||||
|
||||
func (MutatingAdmissionPolicy) SwaggerDoc() map[string]string {
|
||||
return map_MutatingAdmissionPolicy
|
||||
}
|
||||
|
||||
var map_MutatingAdmissionPolicyBinding = map[string]string{
|
||||
"": "MutatingAdmissionPolicyBinding binds the MutatingAdmissionPolicy with parametrized resources. MutatingAdmissionPolicyBinding and the optional parameter resource together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding. Each evaluation is constrained by a [runtime cost budget](https://kubernetes.io/docs/reference/using-api/cel/#runtime-cost-budget).\n\nAdding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.",
|
||||
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
|
||||
"spec": "Specification of the desired behavior of the MutatingAdmissionPolicyBinding.",
|
||||
}
|
||||
|
||||
func (MutatingAdmissionPolicyBinding) SwaggerDoc() map[string]string {
|
||||
return map_MutatingAdmissionPolicyBinding
|
||||
}
|
||||
|
||||
var map_MutatingAdmissionPolicyBindingList = map[string]string{
|
||||
"": "MutatingAdmissionPolicyBindingList is a list of MutatingAdmissionPolicyBinding.",
|
||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
"items": "List of PolicyBinding.",
|
||||
}
|
||||
|
||||
func (MutatingAdmissionPolicyBindingList) SwaggerDoc() map[string]string {
|
||||
return map_MutatingAdmissionPolicyBindingList
|
||||
}
|
||||
|
||||
var map_MutatingAdmissionPolicyBindingSpec = map[string]string{
|
||||
"": "MutatingAdmissionPolicyBindingSpec is the specification of the MutatingAdmissionPolicyBinding.",
|
||||
"policyName": "policyName references a MutatingAdmissionPolicy name which the MutatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.",
|
||||
"paramRef": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in spec.ParamKind of the bound MutatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the MutatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.",
|
||||
"matchResources": "matchResources limits what resources match this binding and may be mutated by it. Note that if matchResources matches a resource, the resource must also match a policy's matchConstraints and matchConditions before the resource may be mutated. When matchResources is unset, it does not constrain resource matching, and only the policy's matchConstraints and matchConditions must match for the resource to be mutated. Additionally, matchResources.resourceRules are optional and do not constraint matching when unset. Note that this is differs from MutatingAdmissionPolicy matchConstraints, where resourceRules are required. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT.",
|
||||
}
|
||||
|
||||
func (MutatingAdmissionPolicyBindingSpec) SwaggerDoc() map[string]string {
|
||||
return map_MutatingAdmissionPolicyBindingSpec
|
||||
}
|
||||
|
||||
var map_MutatingAdmissionPolicyList = map[string]string{
|
||||
"": "MutatingAdmissionPolicyList is a list of MutatingAdmissionPolicy.",
|
||||
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
"items": "List of ValidatingAdmissionPolicy.",
|
||||
}
|
||||
|
||||
func (MutatingAdmissionPolicyList) SwaggerDoc() map[string]string {
|
||||
return map_MutatingAdmissionPolicyList
|
||||
}
|
||||
|
||||
var map_MutatingAdmissionPolicySpec = map[string]string{
|
||||
"": "MutatingAdmissionPolicySpec is the specification of the desired behavior of the admission policy.",
|
||||
"paramKind": "paramKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If paramKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in MutatingAdmissionPolicyBinding, the params variable will be null.",
|
||||
"matchConstraints": "matchConstraints specifies what resources this policy is designed to validate. The MutatingAdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API MutatingAdmissionPolicy cannot match MutatingAdmissionPolicy and MutatingAdmissionPolicyBinding. The CREATE, UPDATE and CONNECT operations are allowed. The DELETE operation may not be matched. '*' matches CREATE, UPDATE and CONNECT. Required.",
|
||||
"variables": "variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except matchConditions because matchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, variables must be sorted by the order of first appearance and acyclic.",
|
||||
"mutations": "mutations contain operations to perform on matching objects. mutations may not be empty; a minimum of one mutation is required. mutations are evaluated in order, and are reinvoked according to the reinvocationPolicy. The mutations of a policy are invoked for each binding of this policy and reinvocation of mutations occurs on a per binding basis.",
|
||||
"failurePolicy": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if paramKind refers to a non-existent Kind. A binding is invalid if paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nAllowed values are Ignore or Fail. Defaults to Fail.",
|
||||
"matchConditions": "matchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the matchConstraints. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped",
|
||||
"reinvocationPolicy": "reinvocationPolicy indicates whether mutations may be called multiple times per MutatingAdmissionPolicyBinding as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: These mutations will not be called more than once per binding in a single admission evaluation.\n\nIfNeeded: These mutations may be invoked more than once per binding for a single admission request and there is no guarantee of order with respect to other admission plugins, admission webhooks, bindings of this policy and admission policies. Mutations are only reinvoked when mutations change the object after this mutation is invoked. Required.",
|
||||
}
|
||||
|
||||
func (MutatingAdmissionPolicySpec) SwaggerDoc() map[string]string {
|
||||
return map_MutatingAdmissionPolicySpec
|
||||
}
|
||||
|
||||
var map_Mutation = map[string]string{
|
||||
"": "Mutation specifies the CEL expression which is used to apply the Mutation.",
|
||||
"patchType": "patchType indicates the patch strategy used. Allowed values are \"ApplyConfiguration\" and \"JSONPatch\". Required.",
|
||||
"applyConfiguration": "applyConfiguration defines the desired configuration values of an object. The configuration is applied to the admission object using [structured merge diff](https://github.com/kubernetes-sigs/structured-merge-diff). A CEL expression is used to create apply configuration.",
|
||||
"jsonPatch": "jsonPatch defines a [JSON patch](https://jsonpatch.com/) operation to perform a mutation to the object. A CEL expression is used to create the JSON patch.",
|
||||
}
|
||||
|
||||
func (Mutation) SwaggerDoc() map[string]string {
|
||||
return map_Mutation
|
||||
}
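For orientation (not from the diff): these generated SwaggerDoc maps are keyed by JSON field name, with "" holding the type-level description; a quick way to inspect them:

package main

import (
	"fmt"

	admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	docs := admissionregistrationv1alpha1.Mutation{}.SwaggerDoc()
	fmt.Println(docs[""])          // type-level description of Mutation
	fmt.Println(docs["patchType"]) // description of the patchType field
}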
var map_NamedRuleWithOperations = map[string]string{
|
||||
"": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.",
|
||||
"resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.",
|
||||
|
||||
252  vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go  (generated, vendored)
@@ -26,6 +26,22 @@ import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ApplyConfiguration) DeepCopyInto(out *ApplyConfiguration) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplyConfiguration.
|
||||
func (in *ApplyConfiguration) DeepCopy() *ApplyConfiguration {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ApplyConfiguration)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuditAnnotation) DeepCopyInto(out *AuditAnnotation) {
|
||||
*out = *in
|
||||
@@ -58,6 +74,22 @@ func (in *ExpressionWarning) DeepCopy() *ExpressionWarning {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *JSONPatch) DeepCopyInto(out *JSONPatch) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatch.
|
||||
func (in *JSONPatch) DeepCopy() *JSONPatch {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(JSONPatch)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MatchCondition) DeepCopyInto(out *MatchCondition) {
|
||||
*out = *in
|
||||
@@ -119,6 +151,226 @@ func (in *MatchResources) DeepCopy() *MatchResources {
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MutatingAdmissionPolicy) DeepCopyInto(out *MutatingAdmissionPolicy) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicy.
|
||||
func (in *MutatingAdmissionPolicy) DeepCopy() *MutatingAdmissionPolicy {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MutatingAdmissionPolicy)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *MutatingAdmissionPolicy) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MutatingAdmissionPolicyBinding) DeepCopyInto(out *MutatingAdmissionPolicyBinding) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBinding.
|
||||
func (in *MutatingAdmissionPolicyBinding) DeepCopy() *MutatingAdmissionPolicyBinding {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MutatingAdmissionPolicyBinding)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *MutatingAdmissionPolicyBinding) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MutatingAdmissionPolicyBindingList) DeepCopyInto(out *MutatingAdmissionPolicyBindingList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]MutatingAdmissionPolicyBinding, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingList.
|
||||
func (in *MutatingAdmissionPolicyBindingList) DeepCopy() *MutatingAdmissionPolicyBindingList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MutatingAdmissionPolicyBindingList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *MutatingAdmissionPolicyBindingList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MutatingAdmissionPolicyBindingSpec) DeepCopyInto(out *MutatingAdmissionPolicyBindingSpec) {
|
||||
*out = *in
|
||||
if in.ParamRef != nil {
|
||||
in, out := &in.ParamRef, &out.ParamRef
|
||||
*out = new(ParamRef)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.MatchResources != nil {
|
||||
in, out := &in.MatchResources, &out.MatchResources
|
||||
*out = new(MatchResources)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyBindingSpec.
func (in *MutatingAdmissionPolicyBindingSpec) DeepCopy() *MutatingAdmissionPolicyBindingSpec {
	if in == nil {
		return nil
	}
	out := new(MutatingAdmissionPolicyBindingSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicyList) DeepCopyInto(out *MutatingAdmissionPolicyList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]MutatingAdmissionPolicy, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicyList.
func (in *MutatingAdmissionPolicyList) DeepCopy() *MutatingAdmissionPolicyList {
	if in == nil {
		return nil
	}
	out := new(MutatingAdmissionPolicyList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MutatingAdmissionPolicyList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MutatingAdmissionPolicySpec) DeepCopyInto(out *MutatingAdmissionPolicySpec) {
	*out = *in
	if in.ParamKind != nil {
		in, out := &in.ParamKind, &out.ParamKind
		*out = new(ParamKind)
		**out = **in
	}
	if in.MatchConstraints != nil {
		in, out := &in.MatchConstraints, &out.MatchConstraints
		*out = new(MatchResources)
		(*in).DeepCopyInto(*out)
	}
	if in.Variables != nil {
		in, out := &in.Variables, &out.Variables
		*out = make([]Variable, len(*in))
		copy(*out, *in)
	}
	if in.Mutations != nil {
		in, out := &in.Mutations, &out.Mutations
		*out = make([]Mutation, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.FailurePolicy != nil {
		in, out := &in.FailurePolicy, &out.FailurePolicy
		*out = new(FailurePolicyType)
		**out = **in
	}
	if in.MatchConditions != nil {
		in, out := &in.MatchConditions, &out.MatchConditions
		*out = make([]MatchCondition, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingAdmissionPolicySpec.
func (in *MutatingAdmissionPolicySpec) DeepCopy() *MutatingAdmissionPolicySpec {
	if in == nil {
		return nil
	}
	out := new(MutatingAdmissionPolicySpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mutation) DeepCopyInto(out *Mutation) {
	*out = *in
	if in.ApplyConfiguration != nil {
		in, out := &in.ApplyConfiguration, &out.ApplyConfiguration
		*out = new(ApplyConfiguration)
		**out = **in
	}
	if in.JSONPatch != nil {
		in, out := &in.JSONPatch, &out.JSONPatch
		*out = new(JSONPatch)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mutation.
func (in *Mutation) DeepCopy() *Mutation {
	if in == nil {
		return nil
	}
	out := new(Mutation)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedRuleWithOperations) DeepCopyInto(out *NamedRuleWithOperations) {
	*out = *in
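A minimal usage sketch of the generated deepcopy helpers above (illustrative only, not part of the vendored diff; the policy name is made up): DeepCopy returns an independent copy, so mutating the copy leaves the original object untouched.

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

func main() {
	// Hypothetical policy object; only the name is set, purely for illustration.
	orig := &admissionv1alpha1.MutatingAdmissionPolicy{}
	orig.Name = "example-policy"

	// DeepCopy allocates a new object and recursively copies all fields.
	copied := orig.DeepCopy()
	copied.Name = "example-policy-edited"

	fmt.Println(orig.Name, copied.Name) // example-policy example-policy-edited
}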
166 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.prerelease-lifecycle.go (generated, vendored, Normal file)
@@ -0,0 +1,166 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.

package v1alpha1

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
	return 1, 32
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
	return 1, 35
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
	return 1, 38
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
	return 1, 32
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
	return 1, 35
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
	return 1, 38
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
	return 1, 32
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
	return 1, 35
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
	return 1, 38
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *MutatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
	return 1, 32
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *MutatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
	return 1, 35
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *MutatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
	return 1, 38
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *ValidatingAdmissionPolicy) APILifecycleIntroduced() (major, minor int) {
	return 1, 26
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *ValidatingAdmissionPolicy) APILifecycleDeprecated() (major, minor int) {
	return 1, 29
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ValidatingAdmissionPolicy) APILifecycleRemoved() (major, minor int) {
	return 1, 32
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *ValidatingAdmissionPolicyBinding) APILifecycleIntroduced() (major, minor int) {
	return 1, 26
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *ValidatingAdmissionPolicyBinding) APILifecycleDeprecated() (major, minor int) {
	return 1, 29
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ValidatingAdmissionPolicyBinding) APILifecycleRemoved() (major, minor int) {
	return 1, 32
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *ValidatingAdmissionPolicyBindingList) APILifecycleIntroduced() (major, minor int) {
	return 1, 26
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *ValidatingAdmissionPolicyBindingList) APILifecycleDeprecated() (major, minor int) {
	return 1, 29
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ValidatingAdmissionPolicyBindingList) APILifecycleRemoved() (major, minor int) {
	return 1, 32
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *ValidatingAdmissionPolicyList) APILifecycleIntroduced() (major, minor int) {
	return 1, 26
}

// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
func (in *ValidatingAdmissionPolicyList) APILifecycleDeprecated() (major, minor int) {
	return 1, 29
}

// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
func (in *ValidatingAdmissionPolicyList) APILifecycleRemoved() (major, minor int) {
	return 1, 32
}
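The APILifecycle accessors in the new file above simply return (major, minor) pairs. A short sketch of how a consumer might use them to decide whether the v1alpha1 API is still served at a target Kubernetes release; the comparison helper is illustrative, not something k8s.io/api or client-go provides:

package main

import (
	"fmt"

	admissionv1alpha1 "k8s.io/api/admissionregistration/v1alpha1"
)

// beforeRelease reports whether version a.b is strictly earlier than release x.y.
// Illustrative helper only; not part of the vendored packages.
func beforeRelease(a, b, x, y int) bool {
	if a != x {
		return a < x
	}
	return b < y
}

func main() {
	var p admissionv1alpha1.MutatingAdmissionPolicy
	remMajor, remMinor := p.APILifecycleRemoved() // 1, 38 per the generated code above

	// A cluster at v1.33 would still serve the API; v1.38 would not.
	fmt.Println(beforeRelease(1, 33, remMajor, remMinor)) // true
	fmt.Println(beforeRelease(1, 38, remMajor, remMinor)) // false
}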
305 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go (generated, vendored)
@@ -15,7 +15,7 @@ limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
|
||||
// source: k8s.io/api/admissionregistration/v1beta1/generated.proto
|
||||
|
||||
package v1beta1
|
||||
|
||||
@@ -49,7 +49,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
func (m *AuditAnnotation) Reset() { *m = AuditAnnotation{} }
|
||||
func (*AuditAnnotation) ProtoMessage() {}
|
||||
func (*AuditAnnotation) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{0}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{0}
|
||||
}
|
||||
func (m *AuditAnnotation) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -77,7 +77,7 @@ var xxx_messageInfo_AuditAnnotation proto.InternalMessageInfo
|
||||
func (m *ExpressionWarning) Reset() { *m = ExpressionWarning{} }
|
||||
func (*ExpressionWarning) ProtoMessage() {}
|
||||
func (*ExpressionWarning) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{1}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{1}
|
||||
}
|
||||
func (m *ExpressionWarning) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -105,7 +105,7 @@ var xxx_messageInfo_ExpressionWarning proto.InternalMessageInfo
|
||||
func (m *MatchCondition) Reset() { *m = MatchCondition{} }
|
||||
func (*MatchCondition) ProtoMessage() {}
|
||||
func (*MatchCondition) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{2}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{2}
|
||||
}
|
||||
func (m *MatchCondition) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -133,7 +133,7 @@ var xxx_messageInfo_MatchCondition proto.InternalMessageInfo
|
||||
func (m *MatchResources) Reset() { *m = MatchResources{} }
|
||||
func (*MatchResources) ProtoMessage() {}
|
||||
func (*MatchResources) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{3}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{3}
|
||||
}
|
||||
func (m *MatchResources) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -161,7 +161,7 @@ var xxx_messageInfo_MatchResources proto.InternalMessageInfo
|
||||
func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} }
|
||||
func (*MutatingWebhook) ProtoMessage() {}
|
||||
func (*MutatingWebhook) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{4}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{4}
|
||||
}
|
||||
func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -189,7 +189,7 @@ var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo
|
||||
func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} }
|
||||
func (*MutatingWebhookConfiguration) ProtoMessage() {}
|
||||
func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{5}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{5}
|
||||
}
|
||||
func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -217,7 +217,7 @@ var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo
|
||||
func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} }
|
||||
func (*MutatingWebhookConfigurationList) ProtoMessage() {}
|
||||
func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{6}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{6}
|
||||
}
|
||||
func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -245,7 +245,7 @@ var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo
|
||||
func (m *NamedRuleWithOperations) Reset() { *m = NamedRuleWithOperations{} }
|
||||
func (*NamedRuleWithOperations) ProtoMessage() {}
|
||||
func (*NamedRuleWithOperations) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{7}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{7}
|
||||
}
|
||||
func (m *NamedRuleWithOperations) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -273,7 +273,7 @@ var xxx_messageInfo_NamedRuleWithOperations proto.InternalMessageInfo
|
||||
func (m *ParamKind) Reset() { *m = ParamKind{} }
|
||||
func (*ParamKind) ProtoMessage() {}
|
||||
func (*ParamKind) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{8}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{8}
|
||||
}
|
||||
func (m *ParamKind) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -301,7 +301,7 @@ var xxx_messageInfo_ParamKind proto.InternalMessageInfo
|
||||
func (m *ParamRef) Reset() { *m = ParamRef{} }
|
||||
func (*ParamRef) ProtoMessage() {}
|
||||
func (*ParamRef) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{9}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{9}
|
||||
}
|
||||
func (m *ParamRef) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -329,7 +329,7 @@ var xxx_messageInfo_ParamRef proto.InternalMessageInfo
|
||||
func (m *ServiceReference) Reset() { *m = ServiceReference{} }
|
||||
func (*ServiceReference) ProtoMessage() {}
|
||||
func (*ServiceReference) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{10}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{10}
|
||||
}
|
||||
func (m *ServiceReference) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -357,7 +357,7 @@ var xxx_messageInfo_ServiceReference proto.InternalMessageInfo
|
||||
func (m *TypeChecking) Reset() { *m = TypeChecking{} }
|
||||
func (*TypeChecking) ProtoMessage() {}
|
||||
func (*TypeChecking) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{11}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{11}
|
||||
}
|
||||
func (m *TypeChecking) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -385,7 +385,7 @@ var xxx_messageInfo_TypeChecking proto.InternalMessageInfo
|
||||
func (m *ValidatingAdmissionPolicy) Reset() { *m = ValidatingAdmissionPolicy{} }
|
||||
func (*ValidatingAdmissionPolicy) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicy) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{12}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{12}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicy) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -413,7 +413,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicy proto.InternalMessageInfo
|
||||
func (m *ValidatingAdmissionPolicyBinding) Reset() { *m = ValidatingAdmissionPolicyBinding{} }
|
||||
func (*ValidatingAdmissionPolicyBinding) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicyBinding) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{13}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{13}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicyBinding) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -441,7 +441,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBinding proto.InternalMessageInfo
|
||||
func (m *ValidatingAdmissionPolicyBindingList) Reset() { *m = ValidatingAdmissionPolicyBindingList{} }
|
||||
func (*ValidatingAdmissionPolicyBindingList) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicyBindingList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{14}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{14}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicyBindingList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -469,7 +469,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingList proto.InternalMessageIn
|
||||
func (m *ValidatingAdmissionPolicyBindingSpec) Reset() { *m = ValidatingAdmissionPolicyBindingSpec{} }
|
||||
func (*ValidatingAdmissionPolicyBindingSpec) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicyBindingSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{15}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{15}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicyBindingSpec) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -497,7 +497,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyBindingSpec proto.InternalMessageIn
|
||||
func (m *ValidatingAdmissionPolicyList) Reset() { *m = ValidatingAdmissionPolicyList{} }
|
||||
func (*ValidatingAdmissionPolicyList) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicyList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{16}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{16}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicyList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -525,7 +525,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyList proto.InternalMessageInfo
|
||||
func (m *ValidatingAdmissionPolicySpec) Reset() { *m = ValidatingAdmissionPolicySpec{} }
|
||||
func (*ValidatingAdmissionPolicySpec) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicySpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{17}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{17}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicySpec) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -553,7 +553,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicySpec proto.InternalMessageInfo
|
||||
func (m *ValidatingAdmissionPolicyStatus) Reset() { *m = ValidatingAdmissionPolicyStatus{} }
|
||||
func (*ValidatingAdmissionPolicyStatus) ProtoMessage() {}
|
||||
func (*ValidatingAdmissionPolicyStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{18}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{18}
|
||||
}
|
||||
func (m *ValidatingAdmissionPolicyStatus) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -581,7 +581,7 @@ var xxx_messageInfo_ValidatingAdmissionPolicyStatus proto.InternalMessageInfo
|
||||
func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} }
|
||||
func (*ValidatingWebhook) ProtoMessage() {}
|
||||
func (*ValidatingWebhook) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{19}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{19}
|
||||
}
|
||||
func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -609,7 +609,7 @@ var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo
|
||||
func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} }
|
||||
func (*ValidatingWebhookConfiguration) ProtoMessage() {}
|
||||
func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{20}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{20}
|
||||
}
|
||||
func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -637,7 +637,7 @@ var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo
|
||||
func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} }
|
||||
func (*ValidatingWebhookConfigurationList) ProtoMessage() {}
|
||||
func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{21}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{21}
|
||||
}
|
||||
func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -665,7 +665,7 @@ var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo
|
||||
func (m *Validation) Reset() { *m = Validation{} }
|
||||
func (*Validation) ProtoMessage() {}
|
||||
func (*Validation) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{22}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{22}
|
||||
}
|
||||
func (m *Validation) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -693,7 +693,7 @@ var xxx_messageInfo_Validation proto.InternalMessageInfo
|
||||
func (m *Variable) Reset() { *m = Variable{} }
|
||||
func (*Variable) ProtoMessage() {}
|
||||
func (*Variable) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{23}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{23}
|
||||
}
|
||||
func (m *Variable) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -721,7 +721,7 @@ var xxx_messageInfo_Variable proto.InternalMessageInfo
|
||||
func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} }
|
||||
func (*WebhookClientConfig) ProtoMessage() {}
|
||||
func (*WebhookClientConfig) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_abeea74cbc46f55a, []int{24}
|
||||
return fileDescriptor_7f7c65a4f012fb19, []int{24}
|
||||
}
|
||||
func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
@@ -775,135 +775,134 @@ func init() {
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a)
|
||||
proto.RegisterFile("k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_7f7c65a4f012fb19)
|
||||
}
|
||||
|
||||
var fileDescriptor_abeea74cbc46f55a = []byte{
|
||||
// 1973 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x23, 0x49,
|
||||
0x35, 0x1d, 0xe7, 0xc3, 0x7e, 0xce, 0x97, 0x6b, 0x67, 0x89, 0x77, 0x76, 0xd6, 0x8e, 0x5a, 0x2b,
|
||||
0x94, 0x91, 0xc0, 0xde, 0xc9, 0xae, 0x76, 0x97, 0x59, 0x21, 0x14, 0x67, 0x67, 0x86, 0x99, 0x9d,
|
||||
0x64, 0x42, 0x65, 0x37, 0x91, 0x60, 0x57, 0x9a, 0x72, 0x77, 0xd9, 0x6e, 0x6c, 0x77, 0x37, 0x5d,
|
||||
0x6d, 0xcf, 0x04, 0x24, 0x40, 0xe2, 0xb0, 0x57, 0x24, 0x2e, 0x48, 0x9c, 0xf8, 0x0b, 0xdc, 0x91,
|
||||
0xe0, 0x36, 0xc7, 0xbd, 0x31, 0x12, 0xc2, 0x22, 0xe6, 0xc0, 0x89, 0x03, 0x07, 0x38, 0xe4, 0x02,
|
||||
0xaa, 0xea, 0xea, 0x4f, 0xb7, 0x27, 0x9d, 0x90, 0x09, 0x97, 0xb9, 0xa5, 0xdf, 0x67, 0xbd, 0x57,
|
||||
0xef, 0xab, 0x9e, 0x03, 0xdf, 0xeb, 0x7e, 0xc8, 0x6a, 0x86, 0x55, 0xef, 0x0e, 0x9a, 0xd4, 0x31,
|
||||
0xa9, 0x4b, 0x59, 0x7d, 0x48, 0x4d, 0xdd, 0x72, 0xea, 0x12, 0x41, 0x6c, 0xa3, 0x4e, 0xf4, 0xbe,
|
||||
0xc1, 0x98, 0x61, 0x99, 0x0e, 0x6d, 0x1b, 0xcc, 0x75, 0x88, 0x6b, 0x58, 0x66, 0x7d, 0x78, 0xab,
|
||||
0x49, 0x5d, 0x72, 0xab, 0xde, 0xa6, 0x26, 0x75, 0x88, 0x4b, 0xf5, 0x9a, 0xed, 0x58, 0xae, 0x85,
|
||||
0x36, 0x3d, 0xce, 0x1a, 0xb1, 0x8d, 0x5a, 0x2a, 0x67, 0x4d, 0x72, 0x5e, 0xff, 0x66, 0xdb, 0x70,
|
||||
0x3b, 0x83, 0x66, 0x4d, 0xb3, 0xfa, 0xf5, 0xb6, 0xd5, 0xb6, 0xea, 0x42, 0x40, 0x73, 0xd0, 0x12,
|
||||
0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x6e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd, 0xbd,
|
||||
0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xb8, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x7d,
|
||||
0xea, 0x92, 0x34, 0xae, 0xfa, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0xbc, 0x7f,
|
||||
0x16, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xd5, 0xed, 0x81, 0x6e, 0xb8, 0xdb,
|
||||
0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x16, 0xe4, 0xba, 0xf4, 0xb8, 0xac, 0x6c, 0x28, 0x9b, 0x85,
|
||||
0x46, 0xf1, 0xd9, 0xa8, 0x3a, 0x33, 0x1e, 0x55, 0x73, 0x9f, 0xd0, 0x63, 0xcc, 0xe1, 0x68, 0x1b,
|
||||
0x56, 0x87, 0xa4, 0x37, 0xa0, 0x77, 0x9e, 0xda, 0x0e, 0x15, 0x2e, 0x28, 0xcf, 0x0a, 0xd2, 0x75,
|
||||
0x49, 0xba, 0x7a, 0x18, 0x47, 0xe3, 0x24, 0xbd, 0xda, 0x83, 0x52, 0xf8, 0x75, 0x44, 0x1c, 0xd3,
|
||||
0x30, 0xdb, 0xe8, 0x1b, 0x90, 0x6f, 0x19, 0xb4, 0xa7, 0x63, 0xda, 0x92, 0x02, 0xd7, 0xa4, 0xc0,
|
||||
0xfc, 0x5d, 0x09, 0xc7, 0x01, 0x05, 0xba, 0x09, 0x8b, 0x4f, 0x3c, 0xc6, 0x72, 0x4e, 0x10, 0xaf,
|
||||
0x4a, 0xe2, 0x45, 0x29, 0x0f, 0xfb, 0x78, 0xb5, 0x05, 0x2b, 0xbb, 0xc4, 0xd5, 0x3a, 0x3b, 0x96,
|
||||
0xa9, 0x1b, 0xc2, 0xc2, 0x0d, 0x98, 0x33, 0x49, 0x9f, 0x4a, 0x13, 0x97, 0x24, 0xe7, 0xdc, 0x1e,
|
||||
0xe9, 0x53, 0x2c, 0x30, 0x68, 0x0b, 0x80, 0x26, 0xed, 0x43, 0x92, 0x0e, 0x22, 0xa6, 0x45, 0xa8,
|
||||
0xd4, 0x3f, 0xcd, 0x49, 0x45, 0x98, 0x32, 0x6b, 0xe0, 0x68, 0x94, 0xa1, 0xa7, 0x50, 0xe2, 0xe2,
|
||||
0x98, 0x4d, 0x34, 0x7a, 0x40, 0x7b, 0x54, 0x73, 0x2d, 0x47, 0x68, 0x2d, 0x6e, 0xbd, 0x5b, 0x0b,
|
||||
0xc3, 0x34, 0xb8, 0xb1, 0x9a, 0xdd, 0x6d, 0x73, 0x00, 0xab, 0xf1, 0xc0, 0xa8, 0x0d, 0x6f, 0xd5,
|
||||
0x1e, 0x92, 0x26, 0xed, 0xf9, 0xac, 0x8d, 0xd7, 0xc7, 0xa3, 0x6a, 0x69, 0x2f, 0x29, 0x11, 0x4f,
|
||||
0x2a, 0x41, 0x16, 0xac, 0x58, 0xcd, 0x1f, 0x52, 0xcd, 0x0d, 0xd4, 0xce, 0x5e, 0x5c, 0x2d, 0x1a,
|
||||
0x8f, 0xaa, 0x2b, 0x8f, 0x62, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x29, 0x2c, 0x3b, 0xd2, 0x6e, 0x3c,
|
||||
0xe8, 0x51, 0x56, 0xce, 0x6d, 0xe4, 0x36, 0x8b, 0x5b, 0xdb, 0xb5, 0xac, 0xd9, 0x58, 0xe3, 0x76,
|
||||
0xe9, 0x9c, 0xf7, 0xc8, 0x70, 0x3b, 0x8f, 0x6c, 0xea, 0xa1, 0x59, 0xe3, 0x75, 0xe9, 0xf7, 0x65,
|
||||
0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x14, 0xb8, 0x46, 0x9f, 0x6a, 0xbd, 0x81, 0x4e, 0x63,
|
||||
0x74, 0xe5, 0xb9, 0xcb, 0x3a, 0xc7, 0x0d, 0x79, 0x8e, 0x6b, 0x77, 0x52, 0xd4, 0xe0, 0x54, 0xe5,
|
||||
0xe8, 0x63, 0x28, 0xf6, 0x79, 0x48, 0xec, 0x5b, 0x3d, 0x43, 0x3b, 0x2e, 0x2f, 0x8a, 0x40, 0x52,
|
||||
0xc7, 0xa3, 0x6a, 0x71, 0x37, 0x04, 0x9f, 0x8e, 0xaa, 0xab, 0x91, 0xcf, 0x4f, 0x8f, 0x6d, 0x8a,
|
||||
0xa3, 0x6c, 0xea, 0x1f, 0xf3, 0xb0, 0xba, 0x3b, 0xe0, 0xe9, 0x69, 0xb6, 0x8f, 0x68, 0xb3, 0x63,
|
||||
0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x04, 0x96, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xc7, 0x32, 0x5b, 0x46,
|
||||
0x5b, 0x06, 0xc0, 0xb7, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x89, 0x08, 0x69, 0x5c, 0x93, 0x8a, 0x96,
|
||||
0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x79, 0x27, 0x12, 0x02, 0x1f, 0x64, 0xd1, 0x58, 0x4b,
|
||||
0x71, 0xf8, 0xb2, 0xd4, 0x35, 0xef, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc2, 0x72, 0x8b, 0x18, 0xbd,
|
||||
0x81, 0x43, 0xa5, 0x53, 0xe7, 0x84, 0x07, 0xbe, 0xce, 0x23, 0xe4, 0x6e, 0x14, 0x71, 0x3a, 0xaa,
|
||||
0x96, 0x62, 0x00, 0xe1, 0xd8, 0x38, 0x73, 0xf2, 0x82, 0x0a, 0x17, 0xba, 0xa0, 0xf4, 0x3c, 0x9f,
|
||||
0xff, 0xff, 0xe4, 0x79, 0xf1, 0xe5, 0xe6, 0xf9, 0xc7, 0x50, 0x64, 0x86, 0x4e, 0xef, 0xb4, 0x5a,
|
||||
0x54, 0x73, 0x59, 0x79, 0x21, 0x74, 0xd8, 0x41, 0x08, 0xe6, 0x0e, 0x0b, 0x3f, 0x77, 0x7a, 0x84,
|
||||
0x31, 0x1c, 0x65, 0x43, 0xb7, 0x61, 0x85, 0x77, 0x25, 0x6b, 0xe0, 0x1e, 0x50, 0xcd, 0x32, 0x75,
|
||||
0x26, 0x52, 0x63, 0xde, 0x3b, 0xc1, 0xa7, 0x31, 0x0c, 0x4e, 0x50, 0xa2, 0xcf, 0x60, 0x3d, 0x88,
|
||||
0x22, 0x4c, 0x87, 0x06, 0x7d, 0x72, 0x48, 0x1d, 0xfe, 0xc1, 0xca, 0xf9, 0x8d, 0xdc, 0x66, 0xa1,
|
||||
0xf1, 0xe6, 0x78, 0x54, 0x5d, 0xdf, 0x4e, 0x27, 0xc1, 0xd3, 0x78, 0xd1, 0x63, 0x40, 0x0e, 0x35,
|
||||
0xcc, 0xa1, 0xa5, 0x89, 0xf0, 0x93, 0x01, 0x01, 0xc2, 0xbe, 0x77, 0xc6, 0xa3, 0x2a, 0xc2, 0x13,
|
||||
0xd8, 0xd3, 0x51, 0xf5, 0x6b, 0x93, 0x50, 0x11, 0x1e, 0x29, 0xb2, 0xd0, 0x4f, 0x60, 0xb5, 0x1f,
|
||||
0x6b, 0x44, 0xac, 0xbc, 0x24, 0x32, 0xe4, 0xc3, 0xec, 0x39, 0x19, 0xef, 0x64, 0x61, 0xcf, 0x8d,
|
||||
0xc3, 0x19, 0x4e, 0x6a, 0x52, 0xff, 0xa2, 0xc0, 0x8d, 0x44, 0x0d, 0xf1, 0xd2, 0x75, 0xe0, 0x69,
|
||||
0x40, 0x8f, 0x21, 0xcf, 0xa3, 0x42, 0x27, 0x2e, 0x91, 0x2d, 0xea, 0x9d, 0x6c, 0x31, 0xe4, 0x05,
|
||||
0xcc, 0x2e, 0x75, 0x49, 0xd8, 0x22, 0x43, 0x18, 0x0e, 0xa4, 0xa2, 0x1f, 0x40, 0x5e, 0x6a, 0x66,
|
||||
0xe5, 0x59, 0x61, 0xf8, 0xb7, 0xce, 0x61, 0x78, 0xfc, 0xec, 0x8d, 0x39, 0xae, 0x0a, 0x07, 0x02,
|
||||
0xd5, 0x7f, 0x28, 0xb0, 0xf1, 0x22, 0xfb, 0x1e, 0x1a, 0xcc, 0x45, 0x9f, 0x4f, 0xd8, 0x58, 0xcb,
|
||||
0x98, 0x27, 0x06, 0xf3, 0x2c, 0x0c, 0x66, 0x12, 0x1f, 0x12, 0xb1, 0xaf, 0x0b, 0xf3, 0x86, 0x4b,
|
||||
0xfb, 0xbe, 0x71, 0x77, 0x2f, 0x6c, 0x5c, 0xec, 0xe0, 0x61, 0x19, 0xbc, 0xcf, 0x85, 0x63, 0x4f,
|
||||
0x87, 0xfa, 0x5c, 0x81, 0xf5, 0x29, 0x9d, 0x0a, 0x7d, 0x10, 0xf6, 0x62, 0x51, 0x44, 0xca, 0x8a,
|
||||
0xc8, 0x8b, 0x52, 0xb4, 0x89, 0x0a, 0x04, 0x8e, 0xd3, 0xa1, 0x5f, 0x28, 0x80, 0x9c, 0x09, 0x79,
|
||||
0xb2, 0x73, 0x5c, 0xb8, 0x8e, 0x5f, 0x97, 0x06, 0xa0, 0x49, 0x1c, 0x4e, 0x51, 0xa7, 0x12, 0x28,
|
||||
0xec, 0x13, 0x87, 0xf4, 0x3f, 0x31, 0x4c, 0x9d, 0x4f, 0x62, 0xc4, 0x36, 0x64, 0x96, 0xca, 0x6e,
|
||||
0x17, 0x84, 0xd9, 0xf6, 0xfe, 0x7d, 0x89, 0xc1, 0x11, 0x2a, 0xde, 0x1b, 0xbb, 0x86, 0xa9, 0xcb,
|
||||
0xb9, 0x2d, 0xe8, 0x8d, 0x5c, 0x1e, 0x16, 0x18, 0xf5, 0x77, 0xb3, 0x90, 0x17, 0x3a, 0xf8, 0x2c,
|
||||
0x79, 0x76, 0x2b, 0xad, 0x43, 0x21, 0x28, 0xbd, 0x52, 0x6a, 0x49, 0x92, 0x15, 0x82, 0x32, 0x8d,
|
||||
0x43, 0x1a, 0xf4, 0x05, 0xe4, 0x99, 0x5f, 0x90, 0x73, 0x17, 0x2f, 0xc8, 0x4b, 0x3c, 0xd2, 0x82,
|
||||
0x52, 0x1c, 0x88, 0x44, 0x2e, 0xac, 0xdb, 0xfc, 0xf4, 0xd4, 0xa5, 0xce, 0x9e, 0xe5, 0xde, 0xb5,
|
||||
0x06, 0xa6, 0xbe, 0xad, 0x71, 0xef, 0xc9, 0x6e, 0x78, 0x9b, 0x97, 0xc0, 0xfd, 0x74, 0x92, 0xd3,
|
||||
0x51, 0xf5, 0xcd, 0x29, 0x28, 0x51, 0xba, 0xa6, 0x89, 0x56, 0x7f, 0xab, 0xc0, 0xda, 0x01, 0x75,
|
||||
0x86, 0x86, 0x46, 0x31, 0x6d, 0x51, 0x87, 0x9a, 0x5a, 0xc2, 0x35, 0x4a, 0x06, 0xd7, 0xf8, 0xde,
|
||||
0x9e, 0x9d, 0xea, 0xed, 0x1b, 0x30, 0x67, 0x13, 0xb7, 0x23, 0x07, 0xfb, 0x3c, 0xc7, 0xee, 0x13,
|
||||
0xb7, 0x83, 0x05, 0x54, 0x60, 0x2d, 0xc7, 0x15, 0x86, 0xce, 0x4b, 0xac, 0xe5, 0xb8, 0x58, 0x40,
|
||||
0xd5, 0x5f, 0x2b, 0xb0, 0xc4, 0xad, 0xd8, 0xe9, 0x50, 0xad, 0xcb, 0x9f, 0x15, 0x5f, 0x2a, 0x80,
|
||||
0x68, 0xf2, 0xb1, 0xe1, 0x65, 0x44, 0x71, 0xeb, 0xa3, 0xec, 0x29, 0x3a, 0xf1, 0x60, 0x09, 0xc3,
|
||||
0x7a, 0x02, 0xc5, 0x70, 0x8a, 0x4a, 0xf5, 0xcf, 0xb3, 0xf0, 0xc6, 0x21, 0xe9, 0x19, 0xba, 0x48,
|
||||
0xf5, 0xa0, 0x3f, 0xc9, 0xe6, 0xf0, 0xf2, 0xcb, 0xaf, 0x01, 0x73, 0xcc, 0xa6, 0x9a, 0xcc, 0xe6,
|
||||
0x7b, 0xd9, 0x4d, 0x9f, 0x7a, 0xe8, 0x03, 0x9b, 0x6a, 0xe1, 0x0d, 0xf2, 0x2f, 0x2c, 0x54, 0xa0,
|
||||
0x1f, 0xc1, 0x02, 0x73, 0x89, 0x3b, 0x60, 0x32, 0xf8, 0xef, 0x5f, 0x86, 0x32, 0x21, 0xb0, 0xb1,
|
||||
0x22, 0xd5, 0x2d, 0x78, 0xdf, 0x58, 0x2a, 0x52, 0xff, 0xad, 0xc0, 0xc6, 0x54, 0xde, 0x86, 0x61,
|
||||
0xea, 0x3c, 0x18, 0x5e, 0xbe, 0x93, 0xed, 0x98, 0x93, 0xf7, 0x2e, 0xc1, 0x6e, 0x79, 0xf6, 0x69,
|
||||
0xbe, 0x56, 0xff, 0xa5, 0xc0, 0xdb, 0x67, 0x31, 0x5f, 0x41, 0xf3, 0xb3, 0xe2, 0xcd, 0xef, 0xc1,
|
||||
0xe5, 0x59, 0x3e, 0xa5, 0x01, 0x7e, 0x99, 0x3b, 0xdb, 0x6e, 0xee, 0x26, 0xde, 0x41, 0x6c, 0x01,
|
||||
0xdc, 0x0b, 0x8b, 0x7c, 0x70, 0x89, 0xfb, 0x01, 0x06, 0x47, 0xa8, 0xb8, 0xaf, 0x6c, 0xd9, 0x1e,
|
||||
0xe4, 0x55, 0x6e, 0x65, 0x37, 0xc8, 0x6f, 0x2c, 0x5e, 0xf9, 0xf6, 0xbf, 0x70, 0x20, 0x11, 0xb9,
|
||||
0xb0, 0xd2, 0x8f, 0x2d, 0x0a, 0x64, 0x9a, 0x9c, 0x77, 0x0e, 0x0c, 0xf8, 0xbd, 0xb9, 0x39, 0x0e,
|
||||
0xc3, 0x09, 0x1d, 0xe8, 0x08, 0x4a, 0x43, 0xe9, 0x2f, 0xcb, 0xf4, 0x4a, 0xba, 0xf7, 0x3a, 0x2e,
|
||||
0x34, 0x6e, 0xf2, 0xf7, 0xc6, 0x61, 0x12, 0x79, 0x3a, 0xaa, 0xae, 0x25, 0x81, 0x78, 0x52, 0x86,
|
||||
0xfa, 0x77, 0x05, 0xde, 0x9a, 0x7a, 0x13, 0x57, 0x10, 0x7a, 0x9d, 0x78, 0xe8, 0xed, 0x5c, 0x46,
|
||||
0xe8, 0xa5, 0xc7, 0xdc, 0x6f, 0x16, 0x5e, 0x60, 0xa9, 0x08, 0xb6, 0xc7, 0x50, 0xb0, 0xfd, 0xd9,
|
||||
0x25, 0x65, 0xd3, 0x93, 0x25, 0x72, 0x38, 0x6b, 0x63, 0x99, 0xf7, 0xcf, 0xe0, 0x13, 0x87, 0x42,
|
||||
0xd1, 0x8f, 0x61, 0xcd, 0x9f, 0xed, 0x39, 0xbf, 0x61, 0xba, 0xfe, 0x80, 0x76, 0xf1, 0xf0, 0xb9,
|
||||
0x36, 0x1e, 0x55, 0xd7, 0x76, 0x13, 0x52, 0xf1, 0x84, 0x1e, 0xd4, 0x85, 0x62, 0x78, 0xfd, 0xfe,
|
||||
0xfb, 0xfe, 0xbd, 0xf3, 0xfb, 0xdb, 0x32, 0x1b, 0xaf, 0x49, 0x07, 0x17, 0x43, 0x18, 0xc3, 0x51,
|
||||
0xe9, 0x97, 0xfc, 0xd0, 0xff, 0x19, 0xac, 0x91, 0xf8, 0xa2, 0x93, 0x95, 0xe7, 0xcf, 0xfb, 0x08,
|
||||
0x49, 0xac, 0x4a, 0x1b, 0x65, 0x69, 0xc4, 0x5a, 0x02, 0xc1, 0xf0, 0x84, 0xb2, 0xb4, 0xd7, 0xdf,
|
||||
0xc2, 0x55, 0xbd, 0xfe, 0x90, 0x06, 0x85, 0x21, 0x71, 0x0c, 0xd2, 0xec, 0x51, 0xfe, 0xd4, 0xce,
|
||||
0x9d, 0xaf, 0xa0, 0x1d, 0x4a, 0xd6, 0x70, 0xb2, 0xf3, 0x21, 0x0c, 0x87, 0x72, 0xd5, 0x3f, 0xcc,
|
||||
0x42, 0xf5, 0x8c, 0xf6, 0x8d, 0x1e, 0x00, 0xb2, 0x9a, 0x8c, 0x3a, 0x43, 0xaa, 0xdf, 0xf3, 0x56,
|
||||
0xd1, 0xfe, 0x58, 0x9f, 0x0b, 0x07, 0xaa, 0x47, 0x13, 0x14, 0x38, 0x85, 0x0b, 0xf5, 0x60, 0xc9,
|
||||
0x8d, 0x8c, 0x7a, 0x32, 0x0b, 0xde, 0xcf, 0x6e, 0x57, 0x74, 0x50, 0x6c, 0xac, 0x8d, 0x47, 0xd5,
|
||||
0xd8, 0xe8, 0x88, 0x63, 0xd2, 0x91, 0x06, 0xa0, 0x85, 0x57, 0xe7, 0x85, 0x7e, 0x3d, 0x5b, 0x15,
|
||||
0x0b, 0x6f, 0x2c, 0xe8, 0x3b, 0x91, 0xcb, 0x8a, 0x88, 0x55, 0x4f, 0x16, 0xa1, 0x14, 0xba, 0xf0,
|
||||
0xd5, 0xae, 0xef, 0xd5, 0xae, 0xef, 0x85, 0xbb, 0x3e, 0x78, 0xb5, 0xeb, 0xbb, 0xd0, 0xae, 0x2f,
|
||||
0xa5, 0x16, 0x17, 0xaf, 0x6c, 0x13, 0x77, 0xa2, 0x40, 0x65, 0x22, 0xc7, 0xaf, 0x7a, 0x17, 0xf7,
|
||||
0xc5, 0xc4, 0x2e, 0xee, 0xa3, 0x8b, 0x8c, 0x4d, 0xd3, 0xb6, 0x71, 0xff, 0x54, 0x40, 0x7d, 0xb1,
|
||||
0x8d, 0x57, 0x30, 0x17, 0xf6, 0xe3, 0x73, 0xe1, 0x77, 0xff, 0x07, 0x03, 0xb3, 0x6c, 0xe4, 0xfe,
|
||||
0xa3, 0x00, 0x84, 0xc3, 0x0c, 0x7a, 0x1b, 0x22, 0x3f, 0x14, 0xca, 0xd2, 0xed, 0xb9, 0x29, 0x02,
|
||||
0x47, 0x37, 0x61, 0xb1, 0x4f, 0x19, 0x23, 0x6d, 0x7f, 0x21, 0x12, 0xfc, 0x8e, 0xb9, 0xeb, 0x81,
|
||||
0xb1, 0x8f, 0x47, 0x47, 0xb0, 0xe0, 0x50, 0xc2, 0x2c, 0x53, 0x2e, 0x46, 0xbe, 0xc3, 0x5f, 0xc1,
|
||||
0x58, 0x40, 0x4e, 0x47, 0xd5, 0x5b, 0x59, 0x7e, 0x67, 0xae, 0xc9, 0x47, 0xb3, 0x60, 0xc2, 0x52,
|
||||
0x1c, 0xba, 0x07, 0x25, 0xa9, 0x23, 0x72, 0x60, 0xaf, 0xd2, 0xbe, 0x21, 0x4f, 0x53, 0xda, 0x4d,
|
||||
0x12, 0xe0, 0x49, 0x1e, 0xf5, 0x01, 0xe4, 0xfd, 0xc1, 0x00, 0x95, 0x61, 0x2e, 0xf2, 0xde, 0xf2,
|
||||
0x0c, 0x17, 0x90, 0x84, 0x63, 0x66, 0xd3, 0x1d, 0xa3, 0xfe, 0x5e, 0x81, 0xd7, 0x52, 0x9a, 0x12,
|
||||
0x7a, 0x03, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0xb0, 0x38, 0x1e, 0x55, 0x73, 0x9f, 0xe1, 0x87, 0x98,
|
||||
0xc3, 0x10, 0x81, 0x45, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xce, 0x7e, 0xe3, 0xc9, 0xbd, 0x56,
|
||||
0xa3, 0xc8, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x26, 0xe4, 0x35, 0xd2, 0x18, 0x98, 0x7a, 0xcf,
|
||||
0xbb, 0xaf, 0x25, 0xef, 0x8d, 0xb7, 0xb3, 0xed, 0xc1, 0x70, 0x80, 0x6d, 0xec, 0x3d, 0x3b, 0xa9,
|
||||
0xcc, 0x7c, 0x75, 0x52, 0x99, 0x79, 0x7e, 0x52, 0x99, 0xf9, 0xf9, 0xb8, 0xa2, 0x3c, 0x1b, 0x57,
|
||||
0x94, 0xaf, 0xc6, 0x15, 0xe5, 0xf9, 0xb8, 0xa2, 0xfc, 0x75, 0x5c, 0x51, 0x7e, 0xf9, 0xb7, 0xca,
|
||||
0xcc, 0xf7, 0x37, 0xb3, 0xfe, 0x97, 0xc3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x71, 0x54, 0x54,
|
||||
0xe6, 0x29, 0x21, 0x00, 0x00,
|
||||
var fileDescriptor_7f7c65a4f012fb19 = []byte{
|
||||
// 1957 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x1a, 0x4d, 0x6f, 0x1b, 0xc7,
|
||||
0xd5, 0x2b, 0x52, 0x12, 0xf9, 0xa8, 0x2f, 0x4e, 0x9c, 0x8a, 0x76, 0x1c, 0x52, 0x58, 0x04, 0x85,
|
||||
0x0c, 0xb4, 0x64, 0xac, 0x04, 0x89, 0xeb, 0xa0, 0x28, 0x44, 0xc5, 0x76, 0xed, 0x58, 0xb2, 0x30,
|
||||
0x4a, 0x24, 0xa0, 0x4d, 0x00, 0x8f, 0x76, 0x87, 0xe4, 0x96, 0xe4, 0xee, 0x76, 0x67, 0x49, 0x5b,
|
||||
0x2d, 0xd0, 0x16, 0xe8, 0x21, 0xd7, 0x02, 0xbd, 0x14, 0xe8, 0xa9, 0x7f, 0xa1, 0xf7, 0x02, 0xed,
|
||||
0xcd, 0xc7, 0xdc, 0x6a, 0xa0, 0x28, 0x51, 0xb1, 0x87, 0x9e, 0x7a, 0xe8, 0xa1, 0x3d, 0xe8, 0xd2,
|
||||
0x62, 0x66, 0x67, 0x3f, 0xb9, 0xb4, 0x56, 0xaa, 0xac, 0x5c, 0x7c, 0xd3, 0xbe, 0xcf, 0x79, 0x6f,
|
||||
0xde, 0xd7, 0x3c, 0x0a, 0x6e, 0x77, 0x6f, 0xb3, 0xba, 0x61, 0x35, 0x88, 0x6d, 0x34, 0x88, 0xde,
|
||||
0x37, 0x18, 0x33, 0x2c, 0xd3, 0xa1, 0x6d, 0x83, 0xb9, 0x0e, 0x71, 0x0d, 0xcb, 0x6c, 0x0c, 0x6f,
|
||||
0x1d, 0x52, 0x97, 0xdc, 0x6a, 0xb4, 0xa9, 0x49, 0x1d, 0xe2, 0x52, 0xbd, 0x6e, 0x3b, 0x96, 0x6b,
|
||||
0xa1, 0x75, 0x8f, 0xb3, 0x4e, 0x6c, 0xa3, 0x9e, 0xca, 0x59, 0x97, 0x9c, 0xd7, 0xbf, 0xdd, 0x36,
|
||||
0xdc, 0xce, 0xe0, 0xb0, 0xae, 0x59, 0xfd, 0x46, 0xdb, 0x6a, 0x5b, 0x0d, 0x21, 0xe0, 0x70, 0xd0,
|
||||
0x12, 0x5f, 0xe2, 0x43, 0xfc, 0xe5, 0x09, 0xbe, 0xfe, 0x5e, 0x86, 0x23, 0x25, 0x4f, 0x73, 0xfd,
|
||||
0xfd, 0x90, 0xa9, 0x4f, 0xb4, 0x8e, 0x61, 0x52, 0xe7, 0xa8, 0x61, 0x77, 0xdb, 0x1c, 0xc0, 0x1a,
|
||||
0x7d, 0xea, 0x92, 0x34, 0xae, 0xc6, 0x34, 0x2e, 0x67, 0x60, 0xba, 0x46, 0x9f, 0x4e, 0x30, 0x7c,
|
||||
0x70, 0x1a, 0x03, 0xd3, 0x3a, 0xb4, 0x4f, 0x92, 0x7c, 0x2a, 0x83, 0xe5, 0xcd, 0x81, 0x6e, 0xb8,
|
||||
0x9b, 0xa6, 0x69, 0xb9, 0xc2, 0x08, 0xf4, 0x36, 0xe4, 0xba, 0xf4, 0xa8, 0xa2, 0xac, 0x29, 0xeb,
|
||||
0xc5, 0x66, 0xe9, 0xf9, 0xa8, 0x76, 0x65, 0x3c, 0xaa, 0xe5, 0x3e, 0xa1, 0x47, 0x98, 0xc3, 0xd1,
|
||||
0x26, 0x2c, 0x0f, 0x49, 0x6f, 0x40, 0xef, 0x3e, 0xb3, 0x1d, 0x2a, 0x5c, 0x50, 0x99, 0x11, 0xa4,
|
||||
0xab, 0x92, 0x74, 0x79, 0x3f, 0x8e, 0xc6, 0x49, 0x7a, 0xb5, 0x07, 0xe5, 0xf0, 0xeb, 0x80, 0x38,
|
||||
0xa6, 0x61, 0xb6, 0xd1, 0xb7, 0xa0, 0xd0, 0x32, 0x68, 0x4f, 0xc7, 0xb4, 0x25, 0x05, 0xae, 0x48,
|
||||
0x81, 0x85, 0x7b, 0x12, 0x8e, 0x03, 0x0a, 0x74, 0x13, 0xe6, 0x9f, 0x7a, 0x8c, 0x95, 0x9c, 0x20,
|
||||
0x5e, 0x96, 0xc4, 0xf3, 0x52, 0x1e, 0xf6, 0xf1, 0x6a, 0x0b, 0x96, 0xb6, 0x89, 0xab, 0x75, 0xb6,
|
||||
0x2c, 0x53, 0x37, 0x84, 0x85, 0x6b, 0x90, 0x37, 0x49, 0x9f, 0x4a, 0x13, 0x17, 0x24, 0x67, 0x7e,
|
||||
0x87, 0xf4, 0x29, 0x16, 0x18, 0xb4, 0x01, 0x40, 0x93, 0xf6, 0x21, 0x49, 0x07, 0x11, 0xd3, 0x22,
|
||||
0x54, 0xea, 0x9f, 0xf3, 0x52, 0x11, 0xa6, 0xcc, 0x1a, 0x38, 0x1a, 0x65, 0xe8, 0x19, 0x94, 0xb9,
|
||||
0x38, 0x66, 0x13, 0x8d, 0xee, 0xd1, 0x1e, 0xd5, 0x5c, 0xcb, 0x11, 0x5a, 0x4b, 0x1b, 0xef, 0xd5,
|
||||
0xc3, 0x30, 0x0d, 0x6e, 0xac, 0x6e, 0x77, 0xdb, 0x1c, 0xc0, 0xea, 0x3c, 0x30, 0xea, 0xc3, 0x5b,
|
||||
0xf5, 0x47, 0xe4, 0x90, 0xf6, 0x7c, 0xd6, 0xe6, 0x9b, 0xe3, 0x51, 0xad, 0xbc, 0x93, 0x94, 0x88,
|
||||
0x27, 0x95, 0x20, 0x0b, 0x96, 0xac, 0xc3, 0x1f, 0x51, 0xcd, 0x0d, 0xd4, 0xce, 0x9c, 0x5f, 0x2d,
|
||||
0x1a, 0x8f, 0x6a, 0x4b, 0x8f, 0x63, 0xe2, 0x70, 0x42, 0x3c, 0xfa, 0x19, 0x2c, 0x3a, 0xd2, 0x6e,
|
||||
0x3c, 0xe8, 0x51, 0x56, 0xc9, 0xad, 0xe5, 0xd6, 0x4b, 0x1b, 0x9b, 0xf5, 0xac, 0xd9, 0x58, 0xe7,
|
||||
0x76, 0xe9, 0x9c, 0xf7, 0xc0, 0x70, 0x3b, 0x8f, 0x6d, 0xea, 0xa1, 0x59, 0xf3, 0x4d, 0xe9, 0xf7,
|
||||
0x45, 0x1c, 0x95, 0x8f, 0xe3, 0xea, 0xd0, 0xaf, 0x15, 0xb8, 0x4a, 0x9f, 0x69, 0xbd, 0x81, 0x4e,
|
||||
0x63, 0x74, 0x95, 0xfc, 0x45, 0x9d, 0xe3, 0x86, 0x3c, 0xc7, 0xd5, 0xbb, 0x29, 0x6a, 0x70, 0xaa,
|
||||
0x72, 0xf4, 0x31, 0x94, 0xfa, 0x3c, 0x24, 0x76, 0xad, 0x9e, 0xa1, 0x1d, 0x55, 0xe6, 0x45, 0x20,
|
||||
0xa9, 0xe3, 0x51, 0xad, 0xb4, 0x1d, 0x82, 0x4f, 0x46, 0xb5, 0xe5, 0xc8, 0xe7, 0xa7, 0x47, 0x36,
|
||||
0xc5, 0x51, 0x36, 0xf5, 0x4f, 0x05, 0x58, 0xde, 0x1e, 0xf0, 0xf4, 0x34, 0xdb, 0x07, 0xf4, 0xb0,
|
||||
0x63, 0x59, 0xdd, 0x0c, 0x31, 0xfc, 0x14, 0x16, 0xb4, 0x9e, 0x41, 0x4d, 0x77, 0xcb, 0x32, 0x5b,
|
||||
0x46, 0x5b, 0x06, 0xc0, 0x77, 0xb3, 0x3b, 0x42, 0xaa, 0xda, 0x8a, 0x08, 0x69, 0x5e, 0x95, 0x8a,
|
||||
0x16, 0xa2, 0x50, 0x1c, 0x53, 0x84, 0x3e, 0x87, 0x59, 0x27, 0x12, 0x02, 0x1f, 0x66, 0xd1, 0x58,
|
||||
0x4f, 0x71, 0xf8, 0xa2, 0xd4, 0x35, 0xeb, 0x79, 0xd8, 0x13, 0x8a, 0x1e, 0xc1, 0x62, 0x8b, 0x18,
|
||||
0xbd, 0x81, 0x43, 0xa5, 0x53, 0xf3, 0xc2, 0x03, 0xdf, 0xe4, 0x11, 0x72, 0x2f, 0x8a, 0x38, 0x19,
|
||||
0xd5, 0xca, 0x31, 0x80, 0x70, 0x6c, 0x9c, 0x39, 0x79, 0x41, 0xc5, 0x73, 0x5d, 0x50, 0x7a, 0x9e,
|
||||
0xcf, 0x7e, 0x3d, 0x79, 0x5e, 0x7a, 0xb5, 0x79, 0xfe, 0x31, 0x94, 0x98, 0xa1, 0xd3, 0xbb, 0xad,
|
||||
0x16, 0xd5, 0x5c, 0x56, 0x99, 0x0b, 0x1d, 0xb6, 0x17, 0x82, 0xb9, 0xc3, 0xc2, 0xcf, 0xad, 0x1e,
|
||||
0x61, 0x0c, 0x47, 0xd9, 0xd0, 0x1d, 0x58, 0xe2, 0x5d, 0xc9, 0x1a, 0xb8, 0x7b, 0x54, 0xb3, 0x4c,
|
||||
0x9d, 0x89, 0xd4, 0x98, 0xf5, 0x4e, 0xf0, 0x69, 0x0c, 0x83, 0x13, 0x94, 0xe8, 0x33, 0x58, 0x0d,
|
||||
0xa2, 0x08, 0xd3, 0xa1, 0x41, 0x9f, 0xee, 0x53, 0x87, 0x7f, 0xb0, 0x4a, 0x61, 0x2d, 0xb7, 0x5e,
|
||||
0x6c, 0xbe, 0x35, 0x1e, 0xd5, 0x56, 0x37, 0xd3, 0x49, 0xf0, 0x34, 0x5e, 0xf4, 0x04, 0x90, 0x43,
|
||||
0x0d, 0x73, 0x68, 0x69, 0x22, 0xfc, 0x64, 0x40, 0x80, 0xb0, 0xef, 0xdd, 0xf1, 0xa8, 0x86, 0xf0,
|
||||
0x04, 0xf6, 0x64, 0x54, 0xfb, 0xc6, 0x24, 0x54, 0x84, 0x47, 0x8a, 0x2c, 0xf4, 0x53, 0x58, 0xee,
|
||||
0xc7, 0x1a, 0x11, 0xab, 0x2c, 0x88, 0x0c, 0xb9, 0x9d, 0x3d, 0x27, 0xe3, 0x9d, 0x2c, 0xec, 0xb9,
|
||||
0x71, 0x38, 0xc3, 0x49, 0x4d, 0xea, 0x5f, 0x15, 0xb8, 0x91, 0xa8, 0x21, 0x5e, 0xba, 0x0e, 0x3c,
|
||||
0x0d, 0xe8, 0x09, 0x14, 0x78, 0x54, 0xe8, 0xc4, 0x25, 0xb2, 0x45, 0xbd, 0x9b, 0x2d, 0x86, 0xbc,
|
||||
0x80, 0xd9, 0xa6, 0x2e, 0x09, 0x5b, 0x64, 0x08, 0xc3, 0x81, 0x54, 0xf4, 0x43, 0x28, 0x48, 0xcd,
|
||||
0xac, 0x32, 0x23, 0x0c, 0xff, 0xce, 0x19, 0x0c, 0x8f, 0x9f, 0xbd, 0x99, 0xe7, 0xaa, 0x70, 0x20,
|
||||
0x50, 0xfd, 0xa7, 0x02, 0x6b, 0x2f, 0xb3, 0xef, 0x91, 0xc1, 0x5c, 0xf4, 0xf9, 0x84, 0x8d, 0xf5,
|
||||
0x8c, 0x79, 0x62, 0x30, 0xcf, 0xc2, 0x60, 0x26, 0xf1, 0x21, 0x11, 0xfb, 0xba, 0x30, 0x6b, 0xb8,
|
||||
0xb4, 0xef, 0x1b, 0x77, 0xef, 0xdc, 0xc6, 0xc5, 0x0e, 0x1e, 0x96, 0xc1, 0x07, 0x5c, 0x38, 0xf6,
|
||||
0x74, 0xa8, 0x2f, 0x14, 0x58, 0x9d, 0xd2, 0xa9, 0xd0, 0x87, 0x61, 0x2f, 0x16, 0x45, 0xa4, 0xa2,
|
||||
0x88, 0xbc, 0x28, 0x47, 0x9b, 0xa8, 0x40, 0xe0, 0x38, 0x1d, 0xfa, 0xa5, 0x02, 0xc8, 0x99, 0x90,
|
||||
0x27, 0x3b, 0xc7, 0xb9, 0xeb, 0xf8, 0x75, 0x69, 0x00, 0x9a, 0xc4, 0xe1, 0x14, 0x75, 0x2a, 0x81,
|
||||
0xe2, 0x2e, 0x71, 0x48, 0xff, 0x13, 0xc3, 0xd4, 0xf9, 0x24, 0x46, 0x6c, 0x43, 0x66, 0xa9, 0xec,
|
||||
0x76, 0x41, 0x98, 0x6d, 0xee, 0x3e, 0x90, 0x18, 0x1c, 0xa1, 0xe2, 0xbd, 0xb1, 0x6b, 0x98, 0xba,
|
||||
0x9c, 0xdb, 0x82, 0xde, 0xc8, 0xe5, 0x61, 0x81, 0x51, 0x7f, 0x3f, 0x03, 0x05, 0xa1, 0x83, 0xcf,
|
||||
0x92, 0xa7, 0xb7, 0xd2, 0x06, 0x14, 0x83, 0xd2, 0x2b, 0xa5, 0x96, 0x25, 0x59, 0x31, 0x28, 0xd3,
|
||||
0x38, 0xa4, 0x41, 0x5f, 0x40, 0x81, 0xf9, 0x05, 0x39, 0x77, 0xfe, 0x82, 0xbc, 0xc0, 0x23, 0x2d,
|
||||
0x28, 0xc5, 0x81, 0x48, 0xe4, 0xc2, 0xaa, 0xcd, 0x4f, 0x4f, 0x5d, 0xea, 0xec, 0x58, 0xee, 0x3d,
|
||||
0x6b, 0x60, 0xea, 0x9b, 0x1a, 0xf7, 0x9e, 0xec, 0x86, 0x77, 0x78, 0x09, 0xdc, 0x4d, 0x27, 0x39,
|
||||
0x19, 0xd5, 0xde, 0x9a, 0x82, 0x12, 0xa5, 0x6b, 0x9a, 0x68, 0xf5, 0x77, 0x0a, 0xac, 0xec, 0x51,
|
||||
0x67, 0x68, 0x68, 0x14, 0xd3, 0x16, 0x75, 0xa8, 0xa9, 0x25, 0x5c, 0xa3, 0x64, 0x70, 0x8d, 0xef,
|
||||
0xed, 0x99, 0xa9, 0xde, 0xbe, 0x01, 0x79, 0x9b, 0xb8, 0x1d, 0x39, 0xd8, 0x17, 0x38, 0x76, 0x97,
|
||||
0xb8, 0x1d, 0x2c, 0xa0, 0x02, 0x6b, 0x39, 0xae, 0x30, 0x74, 0x56, 0x62, 0x2d, 0xc7, 0xc5, 0x02,
|
||||
0xaa, 0xfe, 0x46, 0x81, 0x05, 0x6e, 0xc5, 0x56, 0x87, 0x6a, 0x5d, 0xfe, 0xac, 0xf8, 0x52, 0x01,
|
||||
0x44, 0x93, 0x8f, 0x0d, 0x2f, 0x23, 0x4a, 0x1b, 0x1f, 0x65, 0x4f, 0xd1, 0x89, 0x07, 0x4b, 0x18,
|
||||
0xd6, 0x13, 0x28, 0x86, 0x53, 0x54, 0xaa, 0x7f, 0x99, 0x81, 0x6b, 0xfb, 0xa4, 0x67, 0xe8, 0x22,
|
||||
0xd5, 0x83, 0xfe, 0x24, 0x9b, 0xc3, 0xab, 0x2f, 0xbf, 0x06, 0xe4, 0x99, 0x4d, 0x35, 0x99, 0xcd,
|
||||
0xf7, 0xb3, 0x9b, 0x3e, 0xf5, 0xd0, 0x7b, 0x36, 0xd5, 0xc2, 0x1b, 0xe4, 0x5f, 0x58, 0xa8, 0x40,
|
||||
0x3f, 0x86, 0x39, 0xe6, 0x12, 0x77, 0xc0, 0x64, 0xf0, 0x3f, 0xb8, 0x08, 0x65, 0x42, 0x60, 0x73,
|
||||
0x49, 0xaa, 0x9b, 0xf3, 0xbe, 0xb1, 0x54, 0xa4, 0xfe, 0x47, 0x81, 0xb5, 0xa9, 0xbc, 0x4d, 0xc3,
|
||||
0xd4, 0x79, 0x30, 0xbc, 0x7a, 0x27, 0xdb, 0x31, 0x27, 0xef, 0x5c, 0x80, 0xdd, 0xf2, 0xec, 0xd3,
|
||||
0x7c, 0xad, 0xfe, 0x5b, 0x81, 0x77, 0x4e, 0x63, 0xbe, 0x84, 0xe6, 0x67, 0xc5, 0x9b, 0xdf, 0xc3,
|
||||
0x8b, 0xb3, 0x7c, 0x4a, 0x03, 0xfc, 0x32, 0x77, 0xba, 0xdd, 0xdc, 0x4d, 0xbc, 0x83, 0xd8, 0x02,
|
||||
0xb8, 0x13, 0x16, 0xf9, 0xe0, 0x12, 0x77, 0x03, 0x0c, 0x8e, 0x50, 0x71, 0x5f, 0xd9, 0xb2, 0x3d,
|
||||
0xc8, 0xab, 0xdc, 0xc8, 0x6e, 0x90, 0xdf, 0x58, 0xbc, 0xf2, 0xed, 0x7f, 0xe1, 0x40, 0x22, 0x72,
|
||||
0x61, 0xa9, 0x1f, 0x5b, 0x14, 0xc8, 0x34, 0x39, 0xeb, 0x1c, 0x18, 0xf0, 0x7b, 0x73, 0x73, 0x1c,
|
||||
0x86, 0x13, 0x3a, 0xd0, 0x01, 0x94, 0x87, 0xd2, 0x5f, 0x96, 0xe9, 0x95, 0x74, 0xef, 0x75, 0x5c,
|
||||
0x6c, 0xde, 0xe4, 0xef, 0x8d, 0xfd, 0x24, 0xf2, 0x64, 0x54, 0x5b, 0x49, 0x02, 0xf1, 0xa4, 0x0c,
|
||||
0xf5, 0x1f, 0x0a, 0xbc, 0x3d, 0xf5, 0x26, 0x2e, 0x21, 0xf4, 0x3a, 0xf1, 0xd0, 0xdb, 0xba, 0x88,
|
||||
0xd0, 0x4b, 0x8f, 0xb9, 0xdf, 0xce, 0xbd, 0xc4, 0x52, 0x11, 0x6c, 0x4f, 0xa0, 0x68, 0xfb, 0xb3,
|
||||
0x4b, 0xca, 0xa6, 0x27, 0x4b, 0xe4, 0x70, 0xd6, 0xe6, 0x22, 0xef, 0x9f, 0xc1, 0x27, 0x0e, 0x85,
|
||||
0xa2, 0x9f, 0xc0, 0x8a, 0x3f, 0xdb, 0x73, 0x7e, 0xc3, 0x74, 0xfd, 0x01, 0xed, 0xfc, 0xe1, 0x73,
|
||||
0x75, 0x3c, 0xaa, 0xad, 0x6c, 0x27, 0xa4, 0xe2, 0x09, 0x3d, 0xa8, 0x0b, 0xa5, 0xf0, 0xfa, 0xfd,
|
||||
0xf7, 0xfd, 0xfb, 0x67, 0xf7, 0xb7, 0x65, 0x36, 0xdf, 0x90, 0x0e, 0x2e, 0x85, 0x30, 0x86, 0xa3,
|
||||
0xd2, 0x2f, 0xf8, 0xa1, 0xff, 0x73, 0x58, 0x21, 0xf1, 0x45, 0x27, 0xab, 0xcc, 0x9e, 0xf5, 0x11,
|
||||
0x92, 0x58, 0x95, 0x36, 0x2b, 0xd2, 0x88, 0x95, 0x04, 0x82, 0xe1, 0x09, 0x65, 0x69, 0xaf, 0xbf,
|
||||
0xb9, 0xcb, 0x7a, 0xfd, 0x21, 0x0d, 0x8a, 0x43, 0xe2, 0x18, 0xe4, 0xb0, 0x47, 0xf9, 0x53, 0x3b,
|
||||
0x77, 0xb6, 0x82, 0xb6, 0x2f, 0x59, 0xc3, 0xc9, 0xce, 0x87, 0x30, 0x1c, 0xca, 0x55, 0xff, 0x38,
|
||||
0x03, 0xb5, 0x53, 0xda, 0x37, 0x7a, 0x08, 0xc8, 0x3a, 0x64, 0xd4, 0x19, 0x52, 0xfd, 0xbe, 0xb7,
|
||||
0x8a, 0xf6, 0xc7, 0xfa, 0x5c, 0x38, 0x50, 0x3d, 0x9e, 0xa0, 0xc0, 0x29, 0x5c, 0xa8, 0x07, 0x0b,
|
||||
0x6e, 0x64, 0xd4, 0x93, 0x59, 0xf0, 0x41, 0x76, 0xbb, 0xa2, 0x83, 0x62, 0x73, 0x65, 0x3c, 0xaa,
|
||||
0xc5, 0x46, 0x47, 0x1c, 0x93, 0x8e, 0x34, 0x00, 0x2d, 0xbc, 0x3a, 0x2f, 0xf4, 0x1b, 0xd9, 0xaa,
|
||||
0x58, 0x78, 0x63, 0x41, 0xdf, 0x89, 0x5c, 0x56, 0x44, 0xac, 0x7a, 0x3c, 0x0f, 0xe5, 0xd0, 0x85,
|
||||
0xaf, 0x77, 0x7d, 0xaf, 0x77, 0x7d, 0x2f, 0xdd, 0xf5, 0xc1, 0xeb, 0x5d, 0xdf, 0xb9, 0x76, 0x7d,
|
||||
0x29, 0xb5, 0xb8, 0x74, 0x69, 0x9b, 0xb8, 0x63, 0x05, 0xaa, 0x13, 0x39, 0x7e, 0xd9, 0xbb, 0xb8,
|
||||
0x2f, 0x26, 0x76, 0x71, 0x1f, 0x9d, 0x67, 0x6c, 0x9a, 0xb6, 0x8d, 0xfb, 0x97, 0x02, 0xea, 0xcb,
|
||||
0x6d, 0xbc, 0x84, 0xb9, 0xb0, 0x1f, 0x9f, 0x0b, 0xbf, 0xff, 0x7f, 0x18, 0x98, 0x65, 0x23, 0xf7,
|
||||
0x5f, 0x05, 0x20, 0x1c, 0x66, 0xd0, 0x3b, 0x10, 0xf9, 0xa1, 0x50, 0x96, 0x6e, 0xcf, 0x4d, 0x11,
|
||||
0x38, 0xba, 0x09, 0xf3, 0x7d, 0xca, 0x18, 0x69, 0xfb, 0x0b, 0x91, 0xe0, 0x77, 0xcc, 0x6d, 0x0f,
|
||||
0x8c, 0x7d, 0x3c, 0x3a, 0x80, 0x39, 0x87, 0x12, 0x66, 0x99, 0x72, 0x31, 0xf2, 0x3d, 0xfe, 0x0a,
|
||||
0xc6, 0x02, 0x72, 0x32, 0xaa, 0xdd, 0xca, 0xf2, 0x3b, 0x73, 0x5d, 0x3e, 0x9a, 0x05, 0x13, 0x96,
|
||||
0xe2, 0xd0, 0x7d, 0x28, 0x4b, 0x1d, 0x91, 0x03, 0x7b, 0x95, 0xf6, 0x9a, 0x3c, 0x4d, 0x79, 0x3b,
|
||||
0x49, 0x80, 0x27, 0x79, 0xd4, 0x87, 0x50, 0xf0, 0x07, 0x03, 0x54, 0x81, 0x7c, 0xe4, 0xbd, 0xe5,
|
||||
0x19, 0x2e, 0x20, 0x09, 0xc7, 0xcc, 0xa4, 0x3b, 0x46, 0xfd, 0x83, 0x02, 0x6f, 0xa4, 0x34, 0x25,
|
||||
0x74, 0x0d, 0x72, 0x03, 0xa7, 0x27, 0x5d, 0x30, 0x3f, 0x1e, 0xd5, 0x72, 0x9f, 0xe1, 0x47, 0x98,
|
||||
0xc3, 0x10, 0x81, 0x79, 0xe6, 0xad, 0xa7, 0x64, 0x30, 0xdd, 0xc9, 0x7e, 0xe3, 0xc9, 0xbd, 0x56,
|
||||
0xb3, 0xc4, 0xef, 0xc0, 0x87, 0xfa, 0x72, 0xd1, 0x3a, 0x14, 0x34, 0xd2, 0x1c, 0x98, 0x7a, 0xcf,
|
||||
0xbb, 0xaf, 0x05, 0xef, 0x8d, 0xb7, 0xb5, 0xe9, 0xc1, 0x70, 0x80, 0x6d, 0xee, 0x3c, 0x3f, 0xae,
|
||||
0x5e, 0xf9, 0xea, 0xb8, 0x7a, 0xe5, 0xc5, 0x71, 0xf5, 0xca, 0x2f, 0xc6, 0x55, 0xe5, 0xf9, 0xb8,
|
||||
0xaa, 0x7c, 0x35, 0xae, 0x2a, 0x2f, 0xc6, 0x55, 0xe5, 0x6f, 0xe3, 0xaa, 0xf2, 0xab, 0xbf, 0x57,
|
||||
0xaf, 0xfc, 0x60, 0x3d, 0xeb, 0x7f, 0x39, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6f, 0xf2, 0xe8,
|
||||
0x4a, 0x10, 0x21, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *AuditAnnotation) Marshal() (dAtA []byte, err error) {
|
||||
|
||||
53 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto (generated, vendored)
@@ -157,7 +157,7 @@ message MatchResources {
  //
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 1;

  // ObjectSelector decides whether to run the validation based on if the
  // object has matching labels. objectSelector is evaluated against both
@@ -171,7 +171,7 @@ message MatchResources {
  // users may skip the admission webhook by setting the labels.
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 2;

  // ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
  // The policy cares about an operation if it matches _any_ Rule.
@@ -222,7 +222,8 @@ message MutatingWebhook {
  // from putting the cluster in a state which cannot be recovered from without completely
  // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
  // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
  repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
  // +listType=atomic
  repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;

  // FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
  // allowed values are Ignore or Fail. Defaults to Ignore.
@@ -290,7 +291,7 @@ message MutatingWebhook {
  //
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;

  // ObjectSelector decides whether to run the webhook based on if the
  // object has matching labels. objectSelector is evaluated against both
@@ -304,7 +305,7 @@ message MutatingWebhook {
  // users may skip the admission webhook by setting the labels.
  // Default to the empty LabelSelector, which matches everything.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11;

  // SideEffects states whether this webhook has side effects.
  // Acceptable values are: Unknown, None, Some, NoneOnDryRun
@@ -332,6 +333,7 @@ message MutatingWebhook {
  // and be subject to the failure policy.
  // Default to `['v1beta1']`.
  // +optional
  // +listType=atomic
  repeated string admissionReviewVersions = 8;

  // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
@@ -364,13 +366,10 @@ message MutatingWebhook {
  // - If failurePolicy=Fail, reject the request
  // - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
  //
  // This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
  //
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  // +featureGate=AdmissionWebhookMatchConditions
  // +optional
  repeated MatchCondition matchConditions = 12;
}
@@ -380,12 +379,14 @@ message MutatingWebhook {
message MutatingWebhookConfiguration {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Webhooks is a list of webhooks and the affected resources and operations.
  // +optional
  // +patchMergeKey=name
  // +patchStrategy=merge
  // +listType=map
  // +listMapKey=name
  repeated MutatingWebhook Webhooks = 2;
}

@@ -394,7 +395,7 @@ message MutatingWebhookConfigurationList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of MutatingWebhookConfiguration.
  repeated MutatingWebhookConfiguration items = 2;
@@ -409,7 +410,7 @@ message NamedRuleWithOperations {
  repeated string resourceNames = 1;

  // RuleWithOperations is a tuple of Operations and Resources.
  optional k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
  optional .k8s.io.api.admissionregistration.v1.RuleWithOperations ruleWithOperations = 2;
}

// ParamKind is a tuple of Group Kind and Version.
@@ -467,7 +468,7 @@ message ParamRef {
  // mutually exclusive properties. If one is set, the other must be unset.
  //
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;

  // `parameterNotFoundAction` controls the behavior of the binding when the resource
  // exists, and name or selector is valid, but there are no parameters
@@ -522,7 +523,7 @@ message TypeChecking {
message ValidatingAdmissionPolicy {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Specification of the desired behavior of the ValidatingAdmissionPolicy.
  optional ValidatingAdmissionPolicySpec spec = 2;
@@ -549,7 +550,7 @@ message ValidatingAdmissionPolicy {
message ValidatingAdmissionPolicyBinding {
  // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.
  optional ValidatingAdmissionPolicyBindingSpec spec = 2;
@@ -560,7 +561,7 @@ message ValidatingAdmissionPolicyBindingList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of PolicyBinding.
  repeated ValidatingAdmissionPolicyBinding items = 2;
@@ -638,7 +639,7 @@ message ValidatingAdmissionPolicyList {
  // Standard list metadata.
  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
  optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  // List of ValidatingAdmissionPolicy.
  repeated ValidatingAdmissionPolicy items = 2;
@@ -743,7 +744,7 @@ message ValidatingAdmissionPolicyStatus {
  // +optional
  // +listType=map
  // +listMapKey=type
  repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
  repeated .k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 3;
|
||||
}
|
||||
|
||||
// ValidatingWebhook describes an admission webhook and the resources and operations it applies to.
|
||||
@@ -765,7 +766,8 @@ message ValidatingWebhook {
|
||||
// from putting the cluster in a state which cannot be recovered from without completely
|
||||
// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
|
||||
// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
|
||||
repeated k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
|
||||
// +listType=atomic
|
||||
repeated .k8s.io.api.admissionregistration.v1.RuleWithOperations rules = 3;
|
||||
|
||||
// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
|
||||
// allowed values are Ignore or Fail. Defaults to Ignore.
|
||||
@@ -833,7 +835,7 @@ message ValidatingWebhook {
|
||||
//
|
||||
// Default to the empty LabelSelector, which matches everything.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
|
||||
|
||||
// ObjectSelector decides whether to run the webhook based on if the
|
||||
// object has matching labels. objectSelector is evaluated against both
|
||||
@@ -847,7 +849,7 @@ message ValidatingWebhook {
|
||||
// users may skip the admission webhook by setting the labels.
|
||||
// Default to the empty LabelSelector, which matches everything.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10;
|
||||
|
||||
// SideEffects states whether this webhook has side effects.
|
||||
// Acceptable values are: Unknown, None, Some, NoneOnDryRun
|
||||
@@ -856,6 +858,7 @@ message ValidatingWebhook {
|
||||
// Requests with the dryRun attribute will be auto-rejected if they match a webhook with
|
||||
// sideEffects == Unknown or Some. Defaults to Unknown.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
optional string sideEffects = 6;
|
||||
|
||||
// TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
|
||||
@@ -875,6 +878,7 @@ message ValidatingWebhook {
|
||||
// and be subject to the failure policy.
|
||||
// Default to `['v1beta1']`.
|
||||
// +optional
|
||||
// +listType=atomic
|
||||
repeated string admissionReviewVersions = 8;
|
||||
|
||||
// MatchConditions is a list of conditions that must be met for a request to be sent to this
|
||||
@@ -889,13 +893,10 @@ message ValidatingWebhook {
|
||||
// - If failurePolicy=Fail, reject the request
|
||||
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
|
||||
//
|
||||
// This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
|
||||
//
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
// +featureGate=AdmissionWebhookMatchConditions
|
||||
// +optional
|
||||
repeated MatchCondition matchConditions = 11;
|
||||
}
|
||||
@@ -905,12 +906,14 @@ message ValidatingWebhook {
|
||||
message ValidatingWebhookConfiguration {
|
||||
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Webhooks is a list of webhooks and the affected resources and operations.
|
||||
// +optional
|
||||
// +patchMergeKey=name
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=name
|
||||
repeated ValidatingWebhook Webhooks = 2;
|
||||
}
|
||||
|
||||
@@ -919,7 +922,7 @@ message ValidatingWebhookConfigurationList {
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of ValidatingWebhookConfiguration.
|
||||
repeated ValidatingWebhookConfiguration items = 2;
|
||||
|
||||
19 vendor/k8s.io/api/admissionregistration/v1beta1/types.go generated vendored
@@ -158,7 +158,7 @@ type ValidatingAdmissionPolicyList struct {
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ValidatingAdmissionPolicy.
Items []ValidatingAdmissionPolicy `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
Items []ValidatingAdmissionPolicy `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.
@@ -419,7 +419,7 @@ type ValidatingAdmissionPolicyBindingList struct {
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of PolicyBinding.
Items []ValidatingAdmissionPolicyBinding `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
Items []ValidatingAdmissionPolicyBinding `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.
@@ -684,6 +684,8 @@ type ValidatingWebhookConfiguration struct {
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
}

@@ -723,6 +725,8 @@ type MutatingWebhookConfiguration struct {
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
}

@@ -762,6 +766,7 @@ type ValidatingWebhook struct {
// from putting the cluster in a state which cannot be recovered from without completely
// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
// +listType=atomic
Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`

// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
@@ -853,6 +858,7 @@ type ValidatingWebhook struct {
// Requests with the dryRun attribute will be auto-rejected if they match a webhook with
// sideEffects == Unknown or Some. Defaults to Unknown.
// +optional
// +listType=atomic
SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"`

// TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
@@ -872,6 +878,7 @@ type ValidatingWebhook struct {
// and be subject to the failure policy.
// Default to `['v1beta1']`.
// +optional
// +listType=atomic
AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"`

// MatchConditions is a list of conditions that must be met for a request to be sent to this
@@ -886,13 +893,10 @@ type ValidatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
// This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +featureGate=AdmissionWebhookMatchConditions
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=matchConditions"`
}
@@ -916,6 +920,7 @@ type MutatingWebhook struct {
// from putting the cluster in a state which cannot be recovered from without completely
// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
// +listType=atomic
Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`

// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
@@ -1026,6 +1031,7 @@ type MutatingWebhook struct {
// and be subject to the failure policy.
// Default to `['v1beta1']`.
// +optional
// +listType=atomic
AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"`

// reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation.
@@ -1058,13 +1064,10 @@ type MutatingWebhook struct {
// - If failurePolicy=Fail, reject the request
// - If failurePolicy=Ignore, the error is ignored and the webhook is skipped
//
// This is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.
//
// +patchMergeKey=name
// +patchStrategy=merge
// +listType=map
// +listMapKey=name
// +featureGate=AdmissionWebhookMatchConditions
// +optional
MatchConditions []MatchCondition `json:"matchConditions,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=matchConditions"`
}
4 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go generated vendored
@@ -83,7 +83,7 @@ var map_MutatingWebhook = map[string]string{
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
"reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped",
}

func (MutatingWebhook) SwaggerDoc() map[string]string {
@@ -253,7 +253,7 @@ var map_ValidatingWebhook = map[string]string{
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.",
"matchConditions": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped",
}

func (ValidatingWebhook) SwaggerDoc() map[string]string {
23 vendor/k8s.io/api/apidiscovery/v2/doc.go generated vendored Normal file
@@ -0,0 +1,23 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true
// +groupName=apidiscovery.k8s.io

package v2 // import "k8s.io/api/apidiscovery/v2"
1742 vendor/k8s.io/api/apidiscovery/v2/generated.pb.go generated vendored Normal file
File diff suppressed because it is too large
156 vendor/k8s.io/api/apidiscovery/v2/generated.proto generated vendored Normal file
@@ -0,0 +1,156 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/


// This file was autogenerated by go-to-protobuf. Do not edit it manually!

syntax = "proto2";

package k8s.io.api.apidiscovery.v2;

import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";

// Package-wide variables from generator "generated".
option go_package = "k8s.io/api/apidiscovery/v2";

// APIGroupDiscovery holds information about which resources are being served for all version of the API Group.
// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version.
// Versions are in descending order of preference, with the first version being the preferred entry.
message APIGroupDiscovery {
// Standard object's metadata.
// The only field completed will be name. For instance, resourceVersion will be empty.
// name is the name of the API group whose discovery information is presented here.
// name is allowed to be "" to represent the legacy, ungroupified resources.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// versions are the versions supported in this group. They are sorted in descending order of preference,
// with the preferred version being the first entry.
// +listType=map
// +listMapKey=version
repeated APIVersionDiscovery versions = 2;
}

// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery.
// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated
// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers)
// that a cluster supports.
message APIGroupDiscoveryList {
// ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// items is the list of groups for discovery. The groups are listed in priority order.
repeated APIGroupDiscovery items = 2;
}

// APIResourceDiscovery provides information about an API resource for discovery.
message APIResourceDiscovery {
// resource is the plural name of the resource. This is used in the URL path and is the unique identifier
// for this resource across all versions in the API group.
// Resources with non-empty groups are located at /apis/<APIGroupDiscovery.objectMeta.name>/<APIVersionDiscovery.version>/<APIResourceDiscovery.Resource>
// Resources with empty groups are located at /api/v1/<APIResourceDiscovery.Resource>
optional string resource = 1;

// responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
// APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior.
// This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;

// scope indicates the scope of a resource, either Cluster or Namespaced
optional string scope = 3;

// singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely.
// For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence.
// The command line tool kubectl, for example, allows use of the singular resource name in place of plurals.
// The singular form of a resource should always be an optional element - when in doubt use the canonical resource name.
optional string singularResource = 4;

// verbs is a list of supported API operation types (this includes
// but is not limited to get, list, watch, create, update, patch,
// delete, deletecollection, and proxy).
// +listType=set
repeated string verbs = 5;

// shortNames is a list of suggested short names of the resource.
// +listType=set
repeated string shortNames = 6;

// categories is a list of the grouped resources this resource belongs to (e.g. 'all').
// Clients may use this to simplify acting on multiple resource types at once.
// +listType=set
repeated string categories = 7;

// subresources is a list of subresources provided by this resource. Subresources are located at /apis/<APIGroupDiscovery.objectMeta.name>/<APIVersionDiscovery.version>/<APIResourceDiscovery.Resource>/name-of-instance/<APIResourceDiscovery.subresources[i].subresource>
// +listType=map
// +listMapKey=subresource
repeated APISubresourceDiscovery subresources = 8;
}

// APISubresourceDiscovery provides information about an API subresource for discovery.
message APISubresourceDiscovery {
// subresource is the name of the subresource. This is used in the URL path and is the unique identifier
// for this resource across all versions.
optional string subresource = 1;

// responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
// Some subresources do not return normal resources, these will have null or empty return types.
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;

// acceptedTypes describes the kinds that this endpoint accepts.
// Subresources may accept the standard content types or define
// custom negotiation schemes. The list may not be exhaustive for
// all operations.
// +listType=map
// +listMapKey=group
// +listMapKey=version
// +listMapKey=kind
repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;

// verbs is a list of supported API operation types (this includes
// but is not limited to get, list, watch, create, update, patch,
// delete, deletecollection, and proxy). Subresources may define
// custom verbs outside the standard Kubernetes verb set. Clients
// should expect the behavior of standard verbs to align with
// Kubernetes interaction conventions.
// +listType=set
repeated string verbs = 4;
}

// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group.
message APIVersionDiscovery {
// version is the name of the version within a group version.
optional string version = 1;

// resources is a list of APIResourceDiscovery objects for the corresponding group version.
// +listType=map
// +listMapKey=resource
repeated APIResourceDiscovery resources = 2;

// freshness marks whether a group version's discovery document is up to date.
// "Current" indicates the discovery document was recently
// refreshed. "Stale" indicates the discovery document could not
// be retrieved and the returned discovery document may be
// significantly out of date. Clients that require the latest
// version of the discovery information be retrieved before
// performing an operation should not use the aggregated document
optional string freshness = 3;
}
56 vendor/k8s.io/api/apidiscovery/v2/register.go generated vendored Normal file
@@ -0,0 +1,56 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name for this API.
const GroupName = "apidiscovery.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2"}

// Kind takes an unqualified kind and returns a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
// SchemeBuilder installs the api group to a scheme
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme adds api to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)

// Adds the list of known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&APIGroupDiscoveryList{},
&APIGroupDiscovery{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
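Aside from the vendored file above, the following is a minimal sketch (not part of the diff) of how a consumer could install the new apidiscovery.k8s.io/v2 group into a runtime.Scheme via the AddToScheme helper; the package alias and the printed check are illustrative assumptions.

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Build an empty scheme and install the apidiscovery.k8s.io/v2 types into it.
	scheme := runtime.NewScheme()
	if err := apidiscoveryv2.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme now recognizes the kinds registered by addKnownTypes.
	gvk := apidiscoveryv2.SchemeGroupVersion.WithKind("APIGroupDiscovery")
	fmt.Println(scheme.Recognizes(gvk)) // true
}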
157 vendor/k8s.io/api/apidiscovery/v2/types.go generated vendored Normal file
@@ -0,0 +1,157 @@
/*
Copyright 2024 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v2

import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30

// APIGroupDiscoveryList is a resource containing a list of APIGroupDiscovery.
// This is one of the types able to be returned from the /api and /apis endpoint and contains an aggregated
// list of API resources (built-ins, Custom Resource Definitions, resources from aggregated servers)
// that a cluster supports.
type APIGroupDiscoveryList struct {
v1.TypeMeta `json:",inline"`
// ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// items is the list of groups for discovery. The groups are listed in priority order.
Items []APIGroupDiscovery `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30

// APIGroupDiscovery holds information about which resources are being served for all version of the API Group.
// It contains a list of APIVersionDiscovery that holds a list of APIResourceDiscovery types served for a version.
// Versions are in descending order of preference, with the first version being the preferred entry.
type APIGroupDiscovery struct {
v1.TypeMeta `json:",inline"`
// Standard object's metadata.
// The only field completed will be name. For instance, resourceVersion will be empty.
// name is the name of the API group whose discovery information is presented here.
// name is allowed to be "" to represent the legacy, ungroupified resources.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// versions are the versions supported in this group. They are sorted in descending order of preference,
// with the preferred version being the first entry.
// +listType=map
// +listMapKey=version
Versions []APIVersionDiscovery `json:"versions,omitempty" protobuf:"bytes,2,rep,name=versions"`
}

// APIVersionDiscovery holds a list of APIResourceDiscovery types that are served for a particular version within an API Group.
type APIVersionDiscovery struct {
// version is the name of the version within a group version.
Version string `json:"version" protobuf:"bytes,1,opt,name=version"`
// resources is a list of APIResourceDiscovery objects for the corresponding group version.
// +listType=map
// +listMapKey=resource
Resources []APIResourceDiscovery `json:"resources,omitempty" protobuf:"bytes,2,rep,name=resources"`
// freshness marks whether a group version's discovery document is up to date.
// "Current" indicates the discovery document was recently
// refreshed. "Stale" indicates the discovery document could not
// be retrieved and the returned discovery document may be
// significantly out of date. Clients that require the latest
// version of the discovery information be retrieved before
// performing an operation should not use the aggregated document
Freshness DiscoveryFreshness `json:"freshness,omitempty" protobuf:"bytes,3,opt,name=freshness"`
}

// APIResourceDiscovery provides information about an API resource for discovery.
type APIResourceDiscovery struct {
// resource is the plural name of the resource. This is used in the URL path and is the unique identifier
// for this resource across all versions in the API group.
// Resources with non-empty groups are located at /apis/<APIGroupDiscovery.objectMeta.name>/<APIVersionDiscovery.version>/<APIResourceDiscovery.Resource>
// Resources with empty groups are located at /api/v1/<APIResourceDiscovery.Resource>
Resource string `json:"resource" protobuf:"bytes,1,opt,name=resource"`
// responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
// APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior.
// This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource
ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"`
// scope indicates the scope of a resource, either Cluster or Namespaced
Scope ResourceScope `json:"scope" protobuf:"bytes,3,opt,name=scope"`
// singularResource is the singular name of the resource. This allows clients to handle plural and singular opaquely.
// For many clients the singular form of the resource will be more understandable to users reading messages and should be used when integrating the name of the resource into a sentence.
// The command line tool kubectl, for example, allows use of the singular resource name in place of plurals.
// The singular form of a resource should always be an optional element - when in doubt use the canonical resource name.
SingularResource string `json:"singularResource" protobuf:"bytes,4,opt,name=singularResource"`
// verbs is a list of supported API operation types (this includes
// but is not limited to get, list, watch, create, update, patch,
// delete, deletecollection, and proxy).
// +listType=set
Verbs []string `json:"verbs" protobuf:"bytes,5,opt,name=verbs"`
// shortNames is a list of suggested short names of the resource.
// +listType=set
ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,6,rep,name=shortNames"`
// categories is a list of the grouped resources this resource belongs to (e.g. 'all').
// Clients may use this to simplify acting on multiple resource types at once.
// +listType=set
Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"`
// subresources is a list of subresources provided by this resource. Subresources are located at /apis/<APIGroupDiscovery.objectMeta.name>/<APIVersionDiscovery.version>/<APIResourceDiscovery.Resource>/name-of-instance/<APIResourceDiscovery.subresources[i].subresource>
// +listType=map
// +listMapKey=subresource
Subresources []APISubresourceDiscovery `json:"subresources,omitempty" protobuf:"bytes,8,rep,name=subresources"`
}

// ResourceScope is an enum defining the different scopes available to a resource.
type ResourceScope string

const (
ScopeCluster ResourceScope = "Cluster"
ScopeNamespace ResourceScope = "Namespaced"
)

// DiscoveryFreshness is an enum defining whether the Discovery document published by an apiservice is up to date (fresh).
type DiscoveryFreshness string

const (
DiscoveryFreshnessCurrent DiscoveryFreshness = "Current"
DiscoveryFreshnessStale DiscoveryFreshness = "Stale"
)

// APISubresourceDiscovery provides information about an API subresource for discovery.
type APISubresourceDiscovery struct {
// subresource is the name of the subresource. This is used in the URL path and is the unique identifier
// for this resource across all versions.
Subresource string `json:"subresource" protobuf:"bytes,1,opt,name=subresource"`
// responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
// Some subresources do not return normal resources, these will have null or empty return types.
ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"`
// acceptedTypes describes the kinds that this endpoint accepts.
// Subresources may accept the standard content types or define
// custom negotiation schemes. The list may not be exhaustive for
// all operations.
// +listType=map
// +listMapKey=group
// +listMapKey=version
// +listMapKey=kind
AcceptedTypes []v1.GroupVersionKind `json:"acceptedTypes,omitempty" protobuf:"bytes,3,rep,name=acceptedTypes"`
// verbs is a list of supported API operation types (this includes
// but is not limited to get, list, watch, create, update, patch,
// delete, deletecollection, and proxy). Subresources may define
// custom verbs outside the standard Kubernetes verb set. Clients
// should expect the behavior of standard verbs to align with
// Kubernetes interaction conventions.
// +listType=set
Verbs []string `json:"verbs" protobuf:"bytes,4,opt,name=verbs"`
}
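As a brief illustration outside the vendored file, here is a hedged sketch of how the discovery types defined above fit together; the group, resource, and verb literals are assumptions chosen only for the example.

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// One version entry describing the "deployments" resource, roughly as an
	// aggregated discovery document might report it.
	version := apidiscoveryv2.APIVersionDiscovery{
		Version:   "v1",
		Freshness: apidiscoveryv2.DiscoveryFreshnessCurrent,
		Resources: []apidiscoveryv2.APIResourceDiscovery{{
			Resource:         "deployments",
			SingularResource: "deployment",
			Scope:            apidiscoveryv2.ScopeNamespace,
			ResponseKind:     &metav1.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"},
			Verbs:            []string{"get", "list", "watch"},
		}},
	}

	// The group wraps its versions in descending order of preference.
	group := apidiscoveryv2.APIGroupDiscovery{
		ObjectMeta: metav1.ObjectMeta{Name: "apps"},
		Versions:   []apidiscoveryv2.APIVersionDiscovery{version},
	}
	fmt.Println(group.Name, version.Resources[0].Scope)
}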
190 vendor/k8s.io/api/apidiscovery/v2/zz_generated.deepcopy.go generated vendored Normal file
@@ -0,0 +1,190 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package v2

import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIGroupDiscovery) DeepCopyInto(out *APIGroupDiscovery) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.Versions != nil {
in, out := &in.Versions, &out.Versions
*out = make([]APIVersionDiscovery, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscovery.
func (in *APIGroupDiscovery) DeepCopy() *APIGroupDiscovery {
if in == nil {
return nil
}
out := new(APIGroupDiscovery)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *APIGroupDiscovery) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIGroupDiscoveryList) DeepCopyInto(out *APIGroupDiscoveryList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]APIGroupDiscovery, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupDiscoveryList.
func (in *APIGroupDiscoveryList) DeepCopy() *APIGroupDiscoveryList {
if in == nil {
return nil
}
out := new(APIGroupDiscoveryList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *APIGroupDiscoveryList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIResourceDiscovery) DeepCopyInto(out *APIResourceDiscovery) {
*out = *in
if in.ResponseKind != nil {
in, out := &in.ResponseKind, &out.ResponseKind
*out = new(v1.GroupVersionKind)
**out = **in
}
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ShortNames != nil {
in, out := &in.ShortNames, &out.ShortNames
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Categories != nil {
in, out := &in.Categories, &out.Categories
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subresources != nil {
in, out := &in.Subresources, &out.Subresources
*out = make([]APISubresourceDiscovery, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceDiscovery.
func (in *APIResourceDiscovery) DeepCopy() *APIResourceDiscovery {
if in == nil {
return nil
}
out := new(APIResourceDiscovery)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APISubresourceDiscovery) DeepCopyInto(out *APISubresourceDiscovery) {
*out = *in
if in.ResponseKind != nil {
in, out := &in.ResponseKind, &out.ResponseKind
*out = new(v1.GroupVersionKind)
**out = **in
}
if in.AcceptedTypes != nil {
in, out := &in.AcceptedTypes, &out.AcceptedTypes
*out = make([]v1.GroupVersionKind, len(*in))
copy(*out, *in)
}
if in.Verbs != nil {
in, out := &in.Verbs, &out.Verbs
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APISubresourceDiscovery.
func (in *APISubresourceDiscovery) DeepCopy() *APISubresourceDiscovery {
if in == nil {
return nil
}
out := new(APISubresourceDiscovery)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIVersionDiscovery) DeepCopyInto(out *APIVersionDiscovery) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
*out = make([]APIResourceDiscovery, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionDiscovery.
func (in *APIVersionDiscovery) DeepCopy() *APIVersionDiscovery {
if in == nil {
return nil
}
out := new(APIVersionDiscovery)
in.DeepCopyInto(out)
return out
}
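Outside the diff itself, a short hedged example of what these generated helpers are for: DeepCopy returns an independent copy, so mutating the copy leaves the original untouched; the field values below are illustrative only.

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	orig := &apidiscoveryv2.APIGroupDiscovery{
		ObjectMeta: metav1.ObjectMeta{Name: "apps"},
		Versions:   []apidiscoveryv2.APIVersionDiscovery{{Version: "v1"}},
	}

	// The generated DeepCopy duplicates nested slices and pointers as well.
	cp := orig.DeepCopy()
	cp.Versions[0].Version = "v2"

	fmt.Println(orig.Versions[0].Version, cp.Versions[0].Version) // v1 v2
}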
34 vendor/k8s.io/api/apidiscovery/v2/zz_generated.prerelease-lifecycle.go generated vendored Normal file
@@ -0,0 +1,34 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by prerelease-lifecycle-gen. DO NOT EDIT.

package v2

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *APIGroupDiscovery) APILifecycleIntroduced() (major, minor int) {
return 1, 30
}

// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
func (in *APIGroupDiscoveryList) APILifecycleIntroduced() (major, minor int) {
return 1, 30
}
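One last illustrative note, not taken from the diff: the prerelease-lifecycle helpers simply report the release that introduced the type, which tooling can compare against the running server version; this minimal sketch assumes nothing beyond the methods shown above.

package main

import (
	"fmt"

	apidiscoveryv2 "k8s.io/api/apidiscovery/v2"
)

func main() {
	var g apidiscoveryv2.APIGroupDiscovery
	major, minor := g.APILifecycleIntroduced()
	fmt.Printf("introduced in %d.%d\n", major, minor) // introduced in 1.30
}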
113 vendor/k8s.io/api/apidiscovery/v2beta1/generated.pb.go generated vendored
@@ -15,7 +15,7 @@ limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto
// source: k8s.io/api/apidiscovery/v2beta1/generated.proto

package v2beta1

@@ -47,7 +47,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *APIGroupDiscovery) Reset() { *m = APIGroupDiscovery{} }
func (*APIGroupDiscovery) ProtoMessage() {}
func (*APIGroupDiscovery) Descriptor() ([]byte, []int) {
return fileDescriptor_0442b7af4d680cb7, []int{0}
return fileDescriptor_48661e6ba3d554f3, []int{0}
}
func (m *APIGroupDiscovery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -75,7 +75,7 @@ var xxx_messageInfo_APIGroupDiscovery proto.InternalMessageInfo
func (m *APIGroupDiscoveryList) Reset() { *m = APIGroupDiscoveryList{} }
func (*APIGroupDiscoveryList) ProtoMessage() {}
func (*APIGroupDiscoveryList) Descriptor() ([]byte, []int) {
return fileDescriptor_0442b7af4d680cb7, []int{1}
return fileDescriptor_48661e6ba3d554f3, []int{1}
}
func (m *APIGroupDiscoveryList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -103,7 +103,7 @@ var xxx_messageInfo_APIGroupDiscoveryList proto.InternalMessageInfo
func (m *APIResourceDiscovery) Reset() { *m = APIResourceDiscovery{} }
func (*APIResourceDiscovery) ProtoMessage() {}
func (*APIResourceDiscovery) Descriptor() ([]byte, []int) {
return fileDescriptor_0442b7af4d680cb7, []int{2}
return fileDescriptor_48661e6ba3d554f3, []int{2}
}
func (m *APIResourceDiscovery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -131,7 +131,7 @@ var xxx_messageInfo_APIResourceDiscovery proto.InternalMessageInfo
func (m *APISubresourceDiscovery) Reset() { *m = APISubresourceDiscovery{} }
func (*APISubresourceDiscovery) ProtoMessage() {}
func (*APISubresourceDiscovery) Descriptor() ([]byte, []int) {
return fileDescriptor_0442b7af4d680cb7, []int{3}
return fileDescriptor_48661e6ba3d554f3, []int{3}
}
func (m *APISubresourceDiscovery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -159,7 +159,7 @@ var xxx_messageInfo_APISubresourceDiscovery proto.InternalMessageInfo
func (m *APIVersionDiscovery) Reset() { *m = APIVersionDiscovery{} }
func (*APIVersionDiscovery) ProtoMessage() {}
func (*APIVersionDiscovery) Descriptor() ([]byte, []int) {
return fileDescriptor_0442b7af4d680cb7, []int{4}
return fileDescriptor_48661e6ba3d554f3, []int{4}
}
func (m *APIVersionDiscovery) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -193,59 +193,58 @@ func init() {
}

func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_0442b7af4d680cb7)
proto.RegisterFile("k8s.io/api/apidiscovery/v2beta1/generated.proto", fileDescriptor_48661e6ba3d554f3)
}

var fileDescriptor_0442b7af4d680cb7 = []byte{
// 754 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4c,
|
||||
0x14, 0x8d, 0x09, 0xf9, 0x48, 0x26, 0xc9, 0xf7, 0x85, 0x01, 0xf4, 0x59, 0x2c, 0x6c, 0x94, 0x4d,
|
||||
0xa9, 0xd4, 0xda, 0x25, 0x02, 0xc4, 0x36, 0x29, 0xb4, 0x8d, 0xfa, 0x87, 0x26, 0x15, 0x95, 0xaa,
|
||||
0x2e, 0x6a, 0x3b, 0x17, 0xc7, 0x0d, 0xb1, 0xad, 0x99, 0x71, 0x24, 0x76, 0x7d, 0x84, 0xbe, 0x43,
|
||||
0x5f, 0x86, 0x55, 0xc5, 0xa2, 0x0b, 0xba, 0x89, 0x4a, 0xfa, 0x00, 0xdd, 0xb3, 0xaa, 0xec, 0x8c,
|
||||
0x7f, 0x42, 0x40, 0x44, 0x5d, 0x74, 0x81, 0x84, 0xcf, 0x3d, 0xe7, 0xdc, 0x7b, 0x2e, 0xd7, 0x06,
|
||||
0x3d, 0xeb, 0xef, 0x31, 0xcd, 0xf1, 0xf4, 0x7e, 0x60, 0x02, 0x75, 0x81, 0x03, 0xd3, 0x87, 0xe0,
|
||||
0x76, 0x3d, 0xaa, 0x8b, 0x82, 0xe1, 0x3b, 0xe1, 0x4f, 0xd7, 0x61, 0x96, 0x37, 0x04, 0x7a, 0xaa,
|
||||
0x0f, 0x1b, 0x26, 0x70, 0x63, 0x4b, 0xb7, 0xc1, 0x05, 0x6a, 0x70, 0xe8, 0x6a, 0x3e, 0xf5, 0xb8,
|
||||
0x87, 0xd5, 0x89, 0x40, 0x33, 0x7c, 0x47, 0xcb, 0x0a, 0x34, 0x21, 0x58, 0x7f, 0x68, 0x3b, 0xbc,
|
||||
0x17, 0x98, 0x9a, 0xe5, 0x0d, 0x74, 0xdb, 0xb3, 0x3d, 0x3d, 0xd2, 0x99, 0xc1, 0x71, 0xf4, 0x14,
|
||||
0x3d, 0x44, 0xbf, 0x4d, 0xfc, 0xd6, 0xb7, 0xd3, 0x01, 0x06, 0x86, 0xd5, 0x73, 0xdc, 0xb0, 0xb9,
|
||||
0xdf, 0xb7, 0x43, 0x80, 0xe9, 0x03, 0xe0, 0x86, 0x3e, 0x9c, 0x99, 0x62, 0x5d, 0xbf, 0x4d, 0x45,
|
||||
0x03, 0x97, 0x3b, 0x03, 0x98, 0x11, 0xec, 0xde, 0x25, 0x60, 0x56, 0x0f, 0x06, 0xc6, 0x75, 0x5d,
|
||||
0xfd, 0xbb, 0x84, 0x96, 0x9b, 0x87, 0xed, 0xa7, 0xd4, 0x0b, 0xfc, 0xfd, 0x38, 0x2b, 0xfe, 0x80,
|
||||
0x8a, 0xe1, 0x64, 0x5d, 0x83, 0x1b, 0xb2, 0xb4, 0x21, 0x6d, 0x96, 0x1b, 0x8f, 0xb4, 0x74, 0x2f,
|
||||
0x49, 0x03, 0xcd, 0xef, 0xdb, 0x21, 0xc0, 0xb4, 0x90, 0xad, 0x0d, 0xb7, 0xb4, 0xd7, 0xe6, 0x47,
|
||||
0xb0, 0xf8, 0x4b, 0xe0, 0x46, 0x0b, 0x9f, 0x8d, 0xd4, 0xdc, 0x78, 0xa4, 0xa2, 0x14, 0x23, 0x89,
|
||||
0x2b, 0x36, 0x51, 0x71, 0x08, 0x94, 0x39, 0x9e, 0xcb, 0xe4, 0x85, 0x8d, 0xfc, 0x66, 0xb9, 0xb1,
|
||||
0xad, 0xdd, 0xb1, 0x79, 0xad, 0x79, 0xd8, 0x3e, 0x9a, 0x68, 0x92, 0x49, 0x5b, 0x35, 0xd1, 0xa5,
|
||||
0x28, 0x2a, 0x8c, 0x24, 0xbe, 0xf5, 0xaf, 0x12, 0x5a, 0x9b, 0xc9, 0xf6, 0xc2, 0x61, 0x1c, 0xbf,
|
||||
0x9f, 0xc9, 0xa7, 0xcd, 0x97, 0x2f, 0x54, 0x47, 0xe9, 0x92, 0xbe, 0x31, 0x92, 0xc9, 0xf6, 0x16,
|
||||
0x15, 0x1c, 0x0e, 0x83, 0x38, 0x58, 0x63, 0x9e, 0x60, 0xd3, 0x43, 0xb6, 0xaa, 0xc2, 0xbe, 0xd0,
|
||||
0x0e, 0x8d, 0xc8, 0xc4, 0xaf, 0xfe, 0x65, 0x11, 0xad, 0x36, 0x0f, 0xdb, 0x04, 0x98, 0x17, 0x50,
|
||||
0x0b, 0xd2, 0xbf, 0xd7, 0x03, 0x54, 0xa4, 0x02, 0x8c, 0xf2, 0x94, 0xd2, 0xf9, 0x62, 0x32, 0x49,
|
||||
0x18, 0xf8, 0x04, 0x55, 0x28, 0x30, 0xdf, 0x73, 0x19, 0x3c, 0x77, 0xdc, 0xae, 0xbc, 0x10, 0x6d,
|
||||
0x60, 0x77, 0xbe, 0x0d, 0x44, 0x83, 0x8a, 0x65, 0x87, 0xea, 0x56, 0x6d, 0x3c, 0x52, 0x2b, 0x24,
|
||||
0xe3, 0x47, 0xa6, 0xdc, 0xf1, 0x36, 0x2a, 0x30, 0xcb, 0xf3, 0x41, 0xce, 0x47, 0x83, 0x29, 0x71,
|
||||
0xb2, 0x4e, 0x08, 0x5e, 0x8d, 0xd4, 0x6a, 0x3c, 0x61, 0x04, 0x90, 0x09, 0x19, 0xef, 0xa3, 0x1a,
|
||||
0x73, 0x5c, 0x3b, 0x38, 0x31, 0x68, 0x5c, 0x97, 0x17, 0x23, 0x03, 0x59, 0x18, 0xd4, 0x3a, 0xd7,
|
||||
0xea, 0x64, 0x46, 0x81, 0x55, 0x54, 0x18, 0x02, 0x35, 0x99, 0x5c, 0xd8, 0xc8, 0x6f, 0x96, 0x5a,
|
||||
0xa5, 0xb0, 0xef, 0x51, 0x08, 0x90, 0x09, 0x8e, 0x35, 0x84, 0x58, 0xcf, 0xa3, 0xfc, 0x95, 0x31,
|
||||
0x00, 0x26, 0xff, 0x13, 0xb1, 0xfe, 0x0d, 0x8f, 0xb6, 0x93, 0xa0, 0x24, 0xc3, 0x08, 0xf9, 0x96,
|
||||
0xc1, 0xc1, 0xf6, 0xa8, 0x03, 0x4c, 0x5e, 0x4a, 0xf9, 0x8f, 0x13, 0x94, 0x64, 0x18, 0x98, 0xa2,
|
||||
0x0a, 0x0b, 0xcc, 0x78, 0xf3, 0x4c, 0x2e, 0x46, 0x17, 0xb1, 0x37, 0xcf, 0x45, 0x74, 0x52, 0x5d,
|
||||
0x7a, 0x17, 0xab, 0x22, 0x7c, 0x25, 0x53, 0x65, 0x64, 0xaa, 0x47, 0xfd, 0xdb, 0x02, 0xfa, 0xff,
|
||||
0x16, 0x3d, 0xde, 0x41, 0xe5, 0x0c, 0x57, 0xdc, 0xca, 0x8a, 0x30, 0x2d, 0x67, 0x24, 0x24, 0xcb,
|
||||
0xfb, 0xcb, 0x17, 0xc3, 0x50, 0xd5, 0xb0, 0x2c, 0xf0, 0x39, 0x74, 0xdf, 0x9c, 0xfa, 0xc0, 0xe4,
|
||||
0x7c, 0xb4, 0xb5, 0x3f, 0x6d, 0xb7, 0x26, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x32, 0xdd, 0x23, 0x3d,
|
||||
0x95, 0xc5, 0x9b, 0x4f, 0xa5, 0xfe, 0x4b, 0x42, 0x2b, 0x37, 0x7c, 0x81, 0xf0, 0x7d, 0xb4, 0x24,
|
||||
0xbe, 0x38, 0x62, 0x9d, 0xff, 0x89, 0x7e, 0x4b, 0x82, 0x4a, 0xe2, 0x3a, 0x3e, 0x46, 0xa5, 0xf4,
|
||||
0x14, 0x26, 0x1f, 0x87, 0x9d, 0x79, 0x4e, 0x61, 0xe6, 0x85, 0x6f, 0x2d, 0x8b, 0x1e, 0x25, 0x92,
|
||||
0x1c, 0x41, 0x6a, 0x8d, 0x0f, 0x50, 0xe9, 0x98, 0x02, 0xeb, 0xb9, 0xc0, 0x98, 0x78, 0xed, 0xee,
|
||||
0xc5, 0x82, 0x27, 0x71, 0xe1, 0x6a, 0xa4, 0xe2, 0xc4, 0x30, 0x41, 0x49, 0xaa, 0x6c, 0x1d, 0x9c,
|
||||
0x5d, 0x2a, 0xb9, 0xf3, 0x4b, 0x25, 0x77, 0x71, 0xa9, 0xe4, 0x3e, 0x8d, 0x15, 0xe9, 0x6c, 0xac,
|
||||
0x48, 0xe7, 0x63, 0x45, 0xba, 0x18, 0x2b, 0xd2, 0x8f, 0xb1, 0x22, 0x7d, 0xfe, 0xa9, 0xe4, 0xde,
|
||||
0xa9, 0x77, 0xfc, 0x87, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x66, 0x3b, 0x84, 0x9c, 0x07,
|
||||
0x00, 0x00,
|
||||
var fileDescriptor_48661e6ba3d554f3 = []byte{
|
||||
// 740 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x4e, 0xdb, 0x4a,
|
||||
0x18, 0x8d, 0x09, 0xb9, 0x24, 0x93, 0xe4, 0xde, 0x30, 0x80, 0xae, 0xc5, 0xc2, 0x46, 0xd9, 0x5c,
|
||||
0xae, 0xd4, 0x8e, 0x4b, 0x04, 0x88, 0x6d, 0x52, 0x68, 0x15, 0xf5, 0x0f, 0x4d, 0x2a, 0x2a, 0x55,
|
||||
0x5d, 0xd4, 0x71, 0x06, 0xc7, 0x85, 0xd8, 0xd6, 0xcc, 0x24, 0x12, 0xbb, 0x3e, 0x42, 0xdf, 0xa1,
|
||||
0x2f, 0xc3, 0xaa, 0x62, 0xd1, 0x05, 0xdd, 0x44, 0x25, 0x7d, 0x80, 0xee, 0x59, 0x55, 0x33, 0x1e,
|
||||
0xff, 0x84, 0x80, 0x88, 0xba, 0xe8, 0x22, 0x52, 0x7c, 0xe6, 0x9c, 0xf3, 0x7d, 0xe7, 0xcb, 0xe7,
|
||||
0x09, 0xb0, 0x4e, 0xf6, 0x18, 0xf2, 0x02, 0xcb, 0x0e, 0x3d, 0xf1, 0xe9, 0x79, 0xcc, 0x09, 0x46,
|
||||
0x84, 0x9e, 0x59, 0xa3, 0x46, 0x97, 0x70, 0x7b, 0xcb, 0x72, 0x89, 0x4f, 0xa8, 0xcd, 0x49, 0x0f,
|
||||
0x85, 0x34, 0xe0, 0x01, 0x34, 0x23, 0x01, 0xb2, 0x43, 0x0f, 0x65, 0x05, 0x48, 0x09, 0xd6, 0x1f,
|
||||
0xba, 0x1e, 0xef, 0x0f, 0xbb, 0xc8, 0x09, 0x06, 0x96, 0x1b, 0xb8, 0x81, 0x25, 0x75, 0xdd, 0xe1,
|
||||
0xb1, 0x7c, 0x92, 0x0f, 0xf2, 0x5b, 0xe4, 0xb7, 0xbe, 0x9d, 0x36, 0x30, 0xb0, 0x9d, 0xbe, 0xe7,
|
||||
0x8b, 0xe2, 0xe1, 0x89, 0x2b, 0x00, 0x66, 0x0d, 0x08, 0xb7, 0xad, 0xd1, 0x4c, 0x17, 0xeb, 0xd6,
|
||||
0x5d, 0x2a, 0x3a, 0xf4, 0xb9, 0x37, 0x20, 0x33, 0x82, 0xdd, 0xfb, 0x04, 0xcc, 0xe9, 0x93, 0x81,
|
||||
0x7d, 0x53, 0x57, 0xff, 0xa6, 0x81, 0xe5, 0xe6, 0x61, 0xfb, 0x29, 0x0d, 0x86, 0xe1, 0x7e, 0x9c,
|
||||
0x15, 0xbe, 0x07, 0x45, 0xd1, 0x59, 0xcf, 0xe6, 0xb6, 0xae, 0x6d, 0x68, 0x9b, 0xe5, 0xc6, 0x23,
|
||||
0x94, 0xce, 0x25, 0x29, 0x80, 0xc2, 0x13, 0x57, 0x00, 0x0c, 0x09, 0x36, 0x1a, 0x6d, 0xa1, 0x57,
|
||||
0xdd, 0x0f, 0xc4, 0xe1, 0x2f, 0x08, 0xb7, 0x5b, 0xf0, 0x7c, 0x6c, 0xe6, 0x26, 0x63, 0x13, 0xa4,
|
||||
0x18, 0x4e, 0x5c, 0x61, 0x17, 0x14, 0x47, 0x84, 0x32, 0x2f, 0xf0, 0x99, 0xbe, 0xb0, 0x91, 0xdf,
|
||||
0x2c, 0x37, 0xb6, 0xd1, 0x3d, 0x93, 0x47, 0xcd, 0xc3, 0xf6, 0x51, 0xa4, 0x49, 0x3a, 0x6d, 0xd5,
|
||||
0x54, 0x95, 0xa2, 0x3a, 0x61, 0x38, 0xf1, 0xad, 0x7f, 0xd1, 0xc0, 0xda, 0x4c, 0xb6, 0xe7, 0x1e,
|
||||
0xe3, 0xf0, 0xdd, 0x4c, 0x3e, 0x34, 0x5f, 0x3e, 0xa1, 0x96, 0xe9, 0x92, 0xba, 0x31, 0x92, 0xc9,
|
||||
0xf6, 0x06, 0x14, 0x3c, 0x4e, 0x06, 0x71, 0xb0, 0xc6, 0x3c, 0xc1, 0xa6, 0x9b, 0x6c, 0x55, 0x95,
|
||||
0x7d, 0xa1, 0x2d, 0x8c, 0x70, 0xe4, 0x57, 0xff, 0xbc, 0x08, 0x56, 0x9b, 0x87, 0x6d, 0x4c, 0x58,
|
||||
0x30, 0xa4, 0x0e, 0x49, 0x7f, 0xaf, 0x07, 0xa0, 0x48, 0x15, 0x28, 0xf3, 0x94, 0xd2, 0xfe, 0x62,
|
||||
0x32, 0x4e, 0x18, 0xf0, 0x14, 0x54, 0x28, 0x61, 0x61, 0xe0, 0x33, 0xf2, 0xcc, 0xf3, 0x7b, 0xfa,
|
||||
0x82, 0x9c, 0xc0, 0xee, 0x7c, 0x13, 0x90, 0x8d, 0xaa, 0x61, 0x0b, 0x75, 0xab, 0x36, 0x19, 0x9b,
|
||||
0x15, 0x9c, 0xf1, 0xc3, 0x53, 0xee, 0x70, 0x1b, 0x14, 0x98, 0x13, 0x84, 0x44, 0xcf, 0xcb, 0xc6,
|
||||
0x8c, 0x38, 0x59, 0x47, 0x80, 0xd7, 0x63, 0xb3, 0x1a, 0x77, 0x28, 0x01, 0x1c, 0x91, 0xe1, 0x3e,
|
||||
0xa8, 0x31, 0xcf, 0x77, 0x87, 0xa7, 0x36, 0x8d, 0xcf, 0xf5, 0x45, 0x69, 0xa0, 0x2b, 0x83, 0x5a,
|
||||
0xe7, 0xc6, 0x39, 0x9e, 0x51, 0x40, 0x13, 0x14, 0x46, 0x84, 0x76, 0x99, 0x5e, 0xd8, 0xc8, 0x6f,
|
||||
0x96, 0x5a, 0x25, 0x51, 0xf7, 0x48, 0x00, 0x38, 0xc2, 0x21, 0x02, 0x80, 0xf5, 0x03, 0xca, 0x5f,
|
||||
0xda, 0x03, 0xc2, 0xf4, 0xbf, 0x24, 0xeb, 0x6f, 0xb1, 0xb4, 0x9d, 0x04, 0xc5, 0x19, 0x86, 0xe0,
|
||||
0x3b, 0x36, 0x27, 0x6e, 0x40, 0x3d, 0xc2, 0xf4, 0xa5, 0x94, 0xff, 0x38, 0x41, 0x71, 0x86, 0x01,
|
||||
0x29, 0xa8, 0xb0, 0x61, 0x37, 0x9e, 0x3c, 0xd3, 0x8b, 0x72, 0x23, 0xf6, 0xe6, 0xd9, 0x88, 0x4e,
|
||||
0xaa, 0x4b, 0xf7, 0x62, 0x55, 0x85, 0xaf, 0x64, 0x4e, 0x19, 0x9e, 0xaa, 0x51, 0xff, 0xba, 0x00,
|
||||
0xfe, 0xbd, 0x43, 0x0f, 0x77, 0x40, 0x39, 0xc3, 0x55, 0xbb, 0xb2, 0xa2, 0x4c, 0xcb, 0x19, 0x09,
|
||||
0xce, 0xf2, 0xfe, 0xf0, 0xc6, 0x30, 0x50, 0xb5, 0x1d, 0x87, 0x84, 0x9c, 0xf4, 0x5e, 0x9f, 0x85,
|
||||
0x84, 0xe9, 0x79, 0x39, 0xb5, 0xdf, 0x2d, 0xb7, 0xa6, 0xe2, 0x55, 0x9b, 0x59, 0x53, 0x3c, 0x5d,
|
||||
0x23, 0x5d, 0x95, 0xc5, 0xdb, 0x57, 0xa5, 0xfe, 0x53, 0x03, 0x2b, 0xb7, 0xdc, 0x40, 0xf0, 0x7f,
|
||||
0xb0, 0xa4, 0x6e, 0x1c, 0x35, 0xce, 0x7f, 0x54, 0xbd, 0x25, 0x45, 0xc5, 0xf1, 0x39, 0x3c, 0x06,
|
||||
0xa5, 0x74, 0x15, 0xa2, 0xcb, 0x61, 0x67, 0x9e, 0x55, 0x98, 0x79, 0xe1, 0x5b, 0xcb, 0xaa, 0x46,
|
||||
0x09, 0x27, 0x4b, 0x90, 0x5a, 0xc3, 0x03, 0x50, 0x3a, 0xa6, 0x84, 0xf5, 0x7d, 0xc2, 0x98, 0x7a,
|
||||
0xed, 0xfe, 0x8b, 0x05, 0x4f, 0xe2, 0x83, 0xeb, 0xb1, 0x09, 0x13, 0xc3, 0x04, 0xc5, 0xa9, 0xb2,
|
||||
0x75, 0x70, 0x7e, 0x65, 0xe4, 0x2e, 0xae, 0x8c, 0xdc, 0xe5, 0x95, 0x91, 0xfb, 0x38, 0x31, 0xb4,
|
||||
0xf3, 0x89, 0xa1, 0x5d, 0x4c, 0x0c, 0xed, 0x72, 0x62, 0x68, 0xdf, 0x27, 0x86, 0xf6, 0xe9, 0x87,
|
||||
0x91, 0x7b, 0x6b, 0xde, 0xf3, 0x0f, 0xfb, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xe4, 0x85, 0x3b, 0x06,
|
||||
0x83, 0x07, 0x00, 0x00,
|
||||
}

func (m *APIGroupDiscovery) Marshal() (dAtA []byte, err error) {

10
vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto
generated
vendored
@@ -38,7 +38,7 @@ message APIGroupDiscovery {
// name is allowed to be "" to represent the legacy, ungroupified resources.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// versions are the versions supported in this group. They are sorted in descending order of preference,
// with the preferred version being the first entry.
@@ -55,7 +55,7 @@ message APIGroupDiscoveryList {
// ResourceVersion will not be set, because this does not have a replayable ordering among multiple apiservers.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// items is the list of groups for discovery. The groups are listed in priority order.
repeated APIGroupDiscovery items = 2;
@@ -72,7 +72,7 @@ message APIResourceDiscovery {
// responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
// APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior.
// This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource
optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;

// scope indicates the scope of a resource, either Cluster or Namespaced
optional string scope = 3;
@@ -112,7 +112,7 @@ message APISubresourceDiscovery {

// responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns.
// Some subresources do not return normal resources, these will have null or empty return types.
optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2;

// acceptedTypes describes the kinds that this endpoint accepts.
// Subresources may accept the standard content types or define
@@ -122,7 +122,7 @@ message APISubresourceDiscovery {
// +listMapKey=group
// +listMapKey=version
// +listMapKey=kind
repeated k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;
repeated .k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind acceptedTypes = 3;

// verbs is a list of supported API operation types (this includes
// but is not limited to get, list, watch, create, update, patch,

119
vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.pb.go
generated
vendored
@@ -15,7 +15,7 @@ limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
// source: k8s.io/api/apiserverinternal/v1alpha1/generated.proto

package v1alpha1

@@ -46,7 +46,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *ServerStorageVersion) Reset() { *m = ServerStorageVersion{} }
func (*ServerStorageVersion) ProtoMessage() {}
func (*ServerStorageVersion) Descriptor() ([]byte, []int) {
return fileDescriptor_a3903ff5e3cc7a03, []int{0}
return fileDescriptor_126bcbf538b54729, []int{0}
}
func (m *ServerStorageVersion) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -74,7 +74,7 @@ var xxx_messageInfo_ServerStorageVersion proto.InternalMessageInfo
func (m *StorageVersion) Reset() { *m = StorageVersion{} }
func (*StorageVersion) ProtoMessage() {}
func (*StorageVersion) Descriptor() ([]byte, []int) {
return fileDescriptor_a3903ff5e3cc7a03, []int{1}
return fileDescriptor_126bcbf538b54729, []int{1}
}
func (m *StorageVersion) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -102,7 +102,7 @@ var xxx_messageInfo_StorageVersion proto.InternalMessageInfo
func (m *StorageVersionCondition) Reset() { *m = StorageVersionCondition{} }
func (*StorageVersionCondition) ProtoMessage() {}
func (*StorageVersionCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_a3903ff5e3cc7a03, []int{2}
return fileDescriptor_126bcbf538b54729, []int{2}
}
func (m *StorageVersionCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -130,7 +130,7 @@ var xxx_messageInfo_StorageVersionCondition proto.InternalMessageInfo
func (m *StorageVersionList) Reset() { *m = StorageVersionList{} }
func (*StorageVersionList) ProtoMessage() {}
func (*StorageVersionList) Descriptor() ([]byte, []int) {
return fileDescriptor_a3903ff5e3cc7a03, []int{3}
return fileDescriptor_126bcbf538b54729, []int{3}
}
func (m *StorageVersionList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -158,7 +158,7 @@ var xxx_messageInfo_StorageVersionList proto.InternalMessageInfo
func (m *StorageVersionSpec) Reset() { *m = StorageVersionSpec{} }
func (*StorageVersionSpec) ProtoMessage() {}
func (*StorageVersionSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_a3903ff5e3cc7a03, []int{4}
return fileDescriptor_126bcbf538b54729, []int{4}
}
func (m *StorageVersionSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -186,7 +186,7 @@ var xxx_messageInfo_StorageVersionSpec proto.InternalMessageInfo
func (m *StorageVersionStatus) Reset() { *m = StorageVersionStatus{} }
func (*StorageVersionStatus) ProtoMessage() {}
func (*StorageVersionStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_a3903ff5e3cc7a03, []int{5}
return fileDescriptor_126bcbf538b54729, []int{5}
}
func (m *StorageVersionStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -221,61 +221,60 @@ func init() {
}

func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_a3903ff5e3cc7a03)
proto.RegisterFile("k8s.io/api/apiserverinternal/v1alpha1/generated.proto", fileDescriptor_126bcbf538b54729)
}

var fileDescriptor_a3903ff5e3cc7a03 = []byte{
|
||||
// 790 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0xdb, 0x48,
|
||||
0x14, 0x8e, 0x49, 0x08, 0x30, 0xd9, 0x4d, 0x96, 0x59, 0x10, 0xd9, 0xac, 0xe4, 0xb0, 0x91, 0x58,
|
||||
0xb1, 0xbb, 0x5a, 0x7b, 0x89, 0x96, 0xaa, 0xb4, 0x52, 0x2b, 0x0c, 0xa8, 0xa2, 0x85, 0x52, 0x4d,
|
||||
0x50, 0x0f, 0xb4, 0x87, 0x4e, 0xec, 0xa9, 0xe3, 0x26, 0xf6, 0x58, 0x9e, 0x49, 0x24, 0x2e, 0x55,
|
||||
0x7f, 0x42, 0xfb, 0x3f, 0x7a, 0xec, 0x8f, 0xe0, 0x54, 0x71, 0x44, 0xaa, 0x14, 0x15, 0xf7, 0x5f,
|
||||
0x70, 0xaa, 0x66, 0xec, 0x38, 0x38, 0x09, 0x6a, 0xc4, 0x21, 0x52, 0xe6, 0xbd, 0xf7, 0x7d, 0xef,
|
||||
0xcd, 0x37, 0xdf, 0x8c, 0xc1, 0xd3, 0xf6, 0x5d, 0xa6, 0x39, 0x54, 0x6f, 0x77, 0x9b, 0x24, 0xf0,
|
||||
0x08, 0x27, 0x4c, 0xef, 0x11, 0xcf, 0xa2, 0x81, 0x1e, 0x27, 0xb0, 0xef, 0x88, 0x1f, 0x23, 0x41,
|
||||
0x8f, 0x04, 0x8e, 0xc7, 0x49, 0xe0, 0xe1, 0x8e, 0xde, 0xdb, 0xc0, 0x1d, 0xbf, 0x85, 0x37, 0x74,
|
||||
0x9b, 0x78, 0x24, 0xc0, 0x9c, 0x58, 0x9a, 0x1f, 0x50, 0x4e, 0xe1, 0x5a, 0x04, 0xd3, 0xb0, 0xef,
|
||||
0x68, 0x63, 0x30, 0x6d, 0x00, 0xab, 0xfc, 0x6b, 0x3b, 0xbc, 0xd5, 0x6d, 0x6a, 0x26, 0x75, 0x75,
|
||||
0x9b, 0xda, 0x54, 0x97, 0xe8, 0x66, 0xf7, 0xb5, 0x5c, 0xc9, 0x85, 0xfc, 0x17, 0xb1, 0x56, 0xfe,
|
||||
0x1f, 0x0e, 0xe3, 0x62, 0xb3, 0xe5, 0x78, 0x24, 0x38, 0xd5, 0xfd, 0xb6, 0x2d, 0x27, 0xd3, 0x5d,
|
||||
0xc2, 0xb1, 0xde, 0x1b, 0x9b, 0xa5, 0xa2, 0xdf, 0x84, 0x0a, 0xba, 0x1e, 0x77, 0x5c, 0x32, 0x06,
|
||||
0xb8, 0xf3, 0x23, 0x00, 0x33, 0x5b, 0xc4, 0xc5, 0xa3, 0xb8, 0xda, 0x87, 0x19, 0xb0, 0xd4, 0x90,
|
||||
0x3b, 0x6d, 0x70, 0x1a, 0x60, 0x9b, 0x3c, 0x27, 0x01, 0x73, 0xa8, 0x07, 0x37, 0x41, 0x01, 0xfb,
|
||||
0x4e, 0x94, 0xda, 0xdf, 0x2d, 0x2b, 0xab, 0xca, 0xfa, 0x82, 0xf1, 0xeb, 0x59, 0xbf, 0x9a, 0x09,
|
||||
0xfb, 0xd5, 0xc2, 0xf6, 0xb3, 0xfd, 0x41, 0x0a, 0x5d, 0xaf, 0x83, 0xdb, 0xa0, 0x44, 0x3c, 0x93,
|
||||
0x5a, 0x8e, 0x67, 0xc7, 0x4c, 0xe5, 0x19, 0x09, 0x5d, 0x89, 0xa1, 0xa5, 0xbd, 0x74, 0x1a, 0x8d,
|
||||
0xd6, 0xc3, 0x1d, 0xb0, 0x68, 0x11, 0x93, 0x5a, 0xb8, 0xd9, 0x19, 0x4c, 0xc3, 0xca, 0xd9, 0xd5,
|
||||
0xec, 0xfa, 0x82, 0xb1, 0x1c, 0xf6, 0xab, 0x8b, 0xbb, 0xa3, 0x49, 0x34, 0x5e, 0x0f, 0xef, 0x81,
|
||||
0xa2, 0x3c, 0x40, 0x2b, 0x61, 0xc8, 0x49, 0x06, 0x18, 0xf6, 0xab, 0xc5, 0x46, 0x2a, 0x83, 0x46,
|
||||
0x2a, 0x6b, 0x9f, 0x66, 0x40, 0x71, 0x44, 0x8d, 0x57, 0x60, 0x5e, 0x1c, 0x95, 0x85, 0x39, 0x96,
|
||||
0x52, 0x14, 0xea, 0xff, 0x69, 0x43, 0xbb, 0x24, 0x8a, 0x6b, 0x7e, 0xdb, 0x96, 0xde, 0xd1, 0x44,
|
||||
0xb5, 0xd6, 0xdb, 0xd0, 0x8e, 0x9a, 0x6f, 0x88, 0xc9, 0x0f, 0x09, 0xc7, 0x06, 0x8c, 0x15, 0x00,
|
||||
0xc3, 0x18, 0x4a, 0x58, 0xe1, 0x0b, 0x90, 0x63, 0x3e, 0x31, 0xa5, 0x5a, 0x85, 0xfa, 0x96, 0x36,
|
||||
0x95, 0x19, 0xb5, 0xf4, 0x98, 0x0d, 0x9f, 0x98, 0xc6, 0x4f, 0x71, 0x9b, 0x9c, 0x58, 0x21, 0x49,
|
||||
0x0a, 0x4d, 0x90, 0x67, 0x1c, 0xf3, 0xae, 0xd0, 0x51, 0xd0, 0xdf, 0xbf, 0x1d, 0xbd, 0xa4, 0x30,
|
||||
0x8a, 0x71, 0x83, 0x7c, 0xb4, 0x46, 0x31, 0x75, 0xed, 0x63, 0x16, 0xac, 0xa4, 0x01, 0x3b, 0xd4,
|
||||
0xb3, 0x1c, 0x2e, 0xf4, 0x7b, 0x08, 0x72, 0xfc, 0xd4, 0x27, 0xb1, 0x8d, 0xfe, 0x19, 0x8c, 0x78,
|
||||
0x7c, 0xea, 0x93, 0xab, 0x7e, 0xf5, 0xf7, 0x1b, 0x60, 0x22, 0x8d, 0x24, 0x10, 0x6e, 0x25, 0x3b,
|
||||
0x88, 0xec, 0xf4, 0x47, 0x7a, 0x88, 0xab, 0x7e, 0xb5, 0x94, 0xc0, 0xd2, 0x73, 0xc1, 0xc7, 0x00,
|
||||
0xd2, 0x66, 0x74, 0xc4, 0x8f, 0x22, 0xf7, 0x0b, 0x57, 0x0a, 0x21, 0xb2, 0x46, 0x25, 0xa6, 0x81,
|
||||
0x47, 0x63, 0x15, 0x68, 0x02, 0x0a, 0xf6, 0x00, 0xec, 0x60, 0xc6, 0x8f, 0x03, 0xec, 0xb1, 0x68,
|
||||
0x44, 0xc7, 0x25, 0xe5, 0x9c, 0x14, 0xf5, 0xef, 0xe9, 0x1c, 0x21, 0x10, 0xc3, 0xbe, 0x07, 0x63,
|
||||
0x6c, 0x68, 0x42, 0x07, 0xf8, 0x27, 0xc8, 0x07, 0x04, 0x33, 0xea, 0x95, 0x67, 0xe5, 0xf6, 0x93,
|
||||
0x33, 0x40, 0x32, 0x8a, 0xe2, 0x2c, 0xfc, 0x0b, 0xcc, 0xb9, 0x84, 0x31, 0x6c, 0x93, 0x72, 0x5e,
|
||||
0x16, 0x96, 0xe2, 0xc2, 0xb9, 0xc3, 0x28, 0x8c, 0x06, 0xf9, 0xda, 0x67, 0x05, 0xc0, 0xb4, 0xee,
|
||||
0x07, 0x0e, 0xe3, 0xf0, 0xe5, 0x98, 0xd3, 0xb5, 0xe9, 0xf6, 0x25, 0xd0, 0xd2, 0xe7, 0xbf, 0xc4,
|
||||
0x2d, 0xe7, 0x07, 0x91, 0x6b, 0x2e, 0x3f, 0x01, 0xb3, 0x0e, 0x27, 0xae, 0x38, 0xc5, 0xec, 0x7a,
|
||||
0xa1, 0xbe, 0x79, 0x2b, 0x1f, 0x1a, 0x3f, 0xc7, 0x1d, 0x66, 0xf7, 0x05, 0x17, 0x8a, 0x28, 0x6b,
|
||||
0x4b, 0xa3, 0xfb, 0x11, 0x17, 0xa0, 0xf6, 0x45, 0x3c, 0x70, 0x13, 0x6c, 0x0c, 0xdf, 0x82, 0x12,
|
||||
0x4b, 0xc5, 0x59, 0x59, 0x91, 0x43, 0x4d, 0x7d, 0x39, 0x26, 0x3c, 0x9b, 0xc3, 0x67, 0x2e, 0x1d,
|
||||
0x67, 0x68, 0xb4, 0x19, 0x3c, 0x02, 0xcb, 0x26, 0x75, 0x5d, 0xea, 0xed, 0x4d, 0x7c, 0x2f, 0x7f,
|
||||
0x0b, 0xfb, 0xd5, 0xe5, 0x9d, 0x49, 0x05, 0x68, 0x32, 0x0e, 0x06, 0x00, 0x98, 0x83, 0x2b, 0x10,
|
||||
0x3d, 0x98, 0x85, 0xfa, 0x83, 0x5b, 0x09, 0x9c, 0xdc, 0xa4, 0xe1, 0x9b, 0x95, 0x84, 0x18, 0xba,
|
||||
0xd6, 0xc5, 0x78, 0x72, 0x76, 0xa9, 0x66, 0xce, 0x2f, 0xd5, 0xcc, 0xc5, 0xa5, 0x9a, 0x79, 0x17,
|
||||
0xaa, 0xca, 0x59, 0xa8, 0x2a, 0xe7, 0xa1, 0xaa, 0x5c, 0x84, 0xaa, 0xf2, 0x35, 0x54, 0x95, 0xf7,
|
||||
0xdf, 0xd4, 0xcc, 0xc9, 0xda, 0x54, 0x1f, 0xe4, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x3a,
|
||||
0x2e, 0x07, 0xd1, 0x07, 0x00, 0x00,
|
||||
var fileDescriptor_126bcbf538b54729 = []byte{
|
||||
// 770 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x41, 0x4f, 0x13, 0x41,
|
||||
0x14, 0xee, 0xd2, 0x52, 0x60, 0xaa, 0xad, 0x8c, 0x10, 0x6a, 0x4d, 0xb6, 0xd8, 0x04, 0x83, 0x1a,
|
||||
0x77, 0xa5, 0x11, 0x23, 0x9a, 0x68, 0x58, 0x20, 0x06, 0x85, 0x60, 0xa6, 0xc4, 0x03, 0x7a, 0x70,
|
||||
0xba, 0x1d, 0xb7, 0x2b, 0xdd, 0x9d, 0xcd, 0xce, 0xb4, 0x09, 0x17, 0xe3, 0x4f, 0xd0, 0xff, 0xe1,
|
||||
0xd1, 0x1f, 0xc1, 0xc9, 0x70, 0x24, 0x31, 0x69, 0x64, 0xfd, 0x17, 0x9c, 0xcc, 0xcc, 0x6e, 0xb7,
|
||||
0x6c, 0x5b, 0x62, 0xc3, 0xa1, 0x49, 0xe7, 0xbd, 0xf7, 0x7d, 0xef, 0xcd, 0x37, 0xdf, 0xcc, 0x82,
|
||||
0xd5, 0xc3, 0xa7, 0x4c, 0xb3, 0xa9, 0x8e, 0x3d, 0x5b, 0xfc, 0x18, 0xf1, 0x3b, 0xc4, 0xb7, 0x5d,
|
||||
0x4e, 0x7c, 0x17, 0xb7, 0xf4, 0xce, 0x0a, 0x6e, 0x79, 0x4d, 0xbc, 0xa2, 0x5b, 0xc4, 0x25, 0x3e,
|
||||
0xe6, 0xa4, 0xa1, 0x79, 0x3e, 0xe5, 0x14, 0x2e, 0x85, 0x30, 0x0d, 0x7b, 0xb6, 0x36, 0x04, 0xd3,
|
||||
0x7a, 0xb0, 0xd2, 0x43, 0xcb, 0xe6, 0xcd, 0x76, 0x5d, 0x33, 0xa9, 0xa3, 0x5b, 0xd4, 0xa2, 0xba,
|
||||
0x44, 0xd7, 0xdb, 0x9f, 0xe4, 0x4a, 0x2e, 0xe4, 0xbf, 0x90, 0xb5, 0xf4, 0xb8, 0x3f, 0x8c, 0x83,
|
||||
0xcd, 0xa6, 0xed, 0x12, 0xff, 0x48, 0xf7, 0x0e, 0x2d, 0x39, 0x99, 0xee, 0x10, 0x8e, 0xf5, 0xce,
|
||||
0xd0, 0x2c, 0x25, 0xfd, 0x32, 0x94, 0xdf, 0x76, 0xb9, 0xed, 0x90, 0x21, 0xc0, 0x93, 0xff, 0x01,
|
||||
0x98, 0xd9, 0x24, 0x0e, 0x1e, 0xc4, 0x55, 0xbe, 0x4f, 0x80, 0xb9, 0x9a, 0xdc, 0x69, 0x8d, 0x53,
|
||||
0x1f, 0x5b, 0xe4, 0x1d, 0xf1, 0x99, 0x4d, 0x5d, 0xb8, 0x0a, 0x72, 0xd8, 0xb3, 0xc3, 0xd4, 0xf6,
|
||||
0x66, 0x51, 0x59, 0x54, 0x96, 0x67, 0x8c, 0x9b, 0xc7, 0xdd, 0x72, 0x2a, 0xe8, 0x96, 0x73, 0xeb,
|
||||
0x6f, 0xb7, 0x7b, 0x29, 0x74, 0xb1, 0x0e, 0xae, 0x83, 0x02, 0x71, 0x4d, 0xda, 0xb0, 0x5d, 0x2b,
|
||||
0x62, 0x2a, 0x4e, 0x48, 0xe8, 0x42, 0x04, 0x2d, 0x6c, 0x25, 0xd3, 0x68, 0xb0, 0x1e, 0x6e, 0x80,
|
||||
0xd9, 0x06, 0x31, 0x69, 0x03, 0xd7, 0x5b, 0xbd, 0x69, 0x58, 0x31, 0xbd, 0x98, 0x5e, 0x9e, 0x31,
|
||||
0xe6, 0x83, 0x6e, 0x79, 0x76, 0x73, 0x30, 0x89, 0x86, 0xeb, 0xe1, 0x33, 0x90, 0x97, 0x07, 0xd8,
|
||||
0x88, 0x19, 0x32, 0x92, 0x01, 0x06, 0xdd, 0x72, 0xbe, 0x96, 0xc8, 0xa0, 0x81, 0xca, 0xca, 0xcf,
|
||||
0x09, 0x90, 0x1f, 0x50, 0xe3, 0x23, 0x98, 0x16, 0x47, 0xd5, 0xc0, 0x1c, 0x4b, 0x29, 0x72, 0xd5,
|
||||
0x47, 0x5a, 0xdf, 0x2e, 0xb1, 0xe2, 0x9a, 0x77, 0x68, 0x49, 0xef, 0x68, 0xa2, 0x5a, 0xeb, 0xac,
|
||||
0x68, 0x7b, 0xf5, 0xcf, 0xc4, 0xe4, 0xbb, 0x84, 0x63, 0x03, 0x46, 0x0a, 0x80, 0x7e, 0x0c, 0xc5,
|
||||
0xac, 0xf0, 0x3d, 0xc8, 0x30, 0x8f, 0x98, 0x52, 0xad, 0x5c, 0x75, 0x4d, 0x1b, 0xcb, 0x8c, 0x5a,
|
||||
0x72, 0xcc, 0x9a, 0x47, 0x4c, 0xe3, 0x5a, 0xd4, 0x26, 0x23, 0x56, 0x48, 0x92, 0x42, 0x13, 0x64,
|
||||
0x19, 0xc7, 0xbc, 0x2d, 0x74, 0x14, 0xf4, 0xcf, 0xaf, 0x46, 0x2f, 0x29, 0x8c, 0x7c, 0xd4, 0x20,
|
||||
0x1b, 0xae, 0x51, 0x44, 0x5d, 0xf9, 0x91, 0x06, 0x0b, 0x49, 0xc0, 0x06, 0x75, 0x1b, 0x36, 0x17,
|
||||
0xfa, 0xbd, 0x04, 0x19, 0x7e, 0xe4, 0x91, 0xc8, 0x46, 0x0f, 0x7a, 0x23, 0xee, 0x1f, 0x79, 0xe4,
|
||||
0xbc, 0x5b, 0xbe, 0x7d, 0x09, 0x4c, 0xa4, 0x91, 0x04, 0xc2, 0xb5, 0x78, 0x07, 0xa1, 0x9d, 0xee,
|
||||
0x24, 0x87, 0x38, 0xef, 0x96, 0x0b, 0x31, 0x2c, 0x39, 0x17, 0x7c, 0x0d, 0x20, 0xad, 0x87, 0x47,
|
||||
0xfc, 0x2a, 0x74, 0xbf, 0x70, 0xa5, 0x10, 0x22, 0x6d, 0x94, 0x22, 0x1a, 0xb8, 0x37, 0x54, 0x81,
|
||||
0x46, 0xa0, 0x60, 0x07, 0xc0, 0x16, 0x66, 0x7c, 0xdf, 0xc7, 0x2e, 0x0b, 0x47, 0xb4, 0x1d, 0x52,
|
||||
0xcc, 0x48, 0x51, 0xef, 0x8f, 0xe7, 0x08, 0x81, 0xe8, 0xf7, 0xdd, 0x19, 0x62, 0x43, 0x23, 0x3a,
|
||||
0xc0, 0xbb, 0x20, 0xeb, 0x13, 0xcc, 0xa8, 0x5b, 0x9c, 0x94, 0xdb, 0x8f, 0xcf, 0x00, 0xc9, 0x28,
|
||||
0x8a, 0xb2, 0xf0, 0x1e, 0x98, 0x72, 0x08, 0x63, 0xd8, 0x22, 0xc5, 0xac, 0x2c, 0x2c, 0x44, 0x85,
|
||||
0x53, 0xbb, 0x61, 0x18, 0xf5, 0xf2, 0x95, 0x5f, 0x0a, 0x80, 0x49, 0xdd, 0x77, 0x6c, 0xc6, 0xe1,
|
||||
0x87, 0x21, 0xa7, 0x6b, 0xe3, 0xed, 0x4b, 0xa0, 0xa5, 0xcf, 0x6f, 0x44, 0x2d, 0xa7, 0x7b, 0x91,
|
||||
0x0b, 0x2e, 0x3f, 0x00, 0x93, 0x36, 0x27, 0x8e, 0x38, 0xc5, 0xf4, 0x72, 0xae, 0xba, 0x7a, 0x25,
|
||||
0x1f, 0x1a, 0xd7, 0xa3, 0x0e, 0x93, 0xdb, 0x82, 0x0b, 0x85, 0x94, 0x95, 0xb9, 0xc1, 0xfd, 0x88,
|
||||
0x0b, 0x50, 0xf9, 0x2d, 0x1e, 0xb8, 0x11, 0x36, 0x86, 0x5f, 0x40, 0x81, 0x25, 0xe2, 0xac, 0xa8,
|
||||
0xc8, 0xa1, 0xc6, 0xbe, 0x1c, 0x23, 0x9e, 0xcd, 0xfe, 0x33, 0x97, 0x8c, 0x33, 0x34, 0xd8, 0x0c,
|
||||
0xee, 0x81, 0x79, 0x93, 0x3a, 0x0e, 0x75, 0xb7, 0x46, 0xbe, 0x97, 0xb7, 0x82, 0x6e, 0x79, 0x7e,
|
||||
0x63, 0x54, 0x01, 0x1a, 0x8d, 0x83, 0x3e, 0x00, 0x66, 0xef, 0x0a, 0x84, 0x0f, 0x66, 0xae, 0xfa,
|
||||
0xe2, 0x4a, 0x02, 0xc7, 0x37, 0xa9, 0xff, 0x66, 0xc5, 0x21, 0x86, 0x2e, 0x74, 0x31, 0xde, 0x1c,
|
||||
0x9f, 0xa9, 0xa9, 0x93, 0x33, 0x35, 0x75, 0x7a, 0xa6, 0xa6, 0xbe, 0x06, 0xaa, 0x72, 0x1c, 0xa8,
|
||||
0xca, 0x49, 0xa0, 0x2a, 0xa7, 0x81, 0xaa, 0xfc, 0x09, 0x54, 0xe5, 0xdb, 0x5f, 0x35, 0x75, 0xb0,
|
||||
0x34, 0xd6, 0x07, 0xf9, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x79, 0x04, 0x7d, 0x78, 0xb8, 0x07,
|
||||
0x00, 0x00,
|
||||
}

func (m *ServerStorageVersion) Marshal() (dAtA []byte, err error) {

7
vendor/k8s.io/api/apiserverinternal/v1alpha1/generated.proto
generated
vendored
@@ -52,7 +52,7 @@ message ServerStorageVersion {
// Storage version of a specific resource.
message StorageVersion {
// The name is <group>.<resource>.
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// Spec is an empty spec. It is here to comply with Kubernetes API style.
optional StorageVersionSpec spec = 2;
@@ -77,8 +77,7 @@ message StorageVersionCondition {
optional int64 observedGeneration = 3;

// Last time the condition transitioned from one status to another.
// +required
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;

// The reason for the condition's last transition.
// +required
@@ -94,7 +93,7 @@ message StorageVersionList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// Items holds a list of StorageVersion
repeated StorageVersion items = 2;

1
vendor/k8s.io/api/apiserverinternal/v1alpha1/types.go
generated
vendored
@@ -111,7 +111,6 @@ type StorageVersionCondition struct {
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
// Last time the condition transitioned from one status to another.
// +required
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
// The reason for the condition's last transition.
// +required

1
vendor/k8s.io/api/apps/v1/doc.go
generated
vendored
@@ -17,5 +17,6 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +k8s:prerelease-lifecycle-gen=true

package v1 // import "k8s.io/api/apps/v1"

341
vendor/k8s.io/api/apps/v1/generated.pb.go
generated
vendored
@@ -15,7 +15,7 @@ limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto
// source: k8s.io/api/apps/v1/generated.proto

package v1

@@ -51,7 +51,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *ControllerRevision) Reset() { *m = ControllerRevision{} }
func (*ControllerRevision) ProtoMessage() {}
func (*ControllerRevision) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{0}
return fileDescriptor_5b781835628d5338, []int{0}
}
func (m *ControllerRevision) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -79,7 +79,7 @@ var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo
func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} }
func (*ControllerRevisionList) ProtoMessage() {}
func (*ControllerRevisionList) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{1}
return fileDescriptor_5b781835628d5338, []int{1}
}
func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -107,7 +107,7 @@ var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo
func (m *DaemonSet) Reset() { *m = DaemonSet{} }
func (*DaemonSet) ProtoMessage() {}
func (*DaemonSet) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{2}
return fileDescriptor_5b781835628d5338, []int{2}
}
func (m *DaemonSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -135,7 +135,7 @@ var xxx_messageInfo_DaemonSet proto.InternalMessageInfo
func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} }
func (*DaemonSetCondition) ProtoMessage() {}
func (*DaemonSetCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{3}
return fileDescriptor_5b781835628d5338, []int{3}
}
func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -163,7 +163,7 @@ var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo
func (m *DaemonSetList) Reset() { *m = DaemonSetList{} }
func (*DaemonSetList) ProtoMessage() {}
func (*DaemonSetList) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{4}
return fileDescriptor_5b781835628d5338, []int{4}
}
func (m *DaemonSetList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -191,7 +191,7 @@ var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo
func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} }
func (*DaemonSetSpec) ProtoMessage() {}
func (*DaemonSetSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{5}
return fileDescriptor_5b781835628d5338, []int{5}
}
func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -219,7 +219,7 @@ var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo
func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} }
func (*DaemonSetStatus) ProtoMessage() {}
func (*DaemonSetStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{6}
return fileDescriptor_5b781835628d5338, []int{6}
}
func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -247,7 +247,7 @@ var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo
func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} }
func (*DaemonSetUpdateStrategy) ProtoMessage() {}
func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{7}
return fileDescriptor_5b781835628d5338, []int{7}
}
func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -275,7 +275,7 @@ var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo
func (m *Deployment) Reset() { *m = Deployment{} }
func (*Deployment) ProtoMessage() {}
func (*Deployment) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{8}
return fileDescriptor_5b781835628d5338, []int{8}
}
func (m *Deployment) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -303,7 +303,7 @@ var xxx_messageInfo_Deployment proto.InternalMessageInfo
func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} }
func (*DeploymentCondition) ProtoMessage() {}
func (*DeploymentCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{9}
return fileDescriptor_5b781835628d5338, []int{9}
}
func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -331,7 +331,7 @@ var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo
func (m *DeploymentList) Reset() { *m = DeploymentList{} }
func (*DeploymentList) ProtoMessage() {}
func (*DeploymentList) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{10}
return fileDescriptor_5b781835628d5338, []int{10}
}
func (m *DeploymentList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -359,7 +359,7 @@ var xxx_messageInfo_DeploymentList proto.InternalMessageInfo
func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} }
func (*DeploymentSpec) ProtoMessage() {}
func (*DeploymentSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{11}
return fileDescriptor_5b781835628d5338, []int{11}
}
func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -387,7 +387,7 @@ var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo
func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} }
func (*DeploymentStatus) ProtoMessage() {}
func (*DeploymentStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{12}
return fileDescriptor_5b781835628d5338, []int{12}
}
func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -415,7 +415,7 @@ var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo
func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} }
func (*DeploymentStrategy) ProtoMessage() {}
func (*DeploymentStrategy) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{13}
return fileDescriptor_5b781835628d5338, []int{13}
}
func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -443,7 +443,7 @@ var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo
func (m *ReplicaSet) Reset() { *m = ReplicaSet{} }
func (*ReplicaSet) ProtoMessage() {}
func (*ReplicaSet) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{14}
return fileDescriptor_5b781835628d5338, []int{14}
}
func (m *ReplicaSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -471,7 +471,7 @@ var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo
func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} }
func (*ReplicaSetCondition) ProtoMessage() {}
func (*ReplicaSetCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{15}
return fileDescriptor_5b781835628d5338, []int{15}
}
func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -499,7 +499,7 @@ var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo
func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} }
func (*ReplicaSetList) ProtoMessage() {}
func (*ReplicaSetList) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{16}
return fileDescriptor_5b781835628d5338, []int{16}
}
func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -527,7 +527,7 @@ var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo
func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} }
func (*ReplicaSetSpec) ProtoMessage() {}
func (*ReplicaSetSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{17}
return fileDescriptor_5b781835628d5338, []int{17}
}
func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -555,7 +555,7 @@ var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo
func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} }
func (*ReplicaSetStatus) ProtoMessage() {}
func (*ReplicaSetStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{18}
return fileDescriptor_5b781835628d5338, []int{18}
}
func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -583,7 +583,7 @@ var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo
func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} }
func (*RollingUpdateDaemonSet) ProtoMessage() {}
func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{19}
return fileDescriptor_5b781835628d5338, []int{19}
}
func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -611,7 +611,7 @@ var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo
func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} }
func (*RollingUpdateDeployment) ProtoMessage() {}
func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{20}
return fileDescriptor_5b781835628d5338, []int{20}
}
func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -639,7 +639,7 @@ var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo
func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} }
func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {}
func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{21}
return fileDescriptor_5b781835628d5338, []int{21}
}
func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -667,7 +667,7 @@ var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo
func (m *StatefulSet) Reset() { *m = StatefulSet{} }
func (*StatefulSet) ProtoMessage() {}
func (*StatefulSet) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{22}
return fileDescriptor_5b781835628d5338, []int{22}
}
func (m *StatefulSet) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -695,7 +695,7 @@ var xxx_messageInfo_StatefulSet proto.InternalMessageInfo
func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} }
func (*StatefulSetCondition) ProtoMessage() {}
func (*StatefulSetCondition) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{23}
return fileDescriptor_5b781835628d5338, []int{23}
}
func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -723,7 +723,7 @@ var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo
func (m *StatefulSetList) Reset() { *m = StatefulSetList{} }
func (*StatefulSetList) ProtoMessage() {}
func (*StatefulSetList) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{24}
return fileDescriptor_5b781835628d5338, []int{24}
}
func (m *StatefulSetList) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -751,7 +751,7 @@ var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo
func (m *StatefulSetOrdinals) Reset() { *m = StatefulSetOrdinals{} }
func (*StatefulSetOrdinals) ProtoMessage() {}
func (*StatefulSetOrdinals) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{25}
return fileDescriptor_5b781835628d5338, []int{25}
}
func (m *StatefulSetOrdinals) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -781,7 +781,7 @@ func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) Reset() {
}
func (*StatefulSetPersistentVolumeClaimRetentionPolicy) ProtoMessage() {}
func (*StatefulSetPersistentVolumeClaimRetentionPolicy) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{26}
return fileDescriptor_5b781835628d5338, []int{26}
}
func (m *StatefulSetPersistentVolumeClaimRetentionPolicy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -809,7 +809,7 @@ var xxx_messageInfo_StatefulSetPersistentVolumeClaimRetentionPolicy proto.Intern
func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} }
func (*StatefulSetSpec) ProtoMessage() {}
func (*StatefulSetSpec) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{27}
return fileDescriptor_5b781835628d5338, []int{27}
}
func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -837,7 +837,7 @@ var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo
func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} }
func (*StatefulSetStatus) ProtoMessage() {}
func (*StatefulSetStatus) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{28}
return fileDescriptor_5b781835628d5338, []int{28}
}
func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -865,7 +865,7 @@ var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo
func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} }
func (*StatefulSetUpdateStrategy) ProtoMessage() {}
func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) {
return fileDescriptor_e1014cab6f31e43b, []int{29}
return fileDescriptor_5b781835628d5338, []int{29}
}
func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -924,150 +924,149 @@ func init() {
}

func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b)
proto.RegisterFile("k8s.io/api/apps/v1/generated.proto", fileDescriptor_5b781835628d5338)
}

var fileDescriptor_e1014cab6f31e43b = []byte{
|
||||
// 2211 bytes of a gzipped FileDescriptorProto
|
||||
var fileDescriptor_5b781835628d5338 = []byte{
|
||||
// 2194 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1b, 0xc7,
|
||||
0x15, 0xd7, 0xf2, 0x43, 0xa2, 0x86, 0x96, 0x64, 0x8f, 0x54, 0x89, 0xb1, 0x1b, 0xd2, 0xdd, 0xb8,
|
||||
0xb6, 0x12, 0xc7, 0x64, 0xed, 0x38, 0x41, 0x60, 0x17, 0x09, 0x44, 0x2a, 0x4d, 0xd3, 0xe8, 0xab,
|
||||
0x43, 0xcb, 0x01, 0xdc, 0xb4, 0xe8, 0x88, 0x1c, 0x53, 0x1b, 0xed, 0x17, 0x76, 0x87, 0x8a, 0x89,
|
||||
0x5e, 0x8a, 0x02, 0xbd, 0xf5, 0xd0, 0xbf, 0xa1, 0xff, 0x40, 0x51, 0x14, 0xcd, 0x2d, 0x08, 0x82,
|
||||
0x5e, 0x7c, 0x29, 0x10, 0xf4, 0xd2, 0x9c, 0x88, 0x9a, 0x39, 0x15, 0x45, 0x6f, 0xed, 0xc5, 0x97,
|
||||
0x16, 0x33, 0x3b, 0xfb, 0x3d, 0x2b, 0x52, 0x72, 0xac, 0x34, 0x81, 0x6f, 0xdc, 0x99, 0xdf, 0xfb,
|
||||
0xed, 0x9b, 0x99, 0xf7, 0xe6, 0xfd, 0x66, 0x96, 0xe0, 0xf6, 0xc1, 0xeb, 0x6e, 0x5d, 0xb3, 0x1a,
|
||||
0x07, 0xfd, 0x3d, 0xe2, 0x98, 0x84, 0x12, 0xb7, 0x71, 0x48, 0xcc, 0xae, 0xe5, 0x34, 0x44, 0x07,
|
||||
0xb6, 0xb5, 0x06, 0xb6, 0x6d, 0xb7, 0x71, 0x78, 0xbd, 0xd1, 0x23, 0x26, 0x71, 0x30, 0x25, 0xdd,
|
||||
0xba, 0xed, 0x58, 0xd4, 0x82, 0xd0, 0xc3, 0xd4, 0xb1, 0xad, 0xd5, 0x19, 0xa6, 0x7e, 0x78, 0xfd,
|
||||
0xfc, 0xb5, 0x9e, 0x46, 0xf7, 0xfb, 0x7b, 0xf5, 0x8e, 0x65, 0x34, 0x7a, 0x56, 0xcf, 0x6a, 0x70,
|
||||
0xe8, 0x5e, 0xff, 0x3e, 0x7f, 0xe2, 0x0f, 0xfc, 0x97, 0x47, 0x71, 0x5e, 0x8d, 0xbc, 0xa6, 0x63,
|
||||
0x39, 0x44, 0xf2, 0x9a, 0xf3, 0x37, 0x43, 0x8c, 0x81, 0x3b, 0xfb, 0x9a, 0x49, 0x9c, 0x41, 0xc3,
|
||||
0x3e, 0xe8, 0xb1, 0x06, 0xb7, 0x61, 0x10, 0x8a, 0x65, 0x56, 0x8d, 0x2c, 0x2b, 0xa7, 0x6f, 0x52,
|
||||
0xcd, 0x20, 0x29, 0x83, 0xd7, 0xc6, 0x19, 0xb8, 0x9d, 0x7d, 0x62, 0xe0, 0x94, 0xdd, 0x2b, 0x59,
|
||||
0x76, 0x7d, 0xaa, 0xe9, 0x0d, 0xcd, 0xa4, 0x2e, 0x75, 0x92, 0x46, 0xea, 0x7f, 0x14, 0x00, 0x5b,
|
||||
0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0xc8, 0xa1, 0xe6, 0x6a, 0x96, 0x09, 0x7f, 0x0e, 0x4a,
|
||||
0x6c, 0x3c, 0x5d, 0x4c, 0x71, 0x45, 0xb9, 0xa8, 0xac, 0x96, 0x6f, 0x7c, 0xaf, 0x1e, 0x4e, 0x72,
|
||||
0x40, 0x5f, 0xb7, 0x0f, 0x7a, 0xac, 0xc1, 0xad, 0x33, 0x74, 0xfd, 0xf0, 0x7a, 0x7d, 0x7b, 0xef,
|
||||
0x03, 0xd2, 0xa1, 0x9b, 0x84, 0xe2, 0x26, 0x7c, 0x38, 0xac, 0x4d, 0x8d, 0x86, 0x35, 0x10, 0xb6,
|
||||
0xa1, 0x80, 0x15, 0x6e, 0x83, 0x02, 0x67, 0xcf, 0x71, 0xf6, 0x6b, 0x99, 0xec, 0x62, 0xd0, 0x75,
|
||||
0x84, 0x3f, 0x7c, 0xeb, 0x01, 0x25, 0x26, 0x73, 0xaf, 0x79, 0x46, 0x50, 0x17, 0xd6, 0x31, 0xc5,
|
||||
0x88, 0x13, 0xc1, 0x97, 0x41, 0xc9, 0x11, 0xee, 0x57, 0xf2, 0x17, 0x95, 0xd5, 0x7c, 0xf3, 0xac,
|
||||
0x40, 0x95, 0xfc, 0x61, 0xa1, 0x00, 0xa1, 0xfe, 0x59, 0x01, 0xcb, 0xe9, 0x71, 0x6f, 0x68, 0x2e,
|
||||
0x85, 0xef, 0xa7, 0xc6, 0x5e, 0x9f, 0x6c, 0xec, 0xcc, 0x9a, 0x8f, 0x3c, 0x78, 0xb1, 0xdf, 0x12,
|
||||
0x19, 0xf7, 0xbb, 0xa0, 0xa8, 0x51, 0x62, 0xb8, 0x95, 0xdc, 0xc5, 0xfc, 0x6a, 0xf9, 0xc6, 0xe5,
|
||||
0x7a, 0x3a, 0x76, 0xeb, 0x69, 0xc7, 0x9a, 0x73, 0x82, 0xb2, 0xf8, 0x0e, 0x33, 0x46, 0x1e, 0x87,
|
||||
0xfa, 0x5f, 0x05, 0xcc, 0xae, 0x63, 0x62, 0x58, 0x66, 0x9b, 0xd0, 0x53, 0x58, 0xb4, 0x16, 0x28,
|
||||
0xb8, 0x36, 0xe9, 0x88, 0x45, 0xfb, 0x8e, 0xcc, 0xf7, 0xc0, 0x9d, 0xb6, 0x4d, 0x3a, 0xe1, 0x42,
|
||||
0xb1, 0x27, 0xc4, 0x8d, 0xe1, 0xbb, 0x60, 0xda, 0xa5, 0x98, 0xf6, 0x5d, 0xbe, 0x4c, 0xe5, 0x1b,
|
||||
0x2f, 0x1c, 0x4d, 0xc3, 0xa1, 0xcd, 0x79, 0x41, 0x34, 0xed, 0x3d, 0x23, 0x41, 0xa1, 0xfe, 0x23,
|
||||
0x07, 0x60, 0x80, 0x6d, 0x59, 0x66, 0x57, 0xa3, 0x2c, 0x7e, 0x6f, 0x81, 0x02, 0x1d, 0xd8, 0x84,
|
||||
0x4f, 0xc3, 0x6c, 0xf3, 0xb2, 0xef, 0xc5, 0x9d, 0x81, 0x4d, 0x1e, 0x0f, 0x6b, 0xcb, 0x69, 0x0b,
|
||||
0xd6, 0x83, 0xb8, 0x0d, 0xdc, 0x08, 0xfc, 0xcb, 0x71, 0xeb, 0x9b, 0xf1, 0x57, 0x3f, 0x1e, 0xd6,
|
||||
0x24, 0x9b, 0x45, 0x3d, 0x60, 0x8a, 0x3b, 0x08, 0x0f, 0x01, 0xd4, 0xb1, 0x4b, 0xef, 0x38, 0xd8,
|
||||
0x74, 0xbd, 0x37, 0x69, 0x06, 0x11, 0x23, 0x7f, 0x69, 0xb2, 0xe5, 0x61, 0x16, 0xcd, 0xf3, 0xc2,
|
||||
0x0b, 0xb8, 0x91, 0x62, 0x43, 0x92, 0x37, 0xc0, 0xcb, 0x60, 0xda, 0x21, 0xd8, 0xb5, 0xcc, 0x4a,
|
||||
0x81, 0x8f, 0x22, 0x98, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0x2f, 0x82, 0x19, 0x83, 0xb8, 0x2e,
|
||||
0xee, 0x91, 0x4a, 0x91, 0x03, 0x17, 0x04, 0x70, 0x66, 0xd3, 0x6b, 0x46, 0x7e, 0xbf, 0xfa, 0x07,
|
||||
0x05, 0xcc, 0x05, 0x33, 0x77, 0x0a, 0xa9, 0xd2, 0x8c, 0xa7, 0xca, 0xf3, 0x47, 0xc6, 0x49, 0x46,
|
||||
0x86, 0x7c, 0x92, 0x8f, 0xf8, 0xcc, 0x82, 0x10, 0xfe, 0x14, 0x94, 0x5c, 0xa2, 0x93, 0x0e, 0xb5,
|
||||
0x1c, 0xe1, 0xf3, 0x2b, 0x13, 0xfa, 0x8c, 0xf7, 0x88, 0xde, 0x16, 0xa6, 0xcd, 0x33, 0xcc, 0x69,
|
||||
0xff, 0x09, 0x05, 0x94, 0xf0, 0xc7, 0xa0, 0x44, 0x89, 0x61, 0xeb, 0x98, 0x12, 0x91, 0x26, 0xb1,
|
||||
0xf8, 0x66, 0xe1, 0xc2, 0xc8, 0x76, 0xac, 0xee, 0x1d, 0x01, 0xe3, 0x89, 0x12, 0xcc, 0x83, 0xdf,
|
||||
0x8a, 0x02, 0x1a, 0x78, 0x00, 0xe6, 0xfb, 0x76, 0x97, 0x21, 0x29, 0xdb, 0xba, 0x7b, 0x03, 0x11,
|
||||
0x3e, 0x57, 0x8f, 0x9c, 0x90, 0xdd, 0x98, 0x49, 0x73, 0x59, 0xbc, 0x60, 0x3e, 0xde, 0x8e, 0x12,
|
||||
0xd4, 0x70, 0x0d, 0x2c, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xda, 0xa4, 0x63, 0x99, 0x5d, 0x97,
|
||||
0x07, 0x50, 0xb1, 0xb9, 0x22, 0x08, 0x16, 0x36, 0xe3, 0xdd, 0x28, 0x89, 0x87, 0x1b, 0x60, 0xc9,
|
||||
0xdf, 0x67, 0x7f, 0xa8, 0xb9, 0xd4, 0x72, 0x06, 0x1b, 0x9a, 0xa1, 0xd1, 0xca, 0x34, 0xe7, 0xa9,
|
||||
0x8c, 0x86, 0xb5, 0x25, 0x24, 0xe9, 0x47, 0x52, 0x2b, 0xf5, 0x37, 0xd3, 0x60, 0x21, 0xb1, 0x1b,
|
||||
0xc0, 0xbb, 0x60, 0xb9, 0xd3, 0x77, 0x1c, 0x62, 0xd2, 0xad, 0xbe, 0xb1, 0x47, 0x9c, 0x76, 0x67,
|
||||
0x9f, 0x74, 0xfb, 0x3a, 0xe9, 0xf2, 0x15, 0x2d, 0x36, 0xab, 0xc2, 0xd7, 0xe5, 0x96, 0x14, 0x85,
|
||||
0x32, 0xac, 0xe1, 0x8f, 0x00, 0x34, 0x79, 0xd3, 0xa6, 0xe6, 0xba, 0x01, 0x67, 0x8e, 0x73, 0x06,
|
||||
0x09, 0xb8, 0x95, 0x42, 0x20, 0x89, 0x15, 0xf3, 0xb1, 0x4b, 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f,
|
||||
0xf9, 0xb8, 0x8f, 0xeb, 0x52, 0x14, 0xca, 0xb0, 0x86, 0xaf, 0x82, 0xb2, 0xf7, 0x36, 0x3e, 0xe7,
|
||||
0x62, 0x71, 0x16, 0x05, 0x59, 0x79, 0x2b, 0xec, 0x42, 0x51, 0x1c, 0x1b, 0x9a, 0xb5, 0xe7, 0x12,
|
||||
0xe7, 0x90, 0x74, 0xdf, 0xf6, 0x34, 0x00, 0x2b, 0x94, 0x45, 0x5e, 0x28, 0x83, 0xa1, 0x6d, 0xa7,
|
||||
0x10, 0x48, 0x62, 0xc5, 0x86, 0xe6, 0x45, 0x4d, 0x6a, 0x68, 0xd3, 0xf1, 0xa1, 0xed, 0x4a, 0x51,
|
||||
0x28, 0xc3, 0x9a, 0xc5, 0x9e, 0xe7, 0xf2, 0xda, 0x21, 0xd6, 0x74, 0xbc, 0xa7, 0x93, 0xca, 0x4c,
|
||||
0x3c, 0xf6, 0xb6, 0xe2, 0xdd, 0x28, 0x89, 0x87, 0x6f, 0x83, 0x73, 0x5e, 0xd3, 0xae, 0x89, 0x03,
|
||||
0x92, 0x12, 0x27, 0x79, 0x4e, 0x90, 0x9c, 0xdb, 0x4a, 0x02, 0x50, 0xda, 0x06, 0xde, 0x02, 0xf3,
|
||||
0x1d, 0x4b, 0xd7, 0x79, 0x3c, 0xb6, 0xac, 0xbe, 0x49, 0x2b, 0xb3, 0x9c, 0x05, 0xb2, 0x1c, 0x6a,
|
||||
0xc5, 0x7a, 0x50, 0x02, 0x09, 0xef, 0x01, 0xd0, 0xf1, 0xcb, 0x81, 0x5b, 0x01, 0xd9, 0x85, 0x3e,
|
||||
0x5d, 0x87, 0xc2, 0x02, 0x1c, 0x34, 0xb9, 0x28, 0xc2, 0xa6, 0x7e, 0xa2, 0x80, 0x95, 0x8c, 0x1c,
|
||||
0x87, 0x6f, 0xc6, 0xaa, 0xde, 0xd5, 0x44, 0xd5, 0xbb, 0x90, 0x61, 0x16, 0x29, 0x7d, 0x1d, 0x30,
|
||||
0xc7, 0x74, 0x87, 0x66, 0xf6, 0x3c, 0x88, 0xd8, 0xc1, 0x5e, 0x92, 0xf9, 0x8e, 0xa2, 0xc0, 0x70,
|
||||
0x1b, 0x3e, 0x37, 0x1a, 0xd6, 0xe6, 0x62, 0x7d, 0x28, 0xce, 0xa9, 0xfe, 0x2a, 0x07, 0xc0, 0x3a,
|
||||
0xb1, 0x75, 0x6b, 0x60, 0x10, 0xf3, 0x34, 0x54, 0xcb, 0x7a, 0x4c, 0xb5, 0xa8, 0xd2, 0x85, 0x08,
|
||||
0xfc, 0xc9, 0x94, 0x2d, 0x1b, 0x09, 0xd9, 0x72, 0x69, 0x0c, 0xcf, 0xd1, 0xba, 0xe5, 0x6f, 0x79,
|
||||
0xb0, 0x18, 0x82, 0x43, 0xe1, 0x72, 0x3b, 0xb6, 0x84, 0x57, 0x12, 0x4b, 0xb8, 0x22, 0x31, 0x79,
|
||||
0x6a, 0xca, 0xe5, 0x03, 0x30, 0xcf, 0x74, 0x85, 0xb7, 0x6a, 0x5c, 0xb5, 0x4c, 0x1f, 0x5b, 0xb5,
|
||||
0x04, 0x55, 0x67, 0x23, 0xc6, 0x84, 0x12, 0xcc, 0x19, 0x2a, 0x69, 0xe6, 0xeb, 0xa8, 0x92, 0xfe,
|
||||
0xa8, 0x80, 0xf9, 0x70, 0x99, 0x4e, 0x41, 0x26, 0xb5, 0xe2, 0x32, 0xa9, 0x7a, 0x74, 0x5c, 0x66,
|
||||
0xe8, 0xa4, 0xbf, 0x16, 0xa2, 0x5e, 0x73, 0xa1, 0xb4, 0xca, 0x0e, 0x54, 0xb6, 0xae, 0x75, 0xb0,
|
||||
0x2b, 0xca, 0xea, 0x19, 0xef, 0x30, 0xe5, 0xb5, 0xa1, 0xa0, 0x37, 0x26, 0xa9, 0x72, 0x4f, 0x57,
|
||||
0x52, 0xe5, 0xbf, 0x1c, 0x49, 0x75, 0x07, 0x94, 0x5c, 0x5f, 0x4c, 0x15, 0x38, 0xe5, 0xe5, 0x71,
|
||||
0xe9, 0x2c, 0x74, 0x54, 0xc0, 0x1a, 0x28, 0xa8, 0x80, 0x49, 0xa6, 0x9d, 0x8a, 0x5f, 0xa5, 0x76,
|
||||
0x62, 0xe1, 0x6d, 0xe3, 0xbe, 0x4b, 0xba, 0x3c, 0x95, 0x4a, 0x61, 0x78, 0xef, 0xf0, 0x56, 0x24,
|
||||
0x7a, 0xe1, 0x2e, 0x58, 0xb1, 0x1d, 0xab, 0xe7, 0x10, 0xd7, 0x5d, 0x27, 0xb8, 0xab, 0x6b, 0x26,
|
||||
0xf1, 0x07, 0xe0, 0x55, 0xbd, 0x0b, 0xa3, 0x61, 0x6d, 0x65, 0x47, 0x0e, 0x41, 0x59, 0xb6, 0xea,
|
||||
0xc7, 0x05, 0x70, 0x36, 0xb9, 0x23, 0x66, 0x08, 0x11, 0xe5, 0x44, 0x42, 0xe4, 0xe5, 0x48, 0x88,
|
||||
(fileDescriptor byte arrays elided: machine-generated, compressed protobuf descriptor data; old and new byte values omitted from this view)
}

func (m *ControllerRevision) Marshal() (dAtA []byte, err error) {
77
vendor/k8s.io/api/apps/v1/generated.proto
generated
vendored
@@ -43,10 +43,10 @@ message ControllerRevision {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// Data is the serialized representation of the state.
optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;
optional .k8s.io.apimachinery.pkg.runtime.RawExtension data = 2;

// Revision indicates the revision of the state represented by Data.
optional int64 revision = 3;
@@ -56,7 +56,7 @@ message ControllerRevision {
message ControllerRevisionList {
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// Items is the list of ControllerRevisions
repeated ControllerRevision items = 2;
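For context only, not part of this diff: a minimal sketch of how the ControllerRevision fields above (data as a RawExtension plus an int64 revision) map onto the Go types in k8s.io/api/apps/v1. The object name and payload are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Hypothetical payload standing in for a serialized pod template.
	payload, _ := json.Marshal(map[string]string{"example": "state"})

	rev := appsv1.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{Name: "web-6b7f9c9d5", Namespace: "default"},
		Data:       runtime.RawExtension{Raw: payload}, // serialized representation of the state
		Revision:   3,                                  // revision of the state represented by Data
	}
	fmt.Println(rev.Name, rev.Revision, len(rev.Data.Raw))
}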
@@ -67,7 +67,7 @@ message DaemonSet {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
@@ -93,7 +93,7 @@ message DaemonSetCondition {

// Last time the condition transitioned from one status to another.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;

// The reason for the condition's last transition.
// +optional
@@ -109,7 +109,7 @@ message DaemonSetList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// A list of daemon sets.
repeated DaemonSet items = 2;
@@ -121,7 +121,7 @@ message DaemonSetSpec {
// Must match in order to be controlled.
// It must match the pod template's labels.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1;

// An object that describes the pod that will be created.
// The DaemonSet will create exactly one copy of this pod on every node
@@ -129,7 +129,7 @@ message DaemonSetSpec {
// selector is specified).
// The only allowed template.spec.restartPolicy value is "Always".
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
optional k8s.io.api.core.v1.PodTemplateSpec template = 2;
optional .k8s.io.api.core.v1.PodTemplateSpec template = 2;

// An update strategy to replace existing DaemonSet pods with new pods.
// +optional
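Illustration only, not from this repository: a short Go sketch of the selector/template contract in the DaemonSetSpec hunks above (the selector must match the pod template's labels, and template.spec.restartPolicy may only be "Always"). The name, labels and image are assumptions.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	labels := map[string]string{"app": "node-agent"} // assumed label set

	ds := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "kube-system"},
		Spec: appsv1.DaemonSetSpec{
			// The selector must match the pod template's labels.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers:    []corev1.Container{{Name: "agent", Image: "example/node-agent:latest"}},
					RestartPolicy: corev1.RestartPolicyAlways, // the only allowed value for a DaemonSet template
				},
			},
		},
	}
	fmt.Println(ds.Spec.Selector.MatchLabels["app"] == ds.Spec.Template.Labels["app"]) // true
}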
@@ -200,6 +200,8 @@ message DaemonSetStatus {
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
repeated DaemonSetCondition conditions = 10;
}

@@ -223,7 +225,7 @@ message Deployment {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// Specification of the desired behavior of the Deployment.
// +optional
@@ -243,10 +245,10 @@ message DeploymentCondition {
optional string status = 2;

// The last time this condition was updated.
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6;

// Last time the condition transitioned from one status to another.
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7;

// The reason for the condition's last transition.
optional string reason = 4;
@@ -259,7 +261,7 @@ message DeploymentCondition {
message DeploymentList {
// Standard list metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// Items is the list of Deployments.
repeated Deployment items = 2;
@@ -275,11 +277,11 @@ message DeploymentSpec {
// Label selector for pods. Existing ReplicaSets whose pods are
// selected by this will be the ones affected by this deployment.
// It must match the pod template's labels.
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;

// Template describes the pods that will be created.
// The only allowed template.spec.restartPolicy value is "Always".
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;

// The deployment strategy to use to replace existing pods with new ones.
// +optional
@@ -341,6 +343,8 @@ message DeploymentStatus {
// Represents the latest available observations of a deployment's current state.
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
repeated DeploymentCondition conditions = 6;

// Count of hash collisions for the Deployment. The Deployment controller uses this
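Not part of the diff: the +listType=map / +listMapKey=type markers added above declare the condition type as the list's merge key (relevant for server-side apply). A hedged sketch of the usual lookup-by-type pattern over Deployment conditions; findCondition is a hypothetical helper, not an API from this diff.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// findCondition returns the condition with the given type, treating Type as the
// effective key of the conditions list.
func findCondition(conds []appsv1.DeploymentCondition, t appsv1.DeploymentConditionType) *appsv1.DeploymentCondition {
	for i := range conds {
		if conds[i].Type == t {
			return &conds[i]
		}
	}
	return nil
}

func main() {
	status := appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{
			{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue, Reason: "MinimumReplicasAvailable"},
		},
	}
	if c := findCondition(status.Conditions, appsv1.DeploymentAvailable); c != nil {
		fmt.Println(c.Type, c.Status, c.Reason)
	}
}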
@@ -372,7 +376,7 @@ message ReplicaSet {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// Spec defines the specification of the desired behavior of the ReplicaSet.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
@@ -398,7 +402,7 @@ message ReplicaSetCondition {

// The last time the condition transitioned from one status to another.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;

// The reason for the condition's last transition.
// +optional
@@ -414,7 +418,7 @@ message ReplicaSetList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// List of ReplicaSets.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
@@ -440,13 +444,13 @@ message ReplicaSetSpec {
// Label keys and values that must match in order to be controlled by this replica set.
// It must match the pod template's labels.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;

// Template is the object that describes the pod that will be created if
// insufficient replicas are detected.
// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
// +optional
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;
}

// ReplicaSetStatus represents the current status of a ReplicaSet.
@@ -475,6 +479,8 @@ message ReplicaSetStatus {
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
repeated ReplicaSetCondition conditions = 6;
}

@@ -495,7 +501,7 @@ message RollingUpdateDaemonSet {
// 70% of original number of DaemonSet pods are available at all times during
// the update.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;

// The maximum number of nodes with an existing available DaemonSet pod that
// can have an updated DaemonSet pod during during an update.
@@ -516,7 +522,7 @@ message RollingUpdateDaemonSet {
// so resource intensive daemonsets should take into account that they may
// cause evictions during disruption.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}

// Spec to control the desired behavior of rolling update.
@@ -532,7 +538,7 @@ message RollingUpdateDeployment {
// that the total number of pods available at all times during the update is at
// least 70% of desired pods.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1;

// The maximum number of pods that can be scheduled above the desired number of
// pods.
@@ -546,7 +552,7 @@ message RollingUpdateDeployment {
// new ReplicaSet can be scaled up further, ensuring that total number of pods running
// at any time during the update is at most 130% of desired pods.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
}

// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.
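Illustration only: the RollingUpdateDeployment hunks above describe maxUnavailable and maxSurge as IntOrString values (the comments use 30%, i.e. at least 70% of desired pods available and at most 130% running during an update). A minimal sketch with the intstr helpers from k8s.io/apimachinery; the 30% figures simply mirror that example.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromString("30%") // keep at least 70% of desired pods available
	maxSurge := intstr.FromString("30%")       // run at most 130% of desired pods during the update

	strategy := appsv1.DeploymentStrategy{
		Type: appsv1.RollingUpdateDeploymentStrategyType,
		RollingUpdate: &appsv1.RollingUpdateDeployment{
			MaxUnavailable: &maxUnavailable,
			MaxSurge:       &maxSurge,
		},
	}
	fmt.Println(strategy.Type, maxUnavailable.String(), maxSurge.String())
}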
@@ -566,7 +572,7 @@ message RollingUpdateStatefulSetStrategy {
// Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it
// will be counted towards MaxUnavailable.
// +optional
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
optional .k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 2;
}

// StatefulSet represents a set of pods with consistent identities.
@@ -580,7 +586,7 @@ message StatefulSet {
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// Spec defines the desired identities of pods in this set.
// +optional
@@ -602,7 +608,7 @@ message StatefulSetCondition {

// Last time the condition transitioned from one status to another.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;

// The reason for the condition's last transition.
// +optional
@@ -618,7 +624,7 @@ message StatefulSetList {
// Standard list's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// Items is the list of stateful sets.
repeated StatefulSet items = 2;
@@ -669,7 +675,7 @@ message StatefulSetSpec {
// selector is a label query over pods that should match the replica count.
// It must match the pod template's labels.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
optional .k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;

// template is the object that describes the pod that will be created if
// insufficient replicas are detected. Each pod stamped out by the StatefulSet
@@ -678,7 +684,7 @@ message StatefulSetSpec {
// <statefulsetname>-<podindex>. For example, a pod in a StatefulSet named
// "web" with index number "3" would be named "web-3".
// The only allowed template.spec.restartPolicy value is "Always".
optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
optional .k8s.io.api.core.v1.PodTemplateSpec template = 3;

// volumeClaimTemplates is a list of claims that pods are allowed to reference.
// The StatefulSet controller is responsible for mapping network identities to
@@ -688,7 +694,8 @@ message StatefulSetSpec {
// any volumes in the template, with the same name.
// TODO: Define the behavior if a claim already exists with the same name.
// +optional
repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;
// +listType=atomic
repeated .k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4;

// serviceName is the name of the service that governs this StatefulSet.
// This service must exist before the StatefulSet, and is responsible for
@@ -730,15 +737,13 @@ message StatefulSetSpec {
// volume claims are created as needed and retained until manually deleted. This
// policy allows the lifecycle to be altered, for example by deleting persistent
// volume claims when their stateful set is deleted, or when their pod is scaled
// down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled,
// which is alpha. +optional
// down.
// +optional
optional StatefulSetPersistentVolumeClaimRetentionPolicy persistentVolumeClaimRetentionPolicy = 10;

// ordinals controls the numbering of replica indices in a StatefulSet. The
// default ordinals behavior assigns a "0" index to the first replica and
// increments the index by one for each additional replica requested. Using
// the ordinals field requires the StatefulSetStartOrdinal feature gate to be
// enabled, which is beta.
// increments the index by one for each additional replica requested.
// +optional
optional StatefulSetOrdinals ordinals = 11;
}
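Not part of the diff: the StatefulSetSpec hunk above drops the feature-gate wording for persistentVolumeClaimRetentionPolicy and ordinals. A hedged sketch of setting both fields with the apps/v1 Go types; the Delete/Retain combination and the start ordinal of 1 are arbitrary choices for illustration.

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

func main() {
	spec := appsv1.StatefulSetSpec{
		// Delete PVCs when the StatefulSet is deleted, keep them when it is scaled down.
		PersistentVolumeClaimRetentionPolicy: &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
			WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
			WhenScaled:  appsv1.RetainPersistentVolumeClaimRetentionPolicyType,
		},
		// Start replica ordinals at 1 instead of the default 0.
		Ordinals: &appsv1.StatefulSetOrdinals{Start: 1},
	}
	fmt.Println(spec.PersistentVolumeClaimRetentionPolicy.WhenDeleted, spec.Ordinals.Start)
}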
@@ -782,6 +787,8 @@ message StatefulSetStatus {
// +optional
// +patchMergeKey=type
// +patchStrategy=merge
// +listType=map
// +listMapKey=type
repeated StatefulSetCondition conditions = 10;

// Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.
Some files were not shown because too many files have changed in this diff.