Merge pull request #1338 from crazy-max/dependabot/go_modules/k8s.io/client-go-0.32.1

chore(deps): bump k8s.io/client-go from 0.32.0 to 0.32.1
Authored by CrazyMax on 2025-01-19 13:06:20 +01:00; committed by GitHub
9 changed files with 22 additions and 22 deletions

go.mod

@@ -47,9 +47,9 @@ require (
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
google.golang.org/protobuf v1.35.2
gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.32.0
-	k8s.io/apimachinery v0.32.0
-	k8s.io/client-go v0.32.0
+	k8s.io/api v0.32.1
+	k8s.io/apimachinery v0.32.1
+	k8s.io/client-go v0.32.1
)
require (
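For reference, the go.sum and vendor/ changes below follow mechanically from this version bump; the equivalent manual steps would be roughly the following (Dependabot's exact invocation is not shown in this PR):

go get k8s.io/api@v0.32.1 k8s.io/apimachinery@v0.32.1 k8s.io/client-go@v0.32.1
go mod tidy
go mod vendor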

go.sum

@@ -474,12 +474,12 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
-k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE=
-k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0=
-k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg=
-k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8=
-k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8=
+k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc=
+k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k=
+k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs=
+k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
+k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU=
+k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=


@@ -675,7 +675,7 @@ message ResourceClaimStatus {
// which issued it knows that it must put the pod back into the queue,
// waiting for the ResourceClaim to become usable again.
//
-// There can be at most 32 such reservations. This may get increased in
+// There can be at most 256 such reservations. This may get increased in
// the future, but not reduced.
//
// +optional


@@ -687,7 +687,7 @@ type ResourceClaimStatus struct {
// which issued it knows that it must put the pod back into the queue,
// waiting for the ResourceClaim to become usable again.
//
-// There can be at most 32 such reservations. This may get increased in
+// There can be at most 256 such reservations. This may get increased in
// the future, but not reduced.
//
// +optional
@@ -715,9 +715,9 @@ type ResourceClaimStatus struct {
Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
}
-// ReservedForMaxSize is the maximum number of entries in
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
// claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+const ResourceClaimReservedForMaxSize = 256
// ResourceClaimConsumerReference contains enough information to let you
// locate the consumer of a ResourceClaim. The user must be a resource in the same
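The bump to 256 matters to anyone appending to claim.status.reservedFor client-side. A minimal sketch of a guard against the limit, assuming the k8s.io/api/resource/v1beta1 types; the package and helper names are illustrative, not part of this repository:

package claimutil

import (
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
)

// addConsumer appends ref to the claim's reservedFor list, refusing to exceed
// ResourceClaimReservedForMaxSize (256 as of v0.32.1, previously 32).
func addConsumer(claim *resourceapi.ResourceClaim, ref resourceapi.ResourceClaimConsumerReference) error {
	if len(claim.Status.ReservedFor) >= resourceapi.ResourceClaimReservedForMaxSize {
		return fmt.Errorf("resourceclaim %s: reservedFor is full (%d entries)", claim.Name, len(claim.Status.ReservedFor))
	}
	claim.Status.ReservedFor = append(claim.Status.ReservedFor, ref)
	return nil
}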


@@ -291,7 +291,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
var map_ResourceClaimStatus = map[string]string{
"": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
"allocation": "Allocation is set once the claim has been allocated successfully.",
"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
"devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
}
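The reservedFor description above is exactly what a client sees through client-go: concurrent writers race on the status update and the loser gets a conflict and re-reads. A minimal sketch of that pattern with retry.RetryOnConflict, assuming the resource.k8s.io/v1beta1 typed client; function and package names here are illustrative only:

package claimutil

import (
	"context"

	resourceapi "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// reserveForConsumer adds ref to claim.status.reservedFor, re-reading the claim
// and retrying whenever another writer won the race and the update conflicts.
func reserveForConsumer(ctx context.Context, cs kubernetes.Interface, ns, name string, ref resourceapi.ResourceClaimConsumerReference) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		claim, err := cs.ResourceV1beta1().ResourceClaims(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		claim.Status.ReservedFor = append(claim.Status.ReservedFor, ref)
		// Only the first update to reach the API server is stored; a conflicting
		// writer ends up back here with a refreshed copy of the claim.
		_, err = cs.ResourceV1beta1().ResourceClaims(ns).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
		return err
	})
}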


@@ -683,7 +683,7 @@ message ResourceClaimStatus {
// which issued it knows that it must put the pod back into the queue,
// waiting for the ResourceClaim to become usable again.
//
-// There can be at most 32 such reservations. This may get increased in
+// There can be at most 256 such reservations. This may get increased in
// the future, but not reduced.
//
// +optional


@@ -695,7 +695,7 @@ type ResourceClaimStatus struct {
// which issued it knows that it must put the pod back into the queue,
// waiting for the ResourceClaim to become usable again.
//
-// There can be at most 32 such reservations. This may get increased in
+// There can be at most 256 such reservations. This may get increased in
// the future, but not reduced.
//
// +optional
@@ -723,9 +723,9 @@ type ResourceClaimStatus struct {
Devices []AllocatedDeviceStatus `json:"devices,omitempty" protobuf:"bytes,4,opt,name=devices"`
}
-// ReservedForMaxSize is the maximum number of entries in
+// ResourceClaimReservedForMaxSize is the maximum number of entries in
// claim.status.reservedFor.
-const ResourceClaimReservedForMaxSize = 32
+const ResourceClaimReservedForMaxSize = 256
// ResourceClaimConsumerReference contains enough information to let you
// locate the consumer of a ResourceClaim. The user must be a resource in the same


@@ -300,7 +300,7 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string {
var map_ResourceClaimStatus = map[string]string{
"": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.",
"allocation": "Allocation is set once the claim has been allocated successfully.",
"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.",
"reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 256 such reservations. This may get increased in the future, but not reduced.",
"devices": "Devices contains the status of each device allocated for this claim, as reported by the driver. This can include driver-specific information. Entries are owned by their respective drivers.",
}

vendor/modules.txt

@@ -679,7 +679,7 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# k8s.io/api v0.32.0
+# k8s.io/api v0.32.1
## explicit; go 1.23.0
k8s.io/api/admissionregistration/v1
k8s.io/api/admissionregistration/v1alpha1
@@ -737,7 +737,7 @@ k8s.io/api/storage/v1
k8s.io/api/storage/v1alpha1
k8s.io/api/storage/v1beta1
k8s.io/api/storagemigration/v1alpha1
-# k8s.io/apimachinery v0.32.0
+# k8s.io/apimachinery v0.32.1
## explicit; go 1.23.0
k8s.io/apimachinery/pkg/api/equality
k8s.io/apimachinery/pkg/api/errors
@@ -789,7 +789,7 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/client-go v0.32.0
+# k8s.io/client-go v0.32.1
## explicit; go 1.23.0
k8s.io/client-go/applyconfigurations/admissionregistration/v1
k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1