Files
sablier/pkg/provider/kubernetes/workload_scale.go
Alexis Couvreur edbf7d9d15 fix(kubernetes): consider workload not ready when scaled to 0 (#543)
* test(kubernetes): use testcontainers for test

* fix(kubernetes): get state properly reports the workload as down when scaled to 0

* refactor(kubernetes): split provider in multiple files

* refactor(provider): use Instance prefix for actions

* test(testcontainers): use provider.PullImage

* squash

* Revert "test(testcontainers): use provider.PullImage"

This reverts commit 6f958c48a5.

* test: add random generator thread safety
2025-03-02 23:30:59 -05:00

37 lines
1.0 KiB
Go

package kubernetes
import (
"context"
"fmt"
autoscalingv1 "k8s.io/api/autoscaling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Workload abstracts the scale subresource of a scalable Kubernetes
// workload (e.g. a Deployment or StatefulSet client from client-go), so
// scaling logic can be written once for every supported kind.
type Workload interface {
	// GetScale returns the current scale subresource of the named workload.
	GetScale(ctx context.Context, workloadName string, options metav1.GetOptions) (*autoscalingv1.Scale, error)
	// UpdateScale writes the given scale subresource back to the named workload.
	UpdateScale(ctx context.Context, workloadName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
}
// scale sets the replica count of the workload identified by config via
// the Kubernetes scale subresource.
//
// Only the "deployment" and "statefulset" kinds are supported; any other
// kind returns an error without contacting the API server. API errors are
// wrapped with the workload's kind, namespace and name for context.
func (p *KubernetesProvider) scale(ctx context.Context, config ParsedName, replicas int32) error {
	var workload Workload
	switch config.Kind {
	case "deployment":
		workload = p.Client.AppsV1().Deployments(config.Namespace)
	case "statefulset":
		workload = p.Client.AppsV1().StatefulSets(config.Namespace)
	default:
		return fmt.Errorf("unsupported kind %q must be one of \"deployment\", \"statefulset\"", config.Kind)
	}

	s, err := workload.GetScale(ctx, config.Name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting scale for %s %s/%s: %w", config.Kind, config.Namespace, config.Name, err)
	}

	s.Spec.Replicas = replicas

	if _, err := workload.UpdateScale(ctx, config.Name, s, metav1.UpdateOptions{}); err != nil {
		return fmt.Errorf("updating scale for %s %s/%s: %w", config.Kind, config.Namespace, config.Name, err)
	}
	return nil
}