mirror of
https://github.com/sablierapp/sablier.git
synced 2025-12-31 18:17:20 +01:00
perf(providers): retrieve state on start instead of assuming starting (#350)
When an instance does not exist yet and needs to be started, its status is no longer assumed to be starting. Instead, the status is retrieved from the provider. One practical consequence: you may be able to start and access your services instantly, because they will be seen as ready as soon as the provider reports them ready. With this change, make sure your containers have a proper healthcheck that determines when the application can process incoming requests.

* refactor: add interface guards

* refactor(providers): remove instance.State as a return value from Stop and Start

* test(e2e): add healthcheck on nginx container

  Because the container check is now so fast, we need to add a delay before the container is considered started and healthy, so that a proper waiting page can be observed.

* fix(tests): use acouvreur/whoami:v1.10.2 instead of containous/whoami:v1.5.0

  This image simply retrieves the curl binary from curlimages/curl:8.8.0 so that proper Docker healthcheck commands can be added. Once this is merged into traefik/whoami, I'll switch back to the original image. See https://github.com/traefik/whoami/issues/33
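In code terms, the change collapses the provider contract to error-only Start/Stop, with status always read back afterwards. A condensed sketch of the new `providers.Provider` shape (the full interface diff appears further down):

```go
type Provider interface {
	// Start and Stop now only report errors; callers no longer receive an
	// assumed "starting" instance.State.
	Start(ctx context.Context, name string) error
	Stop(ctx context.Context, name string) error
	// The actual status is always retrieved from the provider.
	GetState(ctx context.Context, name string) (instance.State, error)
	GetGroups(ctx context.Context) (map[string][]string, error)
	InstanceList(ctx context.Context, options InstanceListOptions) ([]types.Instance, error)
}
```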
@@ -48,7 +48,7 @@ func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provid
 func stopFunc(ctx context.Context, name string, provider providers.Provider) func() error {
 	return func() error {
 		log.Tracef("Stopping %v...", name)
-		_, err := provider.Stop(ctx, name)
+		err := provider.Stop(ctx, name)
 		if err != nil {
 			log.Errorf("Could not stop %v: %v", name, err)
 			return err
@@ -4,7 +4,6 @@ import (
 	"context"
 	"errors"
 	"github.com/acouvreur/sablier/app/discovery"
-	"github.com/acouvreur/sablier/app/instance"
 	"github.com/acouvreur/sablier/app/providers"
 	"github.com/acouvreur/sablier/app/providers/mock"
 	"github.com/acouvreur/sablier/app/types"
@@ -30,8 +29,8 @@ func TestStopAllUnregisteredInstances(t *testing.T) {
 	}).Return(instances, nil)

 	// Set up expectations for Stop
-	mockProvider.On("Stop", ctx, "instance2").Return(instance.State{}, nil)
-	mockProvider.On("Stop", ctx, "instance3").Return(instance.State{}, nil)
+	mockProvider.On("Stop", ctx, "instance2").Return(nil)
+	mockProvider.On("Stop", ctx, "instance3").Return(nil)

 	// Call the function under test
 	err := discovery.StopAllUnregisteredInstances(ctx, mockProvider, registered)
@@ -62,8 +61,8 @@ func TestStopAllUnregisteredInstances_WithError(t *testing.T) {
 	}).Return(instances, nil)

 	// Set up expectations for Stop with error
-	mockProvider.On("Stop", ctx, "instance2").Return(instance.State{}, errors.New("stop error"))
-	mockProvider.On("Stop", ctx, "instance3").Return(instance.State{}, nil)
+	mockProvider.On("Stop", ctx, "instance2").Return(errors.New("stop error"))
+	mockProvider.On("Stop", ctx, "instance3").Return(nil)

 	// Call the function under test
 	err := discovery.StopAllUnregisteredInstances(ctx, mockProvider, registered)
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"github.com/acouvreur/sablier/app/discovery"
+	"github.com/acouvreur/sablier/app/providers"
 	"io"
 	"strings"
@@ -18,6 +19,9 @@ import (
 	log "github.com/sirupsen/logrus"
 )

+// Interface guard
+var _ providers.Provider = (*DockerClassicProvider)(nil)
+
 type DockerClassicProvider struct {
 	Client          client.APIClient
 	desiredReplicas int
@@ -71,39 +75,16 @@ func (provider *DockerClassicProvider) GetGroups(ctx context.Context) (map[strin
 	return groups, nil
 }

-func (provider *DockerClassicProvider) Start(ctx context.Context, name string) (instance.State, error) {
-	err := provider.Client.ContainerStart(ctx, name, container.StartOptions{})
-
-	if err != nil {
-		return instance.ErrorInstanceState(name, err, provider.desiredReplicas)
-	}
-
-	return instance.State{
-		Name:            name,
-		CurrentReplicas: 0,
-		DesiredReplicas: provider.desiredReplicas,
-		Status:          instance.NotReady,
-	}, err
+func (provider *DockerClassicProvider) Start(ctx context.Context, name string) error {
+	return provider.Client.ContainerStart(ctx, name, container.StartOptions{})
 }

-func (provider *DockerClassicProvider) Stop(ctx context.Context, name string) (instance.State, error) {
-	err := provider.Client.ContainerStop(ctx, name, container.StopOptions{})
-
-	if err != nil {
-		return instance.ErrorInstanceState(name, err, provider.desiredReplicas)
-	}
-
-	return instance.State{
-		Name:            name,
-		CurrentReplicas: 0,
-		DesiredReplicas: provider.desiredReplicas,
-		Status:          instance.NotReady,
-	}, nil
+func (provider *DockerClassicProvider) Stop(ctx context.Context, name string) error {
+	return provider.Client.ContainerStop(ctx, name, container.StopOptions{})
 }

 func (provider *DockerClassicProvider) GetState(ctx context.Context, name string) (instance.State, error) {
 	spec, err := provider.Client.ContainerInspect(ctx, name)

 	if err != nil {
 		return instance.ErrorInstanceState(name, err, provider.desiredReplicas)
 	}
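With the classic Docker provider reduced to thin wrappers around the Docker client, readiness is now solely GetState's job. A minimal usage sketch under that assumption (the constructor name and wiring here are hypothetical, not taken from the diff):

```go
// Hypothetical wiring: start a container, then ask the provider for the
// actual state instead of assuming it is "starting".
provider, _ := NewDockerClassicProvider() // assumed constructor name
if err := provider.Start(ctx, "whoami"); err != nil {
	log.Errorf("could not start whoami: %v", err)
	return
}
state, _ := provider.GetState(ctx, "whoami") // may already report ready
log.Tracef("whoami status: %v", state.Status)
```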
@@ -271,7 +271,6 @@ func TestDockerClassicProvider_Stop(t *testing.T) {
 		name    string
 		fields  fields
 		args    args
-		want    instance.State
 		wantErr bool
 		err     error
 	}{
@@ -283,13 +282,6 @@ func TestDockerClassicProvider_Stop(t *testing.T) {
 			args: args{
 				name: "nginx",
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.Unrecoverable,
-				Message:         "container with name \"nginx\" was not found",
-			},
 			wantErr: true,
 			err:     fmt.Errorf("container with name \"nginx\" was not found"),
 		},
@@ -301,12 +293,6 @@ func TestDockerClassicProvider_Stop(t *testing.T) {
 			args: args{
 				name: "nginx",
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.NotReady,
-			},
 			wantErr: false,
 			err:     nil,
 		},
@@ -320,14 +306,11 @@ func TestDockerClassicProvider_Stop(t *testing.T) {

 			tt.fields.Client.On("ContainerStop", mock.Anything, mock.Anything, mock.Anything).Return(tt.err)

-			got, err := provider.Stop(context.Background(), tt.args.name)
+			err := provider.Stop(context.Background(), tt.args.name)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("DockerClassicProvider.Stop() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("DockerClassicProvider.Stop() = %v, want %v", got, tt.want)
-			}
 		})
 	}
 }
@@ -343,7 +326,6 @@ func TestDockerClassicProvider_Start(t *testing.T) {
 		name    string
 		fields  fields
 		args    args
-		want    instance.State
 		wantErr bool
 		err     error
 	}{
@@ -355,13 +337,6 @@ func TestDockerClassicProvider_Start(t *testing.T) {
 			args: args{
 				name: "nginx",
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.Unrecoverable,
-				Message:         "container with name \"nginx\" was not found",
-			},
 			wantErr: true,
 			err:     fmt.Errorf("container with name \"nginx\" was not found"),
 		},
@@ -373,12 +348,6 @@ func TestDockerClassicProvider_Start(t *testing.T) {
 			args: args{
 				name: "nginx",
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.NotReady,
-			},
 			wantErr: false,
 			err:     nil,
 		},
@@ -392,14 +361,11 @@ func TestDockerClassicProvider_Start(t *testing.T) {

 			tt.fields.Client.On("ContainerStart", mock.Anything, mock.Anything, mock.Anything).Return(tt.err)

-			got, err := provider.Start(context.Background(), tt.args.name)
+			err := provider.Start(context.Background(), tt.args.name)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("DockerClassicProvider.Start() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("DockerClassicProvider.Start() = %v, want %v", got, tt.want)
-			}
 		})
 	}
 }
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"github.com/acouvreur/sablier/app/discovery"
+	"github.com/acouvreur/sablier/app/providers"
 	"io"
 	"strings"
@@ -16,6 +17,9 @@ import (
 	log "github.com/sirupsen/logrus"
 )

+// Interface guard
+var _ providers.Provider = (*DockerSwarmProvider)(nil)
+
 type DockerSwarmProvider struct {
 	Client          client.APIClient
 	desiredReplicas int
@@ -41,40 +45,37 @@ func NewDockerSwarmProvider() (*DockerSwarmProvider, error) {

 }

-func (provider *DockerSwarmProvider) Start(ctx context.Context, name string) (instance.State, error) {
+func (provider *DockerSwarmProvider) Start(ctx context.Context, name string) error {
 	return provider.scale(ctx, name, uint64(provider.desiredReplicas))
 }

-func (provider *DockerSwarmProvider) Stop(ctx context.Context, name string) (instance.State, error) {
+func (provider *DockerSwarmProvider) Stop(ctx context.Context, name string) error {
 	return provider.scale(ctx, name, 0)
 }

-func (provider *DockerSwarmProvider) scale(ctx context.Context, name string, replicas uint64) (instance.State, error) {
+func (provider *DockerSwarmProvider) scale(ctx context.Context, name string, replicas uint64) error {
 	service, err := provider.getServiceByName(name, ctx)

 	if err != nil {
-		return instance.ErrorInstanceState(name, err, provider.desiredReplicas)
+		return err
 	}

 	foundName := provider.getInstanceName(name, *service)

 	if service.Spec.Mode.Replicated == nil {
-		return instance.UnrecoverableInstanceState(foundName, "swarm service is not in \"replicated\" mode", provider.desiredReplicas)
+		return errors.New("swarm service is not in \"replicated\" mode")
 	}

 	service.Spec.Mode.Replicated.Replicas = &replicas

 	response, err := provider.Client.ServiceUpdate(ctx, service.ID, service.Meta.Version, service.Spec, types.ServiceUpdateOptions{})

 	if err != nil {
-		return instance.ErrorInstanceState(foundName, err, provider.desiredReplicas)
+		return err
 	}

 	if len(response.Warnings) > 0 {
-		return instance.UnrecoverableInstanceState(foundName, strings.Join(response.Warnings, ", "), provider.desiredReplicas)
+		return fmt.Errorf("warning received updating swarm service [%s]: %s", foundName, strings.Join(response.Warnings, ", "))
 	}

-	return instance.NotReadyInstanceState(foundName, 0, provider.desiredReplicas)
+	return nil
 }

 func (provider *DockerSwarmProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
@@ -19,7 +19,6 @@ func TestDockerSwarmProvider_Start(t *testing.T) {
 	tests := []struct {
 		name        string
 		args        args
-		want        instance.State
 		serviceList []swarm.Service
 		response    swarm.ServiceUpdateResponse
 		wantService swarm.Service
@@ -36,12 +35,6 @@ func TestDockerSwarmProvider_Start(t *testing.T) {
 			response: swarm.ServiceUpdateResponse{
 				Warnings: []string{},
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.NotReady,
-			},
 			wantService: mocks.ServiceReplicated("nginx", 1),
 			wantErr:     false,
 		},
@@ -58,12 +51,6 @@ func TestDockerSwarmProvider_Start(t *testing.T) {
 			response: swarm.ServiceUpdateResponse{
 				Warnings: []string{},
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.NotReady,
-			},
 			wantService: mocks.ServiceReplicated("nginx", 1),
 			wantErr:     false,
 		},
@@ -78,15 +65,8 @@ func TestDockerSwarmProvider_Start(t *testing.T) {
 			response: swarm.ServiceUpdateResponse{
 				Warnings: []string{},
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.Unrecoverable,
-				Message:         "swarm service is not in \"replicated\" mode",
-			},
 			wantService: mocks.ServiceReplicated("nginx", 1),
-			wantErr:     false,
+			wantErr:     true,
 		},
 	}
 	for _, tt := range tests {
@@ -100,14 +80,11 @@ func TestDockerSwarmProvider_Start(t *testing.T) {
 			clientMock.On("ServiceList", mock.Anything, mock.Anything).Return(tt.serviceList, nil)
 			clientMock.On("ServiceUpdate", mock.Anything, tt.wantService.ID, tt.wantService.Meta.Version, tt.wantService.Spec, mock.Anything).Return(tt.response, nil)

-			got, err := provider.Start(context.Background(), tt.args.name)
+			err := provider.Start(context.Background(), tt.args.name)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("DockerSwarmProvider.Start() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("DockerSwarmProvider.Start() = %v, want %v", got, tt.want)
-			}
 		})
 	}
 }
@@ -119,7 +96,6 @@ func TestDockerSwarmProvider_Stop(t *testing.T) {
 	tests := []struct {
 		name        string
 		args        args
-		want        instance.State
 		serviceList []swarm.Service
 		response    swarm.ServiceUpdateResponse
 		wantService swarm.Service
@@ -136,12 +112,6 @@ func TestDockerSwarmProvider_Stop(t *testing.T) {
 			response: swarm.ServiceUpdateResponse{
 				Warnings: []string{},
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.NotReady,
-			},
 			wantService: mocks.ServiceReplicated("nginx", 0),
 			wantErr:     false,
 		},
@@ -158,12 +128,6 @@ func TestDockerSwarmProvider_Stop(t *testing.T) {
 			response: swarm.ServiceUpdateResponse{
 				Warnings: []string{},
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.NotReady,
-			},
 			wantService: mocks.ServiceReplicated("nginx", 0),
 			wantErr:     false,
 		},
@@ -178,15 +142,8 @@ func TestDockerSwarmProvider_Stop(t *testing.T) {
 			response: swarm.ServiceUpdateResponse{
 				Warnings: []string{},
 			},
-			want: instance.State{
-				Name:            "nginx",
-				CurrentReplicas: 0,
-				DesiredReplicas: 1,
-				Status:          instance.Unrecoverable,
-				Message:         "swarm service is not in \"replicated\" mode",
-			},
 			wantService: mocks.ServiceReplicated("nginx", 1),
-			wantErr:     false,
+			wantErr:     true,
 		},
 	}
 	for _, tt := range tests {
@@ -200,14 +157,11 @@ func TestDockerSwarmProvider_Stop(t *testing.T) {
 			clientMock.On("ServiceList", mock.Anything, mock.Anything).Return(tt.serviceList, nil)
 			clientMock.On("ServiceUpdate", mock.Anything, tt.wantService.ID, tt.wantService.Meta.Version, tt.wantService.Spec, mock.Anything).Return(tt.response, nil)

-			got, err := provider.Stop(context.Background(), tt.args.name)
+			err := provider.Stop(context.Background(), tt.args.name)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("DockerSwarmProvider.Stop() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("DockerSwarmProvider.Stop() = %v, want %v", got, tt.want)
-			}
 		})
 	}
 }
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"github.com/acouvreur/sablier/app/discovery"
+	"github.com/acouvreur/sablier/app/providers"
 	"strconv"
 	"strings"
 	"time"
@@ -23,6 +24,9 @@ import (
 	"k8s.io/client-go/tools/cache"
 )

+// Interface guard
+var _ providers.Provider = (*KubernetesProvider)(nil)
+
 type Config struct {
 	OriginalName string
 	Kind         string // deployment or statefulset
@@ -83,19 +87,19 @@ func NewKubernetesProvider(providerConfig providerConfig.Kubernetes) (*Kubernete

 }

-func (provider *KubernetesProvider) Start(ctx context.Context, name string) (instance.State, error) {
+func (provider *KubernetesProvider) Start(ctx context.Context, name string) error {
 	config, err := provider.convertName(name)
 	if err != nil {
-		return instance.UnrecoverableInstanceState(name, err.Error(), int(config.Replicas))
+		return err
 	}

 	return provider.scale(ctx, config, config.Replicas)
 }

-func (provider *KubernetesProvider) Stop(ctx context.Context, name string) (instance.State, error) {
+func (provider *KubernetesProvider) Stop(ctx context.Context, name string) error {
 	config, err := provider.convertName(name)
 	if err != nil {
-		return instance.UnrecoverableInstanceState(name, err.Error(), int(config.Replicas))
+		return err
 	}

 	return provider.scale(ctx, config, 0)
@@ -147,7 +151,7 @@ func (provider *KubernetesProvider) GetGroups(ctx context.Context) (map[string][
 	return groups, nil
 }

-func (provider *KubernetesProvider) scale(ctx context.Context, config *Config, replicas int32) (instance.State, error) {
+func (provider *KubernetesProvider) scale(ctx context.Context, config *Config, replicas int32) error {
 	var workload Workload

 	switch config.Kind {
@@ -156,22 +160,18 @@ func (provider *KubernetesProvider) scale(ctx context.Context, config *Config, r
 	case "statefulset":
 		workload = provider.Client.AppsV1().StatefulSets(config.Namespace)
 	default:
-		return instance.UnrecoverableInstanceState(config.OriginalName, fmt.Sprintf("unsupported kind \"%s\" must be one of \"deployment\", \"statefulset\"", config.Kind), int(config.Replicas))
+		return fmt.Errorf("unsupported kind \"%s\" must be one of \"deployment\", \"statefulset\"", config.Kind)
 	}

 	s, err := workload.GetScale(ctx, config.Name, metav1.GetOptions{})
 	if err != nil {
-		return instance.ErrorInstanceState(config.OriginalName, err, int(config.Replicas))
+		return err
 	}

 	s.Spec.Replicas = replicas
 	_, err = workload.UpdateScale(ctx, config.Name, s, metav1.UpdateOptions{})

-	if err != nil {
-		return instance.ErrorInstanceState(config.OriginalName, err, int(config.Replicas))
-	}
-
-	return instance.NotReadyInstanceState(config.OriginalName, 0, int(config.Replicas))
+	return err
 }

 func (provider *KubernetesProvider) GetState(ctx context.Context, name string) (instance.State, error) {
@@ -34,12 +34,6 @@ func TestKubernetesProvider_Start(t *testing.T) {
 			args: args{
 				name: "deployment_default_nginx_2",
 			},
-			want: instance.State{
-				Name:            "deployment_default_nginx_2",
-				CurrentReplicas: 0,
-				DesiredReplicas: 2,
-				Status:          instance.NotReady,
-			},
 			data: data{
 				name: "nginx",
 				get:  mocks.V1Scale(0),
@@ -52,12 +46,6 @@ func TestKubernetesProvider_Start(t *testing.T) {
 			args: args{
 				name: "statefulset_default_nginx_2",
 			},
-			want: instance.State{
-				Name:            "statefulset_default_nginx_2",
-				CurrentReplicas: 0,
-				DesiredReplicas: 2,
-				Status:          instance.NotReady,
-			},
 			data: data{
 				name: "nginx",
 				get:  mocks.V1Scale(0),
@@ -70,19 +58,12 @@ func TestKubernetesProvider_Start(t *testing.T) {
 			args: args{
 				name: "gateway_default_nginx_2",
 			},
-			want: instance.State{
-				Name:            "gateway_default_nginx_2",
-				CurrentReplicas: 0,
-				DesiredReplicas: 2,
-				Status:          instance.Unrecoverable,
-				Message:         "unsupported kind \"gateway\" must be one of \"deployment\", \"statefulset\"",
-			},
 			data: data{
 				name:   "nginx",
 				get:    mocks.V1Scale(0),
 				update: mocks.V1Scale(0),
 			},
-			wantErr: false,
+			wantErr: true,
 		},
 	}
 	for _, tt := range tests {
@@ -100,14 +81,11 @@ func TestKubernetesProvider_Start(t *testing.T) {
 			statefulsetAPI.On("GetScale", mock.Anything, tt.data.name, metav1.GetOptions{}).Return(tt.data.get, nil)
 			statefulsetAPI.On("UpdateScale", mock.Anything, tt.data.name, tt.data.update, metav1.UpdateOptions{}).Return(nil, nil)

-			got, err := provider.Start(context.Background(), tt.args.name)
+			err := provider.Start(context.Background(), tt.args.name)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("KubernetesProvider.Start() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("KubernetesProvider.Start() = %v, want %v", got, tt.want)
-			}
 		})
 	}
 }
@@ -133,12 +111,6 @@ func TestKubernetesProvider_Stop(t *testing.T) {
 			args: args{
 				name: "deployment_default_nginx_2",
 			},
-			want: instance.State{
-				Name:            "deployment_default_nginx_2",
-				CurrentReplicas: 0,
-				DesiredReplicas: 2,
-				Status:          instance.NotReady,
-			},
 			data: data{
 				name: "nginx",
 				get:  mocks.V1Scale(2),
@@ -151,12 +123,6 @@ func TestKubernetesProvider_Stop(t *testing.T) {
 			args: args{
 				name: "statefulset_default_nginx_2",
 			},
-			want: instance.State{
-				Name:            "statefulset_default_nginx_2",
-				CurrentReplicas: 0,
-				DesiredReplicas: 2,
-				Status:          instance.NotReady,
-			},
 			data: data{
 				name: "nginx",
 				get:  mocks.V1Scale(2),
@@ -169,19 +135,12 @@ func TestKubernetesProvider_Stop(t *testing.T) {
 			args: args{
 				name: "gateway_default_nginx_2",
 			},
-			want: instance.State{
-				Name:            "gateway_default_nginx_2",
-				CurrentReplicas: 0,
-				DesiredReplicas: 2,
-				Status:          instance.Unrecoverable,
-				Message:         "unsupported kind \"gateway\" must be one of \"deployment\", \"statefulset\"",
-			},
 			data: data{
 				name:   "nginx",
 				get:    mocks.V1Scale(0),
 				update: mocks.V1Scale(0),
 			},
-			wantErr: false,
+			wantErr: true,
 		},
 	}
 	for _, tt := range tests {
@@ -199,14 +158,11 @@ func TestKubernetesProvider_Stop(t *testing.T) {
 			statefulsetAPI.On("GetScale", mock.Anything, tt.data.name, metav1.GetOptions{}).Return(tt.data.get, nil)
 			statefulsetAPI.On("UpdateScale", mock.Anything, tt.data.name, tt.data.update, metav1.UpdateOptions{}).Return(nil, nil)

-			got, err := provider.Stop(context.Background(), tt.args.name)
+			err := provider.Stop(context.Background(), tt.args.name)
 			if (err != nil) != tt.wantErr {
 				t.Errorf("KubernetesProvider.Stop() error = %v, wantErr %v", err, tt.wantErr)
 				return
 			}
-			if !reflect.DeepEqual(got, tt.want) {
-				t.Errorf("KubernetesProvider.Stop() = %v, want %v", got, tt.want)
-			}
 		})
 	}
 }
@@ -8,18 +8,21 @@ import (
 	"github.com/stretchr/testify/mock"
 )

+// Interface guard
+var _ providers.Provider = (*ProviderMock)(nil)
+
 // ProviderMock is a structure that allows to define the behavior of a Provider
 type ProviderMock struct {
 	mock.Mock
 }

-func (m *ProviderMock) Start(ctx context.Context, name string) (instance.State, error) {
+func (m *ProviderMock) Start(ctx context.Context, name string) error {
 	args := m.Called(ctx, name)
-	return args.Get(0).(instance.State), args.Error(1)
+	return args.Error(0)
 }
-func (m *ProviderMock) Stop(ctx context.Context, name string) (instance.State, error) {
+func (m *ProviderMock) Stop(ctx context.Context, name string) error {
 	args := m.Called(ctx, name)
-	return args.Get(0).(instance.State), args.Error(1)
+	return args.Error(0)
 }
 func (m *ProviderMock) GetState(ctx context.Context, name string) (instance.State, error) {
 	args := m.Called(ctx, name)
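Consequently, expectations on the mock shrink to a single return value. A fragment in the style of the updated discovery tests (assuming the usual `ctx` and imports are in scope):

```go
mockProvider := &mock.ProviderMock{}
// Error-only signatures: one value per expectation instead of (instance.State, error).
mockProvider.On("Stop", ctx, "instance2").Return(nil)
mockProvider.On("Stop", ctx, "instance3").Return(errors.New("stop error"))
```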
@@ -8,8 +8,8 @@ import (
 )

 type Provider interface {
-	Start(ctx context.Context, name string) (instance.State, error)
-	Stop(ctx context.Context, name string) (instance.State, error)
+	Start(ctx context.Context, name string) error
+	Stop(ctx context.Context, name string) error
 	GetState(ctx context.Context, name string) (instance.State, error)
 	GetGroups(ctx context.Context) (map[string][]string, error)
 	InstanceList(ctx context.Context, options InstanceListOptions) ([]types.Instance, error)
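The `// Interface guard` lines added in each provider file use a standard Go idiom: assigning a typed nil pointer to a package-level interface variable makes the compiler verify conformance. A minimal sketch with a hypothetical `MyProvider`:

```go
// Fails at compile time if *MyProvider drifts from providers.Provider,
// which is exactly the protection wanted for a signature change like this one.
var _ providers.Provider = (*MyProvider)(nil)
```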
@@ -89,7 +89,7 @@ func onSessionExpires(provider providers.Provider) func(key string, instance ins
 	return func(_key string, _instance instance.State) {
 		go func(key string, instance instance.State) {
 			log.Debugf("stopping %s...", key)
-			_, err := provider.Stop(context.Background(), key)
+			err := provider.Stop(context.Background(), key)

 			if err != nil {
 				log.Warnf("error stopping %s: %s", key, err.Error())
@@ -184,17 +184,22 @@ func (s *SessionsManager) requestSessionInstance(name string, duration time.Dura
 	if !exists {
 		log.Debugf("starting %s...", name)

-		state, err := s.provider.Start(s.ctx, name)
-
+		err := s.provider.Start(s.ctx, name)
 		if err != nil {
 			log.Errorf("an error occurred starting %s: %s", name, err.Error())
+			errState, _ := instance.ErrorInstanceState(name, err, 1)
+			requestState.Name = errState.Name
+			requestState.CurrentReplicas = errState.CurrentReplicas
+			requestState.DesiredReplicas = errState.DesiredReplicas
+			requestState.Status = errState.Status
+			requestState.Message = errState.Message
+		} else {
+			state, _ := s.provider.GetState(s.ctx, name)
+			requestState.CurrentReplicas = state.CurrentReplicas
+			requestState.DesiredReplicas = state.DesiredReplicas
+			requestState.Status = state.Status
+			requestState.Message = state.Message
 		}

-		requestState.Name = state.Name
-		requestState.CurrentReplicas = state.CurrentReplicas
-		requestState.DesiredReplicas = state.DesiredReplicas
-		requestState.Status = state.Status
-		requestState.Message = state.Message
 		log.Debugf("status for %s=%s", name, requestState.Status)
 	} else if requestState.Status != instance.Ready {
 		log.Debugf("checking %s...", name)
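Pulled out of the session manager, the new flow reads as a small helper (a sketch only, not a function in the codebase; `instance.ErrorInstanceState` and `GetState` as in the diff above):

```go
func startAndObserve(ctx context.Context, p providers.Provider, name string) instance.State {
	if err := p.Start(ctx, name); err != nil {
		// Same fallback as requestSessionInstance: synthesize an error state.
		errState, _ := instance.ErrorInstanceState(name, err, 1)
		return errState
	}
	// Retrieve the real status; with a passing healthcheck this can already
	// be ready, which is what makes instant access possible.
	state, _ := p.GetState(ctx, name)
	return state
}
```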
@@ -38,7 +38,7 @@ services:
       - traefik.http.middlewares.group.plugin.sablier.blocking.timeout=30s

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik
     # The route doesn't exist anymore. Use dynamic-config.yml file instead.
     # labels:
@@ -43,7 +43,7 @@ services:
       - ./Caddyfile:/etc/caddy/Caddyfile:ro

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
 ```

 #### **Caddyfile**
@@ -75,7 +75,7 @@ services:
       - ./Caddyfile:/etc/caddy/Caddyfile:ro

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2

   sablier:
     image: acouvreur/sablier:1.8.0-beta.8
@@ -110,7 +110,7 @@ services:
       - ./Caddyfile:/etc/caddy/Caddyfile:ro

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2

   sablier:
     image: acouvreur/sablier:1.8.0-beta.8
@@ -139,7 +139,7 @@ services:
       - ./Caddyfile:/etc/caddy/Caddyfile:ro

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     labels:
       - sablier.enable=true
       - sablier.group=demo
@@ -26,7 +26,7 @@ You have to use the dynamic config file provider instead.

 ```yaml
 whoami:
-  image: containous/whoami:v1.5.0
+  image: acouvreur/whoami:v1.10.2
   labels:
     - traefik.enable
     - traefik.http.routers.whoami.rule=PathPrefix(`/whoami`)
@@ -66,7 +66,7 @@ See also [`traefik.docker.lbswarm`](https://doc.traefik.io/traefik/routing/provi
 ```yaml
 services:
   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     deploy:
       replicas: 0
     labels:
@@ -51,7 +51,7 @@ You have to register your containers by opting-in with labels.
 ```yaml
 services:
   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     labels:
       - sablier.enable=true
       - sablier.group=mygroup
@@ -52,7 +52,7 @@ You have to register your services by opting-in with labels.
 ```yaml
 services:
   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     deploy:
       labels:
         - sablier.enable=true
@@ -88,7 +88,7 @@ spec:
     spec:
       containers:
         - name: whoami
-          image: containous/whoami:v1.5.0
+          image: acouvreur/whoami:v1.10.2
 ```

 ## How does Sablier knows when a deployment is ready?
@@ -19,7 +19,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     labels:
       - sablier.enable=true
       - sablier.group=E2E
@@ -24,7 +24,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     deploy:
       labels:
         - sablier.enable=true
@@ -23,7 +23,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     labels:
       - sablier.enable=true
       - sablier.group=E2E
@@ -26,7 +26,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     deploy:
       labels:
         - sablier.enable=true
@@ -18,7 +18,7 @@ spec:
     spec:
       containers:
         - name: whoami
-          image: containous/whoami:v1.5.0
+          image: acouvreur/whoami:v1.10.2
 ---
 apiVersion: v1
 kind: Service
@@ -19,7 +19,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     labels:
       - sablier.enable=true
       - sablier.group=E2E
@@ -18,7 +18,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     labels:
       - sablier.enable=true
       - sablier.group=E2E
@@ -16,7 +16,7 @@ spec:
     spec:
      containers:
        - name: whoami
-          image: containous/whoami:v1.5.0
+          image: acouvreur/whoami:v1.10.2
 ---
 apiVersion: v1
 kind: Service
@@ -19,7 +19,10 @@ services:
       - '/var/run/docker.sock:/var/run/docker.sock'

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     labels:
       - sablier.enable=true
       - sablier.group=E2E
@@ -94,7 +94,7 @@ services:
       - traefik.http.middlewares.dynamic.plugin.sablier.dynamic.sessionDuration=1m

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik
     # The route doesn't exist anymore. Use dynamic-config.yml file instead.
     # labels:
@@ -130,7 +130,7 @@ http:
 ```yaml
 services:
   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     deploy:
       replicas: 0
     labels:
@@ -54,9 +54,12 @@ services:
       - traefik.http.middlewares.group.plugin.sablier.dynamic.displayName=Group E2E

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
     # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik
     # The route doesn't exist anymore. Use dynamic-config.yml file instead.
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     labels:
       - sablier.enable=true
       - sablier.group=E2E
@@ -62,7 +62,10 @@ services:
       - traefik.http.services.sablier.loadbalancer.server.port=10000

   whoami:
-    image: containous/whoami:v1.5.0
+    image: acouvreur/whoami:v1.10.2
+    healthcheck:
+      test: [ "CMD", "curl", "-f", "http://localhost" ]
+      interval: 5s
     deploy:
       replicas: 0
     labels:
@@ -18,7 +18,13 @@ spec:
     spec:
       containers:
         - name: whoami
-          image: containous/whoami:v1.5.0
+          image: acouvreur/whoami:v1.10.2
+          livenessProbe:
+            httpGet:
+              path: /health
+              port: 80
+            initialDelaySeconds: 5
+            periodSeconds: 5
 ---
 apiVersion: v1
 kind: Service
@@ -170,6 +176,14 @@ spec:
       containers:
         - name: nginx
           image: nginx:1.23.1
+          livenessProbe:
+            exec:
+              command:
+                - curl
+                - -f
+                - http://localhost
+            initialDelaySeconds: 5
+            periodSeconds: 5
 ---
 apiVersion: v1
 kind: Service