From be7ace0e0507bb6ff2b1d0c2fc719bbb02e7779e Mon Sep 17 00:00:00 2001 From: Alexis Couvreur Date: Mon, 8 Jul 2024 00:10:39 -0400 Subject: [PATCH] perf(providers): retrieve state on start instead of assuming starting (#350) When an instance does not exist yet and needs to be started, its status is not assumed to be starting anymore. Instead, the status will be retrieved from the provider. This changes one thing, it's that you may be able to start and access your services instantly because they'll be instantly seen as ready. With this change, you might want to make sure that your containers have a proper healthcheck used to determine when the application is able to process incoming requests. * refactor: add interface guards * refactor(providers): remove instance.State as a return value from Stop and Start * test(e2e): add healthcheck on nginx container Because now the container check is so fast, we need to add a delay on which the container is considered started and healthy to have a proper waiting page. * fix(tests): using acouvreur/whoami:v1.10.2 instead of containous/whoami:v1.5.0 This image simply retrieves the curl binary from curlimages/curl:8.8.0 to be able to add proper docker healthcheck commands. Once this is merged with traefik/whoami, I'll update back to the original image. 
See https://github.com/traefik/whoami/issues/33 --- app/discovery/autostop.go | 2 +- app/discovery/autostop_test.go | 9 ++-- app/providers/docker/docker.go | 35 +++--------- app/providers/docker/docker_test.go | 38 +------------ app/providers/dockerswarm/docker_swarm.go | 23 ++++---- .../dockerswarm/docker_swarm_test.go | 54 ++----------------- app/providers/kubernetes/kubernetes.go | 24 ++++----- app/providers/kubernetes/kubernetes_test.go | 52 ++---------------- app/providers/mock/mock.go | 11 ++-- app/providers/provider.go | 4 +- app/sablier.go | 2 +- app/sessions/sessions_manager.go | 21 +++++--- docker-compose.yml | 2 +- docs/getting-started.md | 8 +-- docs/plugins/traefik.md | 4 +- docs/providers/docker.md | 2 +- docs/providers/docker_swarm.md | 2 +- docs/providers/kubernetes.md | 2 +- plugins/caddy/e2e/docker/docker-compose.yml | 5 +- .../caddy/e2e/docker_swarm/docker-stack.yml | 5 +- plugins/nginx/e2e/docker/docker-compose.yml | 5 +- .../nginx/e2e/docker_swarm/docker-stack.yml | 5 +- .../e2e/kubernetes/manifests/deployment.yml | 2 +- .../e2e/apacheapisix/docker/compose.yaml | 5 +- .../proxywasm/e2e/envoy/docker/compose.yaml | 5 +- .../e2e/istio/kubernetes/manifests/whoami.yml | 2 +- .../proxywasm/e2e/nginx/docker/compose.yaml | 5 +- plugins/traefik/README.md | 4 +- plugins/traefik/e2e/docker/docker-compose.yml | 5 +- .../traefik/e2e/docker_swarm/docker-stack.yml | 5 +- .../e2e/kubernetes/manifests/deployment.yml | 16 +++++- 31 files changed, 135 insertions(+), 229 deletions(-) diff --git a/app/discovery/autostop.go b/app/discovery/autostop.go index 390ead1..b170fd6 100644 --- a/app/discovery/autostop.go +++ b/app/discovery/autostop.go @@ -48,7 +48,7 @@ func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provid func stopFunc(ctx context.Context, name string, provider providers.Provider) func() error { return func() error { log.Tracef("Stopping %v...", name) - _, err := provider.Stop(ctx, name) + err := provider.Stop(ctx, name) if err != 
nil { log.Errorf("Could not stop %v: %v", name, err) return err diff --git a/app/discovery/autostop_test.go b/app/discovery/autostop_test.go index ef6dcb6..b448a73 100644 --- a/app/discovery/autostop_test.go +++ b/app/discovery/autostop_test.go @@ -4,7 +4,6 @@ import ( "context" "errors" "github.com/acouvreur/sablier/app/discovery" - "github.com/acouvreur/sablier/app/instance" "github.com/acouvreur/sablier/app/providers" "github.com/acouvreur/sablier/app/providers/mock" "github.com/acouvreur/sablier/app/types" @@ -30,8 +29,8 @@ func TestStopAllUnregisteredInstances(t *testing.T) { }).Return(instances, nil) // Set up expectations for Stop - mockProvider.On("Stop", ctx, "instance2").Return(instance.State{}, nil) - mockProvider.On("Stop", ctx, "instance3").Return(instance.State{}, nil) + mockProvider.On("Stop", ctx, "instance2").Return(nil) + mockProvider.On("Stop", ctx, "instance3").Return(nil) // Call the function under test err := discovery.StopAllUnregisteredInstances(ctx, mockProvider, registered) @@ -62,8 +61,8 @@ func TestStopAllUnregisteredInstances_WithError(t *testing.T) { }).Return(instances, nil) // Set up expectations for Stop with error - mockProvider.On("Stop", ctx, "instance2").Return(instance.State{}, errors.New("stop error")) - mockProvider.On("Stop", ctx, "instance3").Return(instance.State{}, nil) + mockProvider.On("Stop", ctx, "instance2").Return(errors.New("stop error")) + mockProvider.On("Stop", ctx, "instance3").Return(nil) // Call the function under test err := discovery.StopAllUnregisteredInstances(ctx, mockProvider, registered) diff --git a/app/providers/docker/docker.go b/app/providers/docker/docker.go index 97979f3..1600772 100644 --- a/app/providers/docker/docker.go +++ b/app/providers/docker/docker.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/acouvreur/sablier/app/discovery" + "github.com/acouvreur/sablier/app/providers" "io" "strings" @@ -18,6 +19,9 @@ import ( log "github.com/sirupsen/logrus" ) +// Interface guard +var _ 
providers.Provider = (*DockerClassicProvider)(nil) + type DockerClassicProvider struct { Client client.APIClient desiredReplicas int @@ -71,39 +75,16 @@ func (provider *DockerClassicProvider) GetGroups(ctx context.Context) (map[strin return groups, nil } -func (provider *DockerClassicProvider) Start(ctx context.Context, name string) (instance.State, error) { - err := provider.Client.ContainerStart(ctx, name, container.StartOptions{}) - - if err != nil { - return instance.ErrorInstanceState(name, err, provider.desiredReplicas) - } - - return instance.State{ - Name: name, - CurrentReplicas: 0, - DesiredReplicas: provider.desiredReplicas, - Status: instance.NotReady, - }, err +func (provider *DockerClassicProvider) Start(ctx context.Context, name string) error { + return provider.Client.ContainerStart(ctx, name, container.StartOptions{}) } -func (provider *DockerClassicProvider) Stop(ctx context.Context, name string) (instance.State, error) { - err := provider.Client.ContainerStop(ctx, name, container.StopOptions{}) - - if err != nil { - return instance.ErrorInstanceState(name, err, provider.desiredReplicas) - } - - return instance.State{ - Name: name, - CurrentReplicas: 0, - DesiredReplicas: provider.desiredReplicas, - Status: instance.NotReady, - }, nil +func (provider *DockerClassicProvider) Stop(ctx context.Context, name string) error { + return provider.Client.ContainerStop(ctx, name, container.StopOptions{}) } func (provider *DockerClassicProvider) GetState(ctx context.Context, name string) (instance.State, error) { spec, err := provider.Client.ContainerInspect(ctx, name) - if err != nil { return instance.ErrorInstanceState(name, err, provider.desiredReplicas) } diff --git a/app/providers/docker/docker_test.go b/app/providers/docker/docker_test.go index 7f5afe8..0cd8b55 100644 --- a/app/providers/docker/docker_test.go +++ b/app/providers/docker/docker_test.go @@ -271,7 +271,6 @@ func TestDockerClassicProvider_Stop(t *testing.T) { name string fields fields args 
args - want instance.State wantErr bool err error }{ @@ -283,13 +282,6 @@ func TestDockerClassicProvider_Stop(t *testing.T) { args: args{ name: "nginx", }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.Unrecoverable, - Message: "container with name \"nginx\" was not found", - }, wantErr: true, err: fmt.Errorf("container with name \"nginx\" was not found"), }, @@ -301,12 +293,6 @@ func TestDockerClassicProvider_Stop(t *testing.T) { args: args{ name: "nginx", }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.NotReady, - }, wantErr: false, err: nil, }, @@ -320,14 +306,11 @@ func TestDockerClassicProvider_Stop(t *testing.T) { tt.fields.Client.On("ContainerStop", mock.Anything, mock.Anything, mock.Anything).Return(tt.err) - got, err := provider.Stop(context.Background(), tt.args.name) + err := provider.Stop(context.Background(), tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("DockerClassicProvider.Stop() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("DockerClassicProvider.Stop() = %v, want %v", got, tt.want) - } }) } } @@ -343,7 +326,6 @@ func TestDockerClassicProvider_Start(t *testing.T) { name string fields fields args args - want instance.State wantErr bool err error }{ @@ -355,13 +337,6 @@ func TestDockerClassicProvider_Start(t *testing.T) { args: args{ name: "nginx", }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.Unrecoverable, - Message: "container with name \"nginx\" was not found", - }, wantErr: true, err: fmt.Errorf("container with name \"nginx\" was not found"), }, @@ -373,12 +348,6 @@ func TestDockerClassicProvider_Start(t *testing.T) { args: args{ name: "nginx", }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.NotReady, - }, wantErr: false, err: nil, }, @@ -392,14 
+361,11 @@ func TestDockerClassicProvider_Start(t *testing.T) { tt.fields.Client.On("ContainerStart", mock.Anything, mock.Anything, mock.Anything).Return(tt.err) - got, err := provider.Start(context.Background(), tt.args.name) + err := provider.Start(context.Background(), tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("DockerClassicProvider.Start() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("DockerClassicProvider.Start() = %v, want %v", got, tt.want) - } }) } } diff --git a/app/providers/dockerswarm/docker_swarm.go b/app/providers/dockerswarm/docker_swarm.go index 160c01f..9c33037 100644 --- a/app/providers/dockerswarm/docker_swarm.go +++ b/app/providers/dockerswarm/docker_swarm.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/acouvreur/sablier/app/discovery" + "github.com/acouvreur/sablier/app/providers" "io" "strings" @@ -16,6 +17,9 @@ import ( log "github.com/sirupsen/logrus" ) +// Interface guard +var _ providers.Provider = (*DockerSwarmProvider)(nil) + type DockerSwarmProvider struct { Client client.APIClient desiredReplicas int @@ -41,40 +45,37 @@ func NewDockerSwarmProvider() (*DockerSwarmProvider, error) { } -func (provider *DockerSwarmProvider) Start(ctx context.Context, name string) (instance.State, error) { +func (provider *DockerSwarmProvider) Start(ctx context.Context, name string) error { return provider.scale(ctx, name, uint64(provider.desiredReplicas)) } -func (provider *DockerSwarmProvider) Stop(ctx context.Context, name string) (instance.State, error) { +func (provider *DockerSwarmProvider) Stop(ctx context.Context, name string) error { return provider.scale(ctx, name, 0) } -func (provider *DockerSwarmProvider) scale(ctx context.Context, name string, replicas uint64) (instance.State, error) { +func (provider *DockerSwarmProvider) scale(ctx context.Context, name string, replicas uint64) error { service, err := provider.getServiceByName(name, ctx) - if err != nil { - return 
instance.ErrorInstanceState(name, err, provider.desiredReplicas) + return err } foundName := provider.getInstanceName(name, *service) - if service.Spec.Mode.Replicated == nil { - return instance.UnrecoverableInstanceState(foundName, "swarm service is not in \"replicated\" mode", provider.desiredReplicas) + return errors.New("swarm service is not in \"replicated\" mode") } service.Spec.Mode.Replicated.Replicas = &replicas response, err := provider.Client.ServiceUpdate(ctx, service.ID, service.Meta.Version, service.Spec, types.ServiceUpdateOptions{}) - if err != nil { - return instance.ErrorInstanceState(foundName, err, provider.desiredReplicas) + return err } if len(response.Warnings) > 0 { - return instance.UnrecoverableInstanceState(foundName, strings.Join(response.Warnings, ", "), provider.desiredReplicas) + return fmt.Errorf("warning received updating swarm service [%s]: %s", foundName, strings.Join(response.Warnings, ", ")) } - return instance.NotReadyInstanceState(foundName, 0, provider.desiredReplicas) + return nil } func (provider *DockerSwarmProvider) GetGroups(ctx context.Context) (map[string][]string, error) { diff --git a/app/providers/dockerswarm/docker_swarm_test.go b/app/providers/dockerswarm/docker_swarm_test.go index 108cd91..3447283 100644 --- a/app/providers/dockerswarm/docker_swarm_test.go +++ b/app/providers/dockerswarm/docker_swarm_test.go @@ -19,7 +19,6 @@ func TestDockerSwarmProvider_Start(t *testing.T) { tests := []struct { name string args args - want instance.State serviceList []swarm.Service response swarm.ServiceUpdateResponse wantService swarm.Service @@ -36,12 +35,6 @@ func TestDockerSwarmProvider_Start(t *testing.T) { response: swarm.ServiceUpdateResponse{ Warnings: []string{}, }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.NotReady, - }, wantService: mocks.ServiceReplicated("nginx", 1), wantErr: false, }, @@ -58,12 +51,6 @@ func TestDockerSwarmProvider_Start(t *testing.T) { 
response: swarm.ServiceUpdateResponse{ Warnings: []string{}, }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.NotReady, - }, wantService: mocks.ServiceReplicated("nginx", 1), wantErr: false, }, @@ -78,15 +65,8 @@ func TestDockerSwarmProvider_Start(t *testing.T) { response: swarm.ServiceUpdateResponse{ Warnings: []string{}, }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.Unrecoverable, - Message: "swarm service is not in \"replicated\" mode", - }, wantService: mocks.ServiceReplicated("nginx", 1), - wantErr: false, + wantErr: true, }, } for _, tt := range tests { @@ -100,14 +80,11 @@ func TestDockerSwarmProvider_Start(t *testing.T) { clientMock.On("ServiceList", mock.Anything, mock.Anything).Return(tt.serviceList, nil) clientMock.On("ServiceUpdate", mock.Anything, tt.wantService.ID, tt.wantService.Meta.Version, tt.wantService.Spec, mock.Anything).Return(tt.response, nil) - got, err := provider.Start(context.Background(), tt.args.name) + err := provider.Start(context.Background(), tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("DockerSwarmProvider.Start() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("DockerSwarmProvider.Start() = %v, want %v", got, tt.want) - } }) } } @@ -119,7 +96,6 @@ func TestDockerSwarmProvider_Stop(t *testing.T) { tests := []struct { name string args args - want instance.State serviceList []swarm.Service response swarm.ServiceUpdateResponse wantService swarm.Service @@ -136,12 +112,6 @@ func TestDockerSwarmProvider_Stop(t *testing.T) { response: swarm.ServiceUpdateResponse{ Warnings: []string{}, }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.NotReady, - }, wantService: mocks.ServiceReplicated("nginx", 0), wantErr: false, }, @@ -158,12 +128,6 @@ func TestDockerSwarmProvider_Stop(t *testing.T) { response: 
swarm.ServiceUpdateResponse{ Warnings: []string{}, }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.NotReady, - }, wantService: mocks.ServiceReplicated("nginx", 0), wantErr: false, }, @@ -178,15 +142,8 @@ func TestDockerSwarmProvider_Stop(t *testing.T) { response: swarm.ServiceUpdateResponse{ Warnings: []string{}, }, - want: instance.State{ - Name: "nginx", - CurrentReplicas: 0, - DesiredReplicas: 1, - Status: instance.Unrecoverable, - Message: "swarm service is not in \"replicated\" mode", - }, wantService: mocks.ServiceReplicated("nginx", 1), - wantErr: false, + wantErr: true, }, } for _, tt := range tests { @@ -200,14 +157,11 @@ func TestDockerSwarmProvider_Stop(t *testing.T) { clientMock.On("ServiceList", mock.Anything, mock.Anything).Return(tt.serviceList, nil) clientMock.On("ServiceUpdate", mock.Anything, tt.wantService.ID, tt.wantService.Meta.Version, tt.wantService.Spec, mock.Anything).Return(tt.response, nil) - got, err := provider.Stop(context.Background(), tt.args.name) + err := provider.Stop(context.Background(), tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("DockerSwarmProvider.Stop() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("DockerSwarmProvider.Stop() = %v, want %v", got, tt.want) - } }) } } diff --git a/app/providers/kubernetes/kubernetes.go b/app/providers/kubernetes/kubernetes.go index 2abb2f2..07ff1d7 100644 --- a/app/providers/kubernetes/kubernetes.go +++ b/app/providers/kubernetes/kubernetes.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "github.com/acouvreur/sablier/app/discovery" + "github.com/acouvreur/sablier/app/providers" "strconv" "strings" "time" @@ -23,6 +24,9 @@ import ( "k8s.io/client-go/tools/cache" ) +// Interface guard +var _ providers.Provider = (*KubernetesProvider)(nil) + type Config struct { OriginalName string Kind string // deployment or statefulset @@ -83,19 +87,19 @@ func 
NewKubernetesProvider(providerConfig providerConfig.Kubernetes) (*Kubernete } -func (provider *KubernetesProvider) Start(ctx context.Context, name string) (instance.State, error) { +func (provider *KubernetesProvider) Start(ctx context.Context, name string) error { config, err := provider.convertName(name) if err != nil { - return instance.UnrecoverableInstanceState(name, err.Error(), int(config.Replicas)) + return err } return provider.scale(ctx, config, config.Replicas) } -func (provider *KubernetesProvider) Stop(ctx context.Context, name string) (instance.State, error) { +func (provider *KubernetesProvider) Stop(ctx context.Context, name string) error { config, err := provider.convertName(name) if err != nil { - return instance.UnrecoverableInstanceState(name, err.Error(), int(config.Replicas)) + return err } return provider.scale(ctx, config, 0) @@ -147,7 +151,7 @@ func (provider *KubernetesProvider) GetGroups(ctx context.Context) (map[string][ return groups, nil } -func (provider *KubernetesProvider) scale(ctx context.Context, config *Config, replicas int32) (instance.State, error) { +func (provider *KubernetesProvider) scale(ctx context.Context, config *Config, replicas int32) error { var workload Workload switch config.Kind { @@ -156,22 +160,18 @@ func (provider *KubernetesProvider) scale(ctx context.Context, config *Config, r case "statefulset": workload = provider.Client.AppsV1().StatefulSets(config.Namespace) default: - return instance.UnrecoverableInstanceState(config.OriginalName, fmt.Sprintf("unsupported kind \"%s\" must be one of \"deployment\", \"statefulset\"", config.Kind), int(config.Replicas)) + return fmt.Errorf("unsupported kind \"%s\" must be one of \"deployment\", \"statefulset\"", config.Kind) } s, err := workload.GetScale(ctx, config.Name, metav1.GetOptions{}) if err != nil { - return instance.ErrorInstanceState(config.OriginalName, err, int(config.Replicas)) + return err } s.Spec.Replicas = replicas _, err = workload.UpdateScale(ctx, 
config.Name, s, metav1.UpdateOptions{}) - if err != nil { - return instance.ErrorInstanceState(config.OriginalName, err, int(config.Replicas)) - } - - return instance.NotReadyInstanceState(config.OriginalName, 0, int(config.Replicas)) + return err } func (provider *KubernetesProvider) GetState(ctx context.Context, name string) (instance.State, error) { diff --git a/app/providers/kubernetes/kubernetes_test.go b/app/providers/kubernetes/kubernetes_test.go index c21d9dd..fadf171 100644 --- a/app/providers/kubernetes/kubernetes_test.go +++ b/app/providers/kubernetes/kubernetes_test.go @@ -34,12 +34,6 @@ func TestKubernetesProvider_Start(t *testing.T) { args: args{ name: "deployment_default_nginx_2", }, - want: instance.State{ - Name: "deployment_default_nginx_2", - CurrentReplicas: 0, - DesiredReplicas: 2, - Status: instance.NotReady, - }, data: data{ name: "nginx", get: mocks.V1Scale(0), @@ -52,12 +46,6 @@ func TestKubernetesProvider_Start(t *testing.T) { args: args{ name: "statefulset_default_nginx_2", }, - want: instance.State{ - Name: "statefulset_default_nginx_2", - CurrentReplicas: 0, - DesiredReplicas: 2, - Status: instance.NotReady, - }, data: data{ name: "nginx", get: mocks.V1Scale(0), @@ -70,19 +58,12 @@ func TestKubernetesProvider_Start(t *testing.T) { args: args{ name: "gateway_default_nginx_2", }, - want: instance.State{ - Name: "gateway_default_nginx_2", - CurrentReplicas: 0, - DesiredReplicas: 2, - Status: instance.Unrecoverable, - Message: "unsupported kind \"gateway\" must be one of \"deployment\", \"statefulset\"", - }, data: data{ name: "nginx", get: mocks.V1Scale(0), update: mocks.V1Scale(0), }, - wantErr: false, + wantErr: true, }, } for _, tt := range tests { @@ -100,14 +81,11 @@ func TestKubernetesProvider_Start(t *testing.T) { statefulsetAPI.On("GetScale", mock.Anything, tt.data.name, metav1.GetOptions{}).Return(tt.data.get, nil) statefulsetAPI.On("UpdateScale", mock.Anything, tt.data.name, tt.data.update, metav1.UpdateOptions{}).Return(nil, 
nil) - got, err := provider.Start(context.Background(), tt.args.name) + err := provider.Start(context.Background(), tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("KubernetesProvider.Start() error = %v, wantErr %v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("KubernetesProvider.Start() = %v, want %v", got, tt.want) - } }) } } @@ -133,12 +111,6 @@ func TestKubernetesProvider_Stop(t *testing.T) { args: args{ name: "deployment_default_nginx_2", }, - want: instance.State{ - Name: "deployment_default_nginx_2", - CurrentReplicas: 0, - DesiredReplicas: 2, - Status: instance.NotReady, - }, data: data{ name: "nginx", get: mocks.V1Scale(2), @@ -151,12 +123,6 @@ func TestKubernetesProvider_Stop(t *testing.T) { args: args{ name: "statefulset_default_nginx_2", }, - want: instance.State{ - Name: "statefulset_default_nginx_2", - CurrentReplicas: 0, - DesiredReplicas: 2, - Status: instance.NotReady, - }, data: data{ name: "nginx", get: mocks.V1Scale(2), @@ -169,19 +135,12 @@ func TestKubernetesProvider_Stop(t *testing.T) { args: args{ name: "gateway_default_nginx_2", }, - want: instance.State{ - Name: "gateway_default_nginx_2", - CurrentReplicas: 0, - DesiredReplicas: 2, - Status: instance.Unrecoverable, - Message: "unsupported kind \"gateway\" must be one of \"deployment\", \"statefulset\"", - }, data: data{ name: "nginx", get: mocks.V1Scale(0), update: mocks.V1Scale(0), }, - wantErr: false, + wantErr: true, }, } for _, tt := range tests { @@ -199,14 +158,11 @@ func TestKubernetesProvider_Stop(t *testing.T) { statefulsetAPI.On("GetScale", mock.Anything, tt.data.name, metav1.GetOptions{}).Return(tt.data.get, nil) statefulsetAPI.On("UpdateScale", mock.Anything, tt.data.name, tt.data.update, metav1.UpdateOptions{}).Return(nil, nil) - got, err := provider.Stop(context.Background(), tt.args.name) + err := provider.Stop(context.Background(), tt.args.name) if (err != nil) != tt.wantErr { t.Errorf("KubernetesProvider.Stop() error = %v, wantErr 
%v", err, tt.wantErr) return } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("KubernetesProvider.Stop() = %v, want %v", got, tt.want) - } }) } } diff --git a/app/providers/mock/mock.go b/app/providers/mock/mock.go index 4d8bd23..8207ef2 100644 --- a/app/providers/mock/mock.go +++ b/app/providers/mock/mock.go @@ -8,18 +8,21 @@ import ( "github.com/stretchr/testify/mock" ) +// Interface guard +var _ providers.Provider = (*ProviderMock)(nil) + // ProviderMock is a structure that allows to define the behavior of a Provider type ProviderMock struct { mock.Mock } -func (m *ProviderMock) Start(ctx context.Context, name string) (instance.State, error) { +func (m *ProviderMock) Start(ctx context.Context, name string) error { args := m.Called(ctx, name) - return args.Get(0).(instance.State), args.Error(1) + return args.Error(0) } -func (m *ProviderMock) Stop(ctx context.Context, name string) (instance.State, error) { +func (m *ProviderMock) Stop(ctx context.Context, name string) error { args := m.Called(ctx, name) - return args.Get(0).(instance.State), args.Error(1) + return args.Error(0) } func (m *ProviderMock) GetState(ctx context.Context, name string) (instance.State, error) { args := m.Called(ctx, name) diff --git a/app/providers/provider.go b/app/providers/provider.go index 89ac3d0..6c1ef07 100644 --- a/app/providers/provider.go +++ b/app/providers/provider.go @@ -8,8 +8,8 @@ import ( ) type Provider interface { - Start(ctx context.Context, name string) (instance.State, error) - Stop(ctx context.Context, name string) (instance.State, error) + Start(ctx context.Context, name string) error + Stop(ctx context.Context, name string) error GetState(ctx context.Context, name string) (instance.State, error) GetGroups(ctx context.Context) (map[string][]string, error) InstanceList(ctx context.Context, options InstanceListOptions) ([]types.Instance, error) diff --git a/app/sablier.go b/app/sablier.go index 08f0518..8207aa7 100644 --- a/app/sablier.go +++ b/app/sablier.go @@ 
-89,7 +89,7 @@ func onSessionExpires(provider providers.Provider) func(key string, instance ins return func(_key string, _instance instance.State) { go func(key string, instance instance.State) { log.Debugf("stopping %s...", key) - _, err := provider.Stop(context.Background(), key) + err := provider.Stop(context.Background(), key) if err != nil { log.Warnf("error stopping %s: %s", key, err.Error()) diff --git a/app/sessions/sessions_manager.go b/app/sessions/sessions_manager.go index cd26b6c..1b62f53 100644 --- a/app/sessions/sessions_manager.go +++ b/app/sessions/sessions_manager.go @@ -184,17 +184,22 @@ func (s *SessionsManager) requestSessionInstance(name string, duration time.Dura if !exists { log.Debugf("starting %s...", name) - state, err := s.provider.Start(s.ctx, name) - + err := s.provider.Start(s.ctx, name) if err != nil { - log.Errorf("an error occurred starting %s: %s", name, err.Error()) + errState, _ := instance.ErrorInstanceState(name, err, 1) + requestState.Name = errState.Name + requestState.CurrentReplicas = errState.CurrentReplicas + requestState.DesiredReplicas = errState.DesiredReplicas + requestState.Status = errState.Status + requestState.Message = errState.Message + } else { + state, _ := s.provider.GetState(s.ctx, name) + requestState.CurrentReplicas = state.CurrentReplicas + requestState.DesiredReplicas = state.DesiredReplicas + requestState.Status = state.Status + requestState.Message = state.Message } - requestState.Name = state.Name - requestState.CurrentReplicas = state.CurrentReplicas - requestState.DesiredReplicas = state.DesiredReplicas - requestState.Status = state.Status - requestState.Message = state.Message log.Debugf("status for %s=%s", name, requestState.Status) } else if requestState.Status != instance.Ready { log.Debugf("checking %s...", name) diff --git a/docker-compose.yml b/docker-compose.yml index c96903d..e85dca7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -38,7 +38,7 @@ services: - 
traefik.http.middlewares.group.plugin.sablier.blocking.timeout=30s whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik # The route doesn't exist anymore. Use dynamic-config.yml file instead. # labels: diff --git a/docs/getting-started.md b/docs/getting-started.md index f8ffae2..252d301 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -43,7 +43,7 @@ services: - ./Caddyfile:/etc/caddy/Caddyfile:ro whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 ``` #### **Caddyfile** @@ -75,7 +75,7 @@ services: - ./Caddyfile:/etc/caddy/Caddyfile:ro whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 sablier: image: acouvreur/sablier:1.8.0-beta.8 @@ -110,7 +110,7 @@ services: - ./Caddyfile:/etc/caddy/Caddyfile:ro whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 sablier: image: acouvreur/sablier:1.8.0-beta.8 @@ -139,7 +139,7 @@ services: - ./Caddyfile:/etc/caddy/Caddyfile:ro whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 labels: - sablier.enable=true - sablier.group=demo diff --git a/docs/plugins/traefik.md b/docs/plugins/traefik.md index 7f0557c..55df2d8 100644 --- a/docs/plugins/traefik.md +++ b/docs/plugins/traefik.md @@ -26,7 +26,7 @@ You have to use the dynamic config file provider instead. 
```yaml whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 labels: - traefik.enable - traefik.http.routers.whoami.rule=PathPrefix(`/whoami`) @@ -66,7 +66,7 @@ See also [`traefik.docker.lbswarm`](https://doc.traefik.io/traefik/routing/provi ```yaml services: whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 deploy: replicas: 0 labels: diff --git a/docs/providers/docker.md b/docs/providers/docker.md index 35cadc3..fe37cde 100644 --- a/docs/providers/docker.md +++ b/docs/providers/docker.md @@ -51,7 +51,7 @@ You have to register your containers by opting-in with labels. ```yaml services: whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 labels: - sablier.enable=true - sablier.group=mygroup diff --git a/docs/providers/docker_swarm.md b/docs/providers/docker_swarm.md index 7494c93..f568bbf 100644 --- a/docs/providers/docker_swarm.md +++ b/docs/providers/docker_swarm.md @@ -52,7 +52,7 @@ You have to register your services by opting-in with labels. ```yaml services: whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 deploy: labels: - sablier.enable=true diff --git a/docs/providers/kubernetes.md b/docs/providers/kubernetes.md index ac6b860..3de3f83 100644 --- a/docs/providers/kubernetes.md +++ b/docs/providers/kubernetes.md @@ -88,7 +88,7 @@ spec: spec: containers: - name: whoami - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 ``` ## How does Sablier knows when a deployment is ready? 
diff --git a/plugins/caddy/e2e/docker/docker-compose.yml b/plugins/caddy/e2e/docker/docker-compose.yml index 6871a32..d4f359e 100644 --- a/plugins/caddy/e2e/docker/docker-compose.yml +++ b/plugins/caddy/e2e/docker/docker-compose.yml @@ -19,7 +19,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s labels: - sablier.enable=true - sablier.group=E2E diff --git a/plugins/caddy/e2e/docker_swarm/docker-stack.yml b/plugins/caddy/e2e/docker_swarm/docker-stack.yml index 8919b97..fb5a9e8 100644 --- a/plugins/caddy/e2e/docker_swarm/docker-stack.yml +++ b/plugins/caddy/e2e/docker_swarm/docker-stack.yml @@ -24,7 +24,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s deploy: labels: - sablier.enable=true diff --git a/plugins/nginx/e2e/docker/docker-compose.yml b/plugins/nginx/e2e/docker/docker-compose.yml index 99d6238..1684496 100644 --- a/plugins/nginx/e2e/docker/docker-compose.yml +++ b/plugins/nginx/e2e/docker/docker-compose.yml @@ -23,7 +23,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s labels: - sablier.enable=true - sablier.group=E2E diff --git a/plugins/nginx/e2e/docker_swarm/docker-stack.yml b/plugins/nginx/e2e/docker_swarm/docker-stack.yml index 6003d8e..8ff800f 100644 --- a/plugins/nginx/e2e/docker_swarm/docker-stack.yml +++ b/plugins/nginx/e2e/docker_swarm/docker-stack.yml @@ -26,7 +26,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", 
"http://localhost" ] + interval: 5s deploy: labels: - sablier.enable=true diff --git a/plugins/nginx/e2e/kubernetes/manifests/deployment.yml b/plugins/nginx/e2e/kubernetes/manifests/deployment.yml index b7bd13a..ee3c723 100644 --- a/plugins/nginx/e2e/kubernetes/manifests/deployment.yml +++ b/plugins/nginx/e2e/kubernetes/manifests/deployment.yml @@ -18,7 +18,7 @@ spec: spec: containers: - name: whoami - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 --- apiVersion: v1 kind: Service diff --git a/plugins/proxywasm/e2e/apacheapisix/docker/compose.yaml b/plugins/proxywasm/e2e/apacheapisix/docker/compose.yaml index bd7cd8d..187193e 100644 --- a/plugins/proxywasm/e2e/apacheapisix/docker/compose.yaml +++ b/plugins/proxywasm/e2e/apacheapisix/docker/compose.yaml @@ -19,7 +19,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s labels: - sablier.enable=true - sablier.group=E2E diff --git a/plugins/proxywasm/e2e/envoy/docker/compose.yaml b/plugins/proxywasm/e2e/envoy/docker/compose.yaml index 7a42bc8..e67d96b 100644 --- a/plugins/proxywasm/e2e/envoy/docker/compose.yaml +++ b/plugins/proxywasm/e2e/envoy/docker/compose.yaml @@ -18,7 +18,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s labels: - sablier.enable=true - sablier.group=E2E diff --git a/plugins/proxywasm/e2e/istio/kubernetes/manifests/whoami.yml b/plugins/proxywasm/e2e/istio/kubernetes/manifests/whoami.yml index 18d42cc..4a7e9d3 100644 --- a/plugins/proxywasm/e2e/istio/kubernetes/manifests/whoami.yml +++ b/plugins/proxywasm/e2e/istio/kubernetes/manifests/whoami.yml @@ -16,7 +16,7 @@ spec: spec: containers: - name: whoami - image: containous/whoami:v1.5.0 + image: 
acouvreur/whoami:v1.10.2 --- apiVersion: v1 kind: Service diff --git a/plugins/proxywasm/e2e/nginx/docker/compose.yaml b/plugins/proxywasm/e2e/nginx/docker/compose.yaml index 20abed6..a5f7098 100644 --- a/plugins/proxywasm/e2e/nginx/docker/compose.yaml +++ b/plugins/proxywasm/e2e/nginx/docker/compose.yaml @@ -19,7 +19,10 @@ services: - '/var/run/docker.sock:/var/run/docker.sock' whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s labels: - sablier.enable=true - sablier.group=E2E diff --git a/plugins/traefik/README.md b/plugins/traefik/README.md index c33e5de..772b541 100644 --- a/plugins/traefik/README.md +++ b/plugins/traefik/README.md @@ -94,7 +94,7 @@ services: - traefik.http.middlewares.dynamic.plugin.sablier.dynamic.sessionDuration=1m whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik # The route doesn't exist anymore. Use dynamic-config.yml file instead. # labels: @@ -130,7 +130,7 @@ http: ```yaml services: whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 deploy: replicas: 0 labels: diff --git a/plugins/traefik/e2e/docker/docker-compose.yml b/plugins/traefik/e2e/docker/docker-compose.yml index 2875c3a..28cca37 100644 --- a/plugins/traefik/e2e/docker/docker-compose.yml +++ b/plugins/traefik/e2e/docker/docker-compose.yml @@ -54,9 +54,12 @@ services: - traefik.http.middlewares.group.plugin.sablier.dynamic.displayName=Group E2E whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik # The route doesn't exist anymore. Use dynamic-config.yml file instead. 
+ healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s labels: - sablier.enable=true - sablier.group=E2E diff --git a/plugins/traefik/e2e/docker_swarm/docker-stack.yml b/plugins/traefik/e2e/docker_swarm/docker-stack.yml index 7399327..efe0512 100644 --- a/plugins/traefik/e2e/docker_swarm/docker-stack.yml +++ b/plugins/traefik/e2e/docker_swarm/docker-stack.yml @@ -62,7 +62,10 @@ services: - traefik.http.services.sablier.loadbalancer.server.port=10000 whoami: - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost" ] + interval: 5s deploy: replicas: 0 labels: diff --git a/plugins/traefik/e2e/kubernetes/manifests/deployment.yml b/plugins/traefik/e2e/kubernetes/manifests/deployment.yml index a237537..b655aad 100644 --- a/plugins/traefik/e2e/kubernetes/manifests/deployment.yml +++ b/plugins/traefik/e2e/kubernetes/manifests/deployment.yml @@ -18,7 +18,13 @@ spec: spec: containers: - name: whoami - image: containous/whoami:v1.5.0 + image: acouvreur/whoami:v1.10.2 + livenessProbe: + httpGet: + path: /health + port: 80 + initialDelaySeconds: 5 + periodSeconds: 5 --- apiVersion: v1 kind: Service @@ -170,6 +176,14 @@ spec: containers: - name: nginx image: nginx:1.23.1 + livenessProbe: + exec: + command: + - curl + - -f + - http://localhost + initialDelaySeconds: 5 + periodSeconds: 5 --- apiVersion: v1 kind: Service