test(docker): use testcontainers (#519)
* refactor: move app/theme to pkg/theme
* refactor
* wip
* use dockerindocker
* add providertest
* wip
* wip
* test(docker): get state now uses dind container to test against a real provider
* test(docker): move to docker_test package
* refactor(docker): create container_inspect.go
* test(docker): add more dind test
* test(docker): event test now use docker in docker
* refactor: remove unused instance type props
* refactor test docker
* fix instance list sort
* stabilize test
* remove testcontainers custom config
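In short, the Docker provider tests no longer rely on hand-written API mocks: each test starts a privileged docker:dind container through testcontainers-go and talks to the daemon inside it with a real Docker client. A minimal sketch of that harness, abridged from the new pkg/provider/docker/testcontainers_test.go (the full version also copies the sablierapp/mimic test image into the DinD daemon and wraps the client in a dindContainer helper):

package docker_test

import (
	"context"
	"fmt"
	"testing"

	"github.com/docker/docker/client"
	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
	"gotest.tools/v3/assert"
)

// setupDinD starts a privileged docker:dind container and returns a Docker
// client pointed at the daemon running inside it, so provider tests exercise
// a real Docker API instead of mocks.
func setupDinD(t *testing.T, ctx context.Context) *client.Client {
	t.Helper()
	req := testcontainers.ContainerRequest{
		Image:        "docker:dind",
		ExposedPorts: []string{"2375/tcp"},
		WaitingFor:   wait.ForLog("API listen on [::]:2375"),
		Cmd:          []string{"dockerd", "-H", "tcp://0.0.0.0:2375", "--tls=false"},
		Privileged:   true,
	}
	c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: req,
		Started:          true,
	})
	assert.NilError(t, err)
	t.Cleanup(func() { testcontainers.CleanupContainer(t, c) })

	host, err := c.Host(ctx)
	assert.NilError(t, err)
	port, err := c.MappedPort(ctx, "2375")
	assert.NilError(t, err)

	// Point a regular Docker client at the daemon inside the DinD container.
	cli, err := client.NewClientWithOpts(
		client.WithHost(fmt.Sprintf("http://%s:%s", host, port.Port())),
		client.WithAPIVersionNegotiation(),
	)
	assert.NilError(t, err)
	return cli
}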
Makefile (4 lines changed)
@@ -20,8 +20,8 @@ $(PLATFORMS):
run:
	go run main.go start --storage.file=state.json --logging.level=debug

generate:
	go generate ./..
gen:
	go generate -v ./...

build:
	go build -v .
@@ -1,11 +1,9 @@
package discovery

const (
	LabelEnable = "sablier.enable"
	LabelGroup = "sablier.group"
	LabelGroupDefaultValue = "default"
	LabelReplicas = "sablier.replicas"
	LabelReplicasDefaultValue uint64 = 1
	LabelEnable = "sablier.enable"
	LabelGroup = "sablier.group"
	LabelGroupDefaultValue = "default"
)

type Group struct {
||||
@@ -2,8 +2,8 @@ package routes

import (
	"github.com/sablierapp/sablier/app/sessions"
	"github.com/sablierapp/sablier/app/theme"
	"github.com/sablierapp/sablier/config"
	"github.com/sablierapp/sablier/pkg/theme"
)

type ServeStrategy struct {
||||
@@ -3,6 +3,7 @@ package app
import (
	"context"
	"fmt"
	"github.com/docker/docker/client"
	"github.com/sablierapp/sablier/app/discovery"
	"github.com/sablierapp/sablier/app/http/routes"
	"github.com/sablierapp/sablier/pkg/provider"
@@ -10,6 +11,7 @@ import (
	"github.com/sablierapp/sablier/pkg/provider/dockerswarm"
	"github.com/sablierapp/sablier/pkg/provider/kubernetes"
	"github.com/sablierapp/sablier/pkg/store/inmemory"
	"github.com/sablierapp/sablier/pkg/theme"
	"log/slog"
	"os"
	"os/signal"
@@ -18,7 +20,6 @@ import (

	"github.com/sablierapp/sablier/app/sessions"
	"github.com/sablierapp/sablier/app/storage"
	"github.com/sablierapp/sablier/app/theme"
	"github.com/sablierapp/sablier/config"
	"github.com/sablierapp/sablier/internal/server"
	"github.com/sablierapp/sablier/version"
@@ -174,7 +175,11 @@ func NewProvider(ctx context.Context, logger *slog.Logger, config config.Provide
	case "swarm", "docker_swarm":
		return dockerswarm.NewDockerSwarmProvider(ctx, logger)
	case "docker":
		return docker.NewDockerClassicProvider(ctx, logger)
		cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
		if err != nil {
			return nil, fmt.Errorf("cannot create docker client: %v", err)
		}
		return docker.NewDockerClassicProvider(ctx, cli, logger)
	case "kubernetes":
		return kubernetes.NewKubernetesProvider(ctx, logger, config.Kubernetes)
	}
||||
@@ -1,11 +1,6 @@
package types

type Instance struct {
	Name string
	Kind string
	Status string
	Replicas uint64
	DesiredReplicas uint64
	ScalingReplicas uint64
	Group string
	Name string
	Group string
}
||||
@@ -5,8 +5,8 @@ import (
	"github.com/neilotoole/slogt"
	"github.com/sablierapp/sablier/app/http/routes"
	"github.com/sablierapp/sablier/app/sessions/sessionstest"
	"github.com/sablierapp/sablier/app/theme"
	"github.com/sablierapp/sablier/config"
	"github.com/sablierapp/sablier/pkg/theme"
	"go.uber.org/mock/gomock"
	"gotest.tools/v3/assert"
	"net/http"
||||
@@ -2,7 +2,7 @@ package api

import (
	"github.com/sablierapp/sablier/app/sessions"
	"github.com/sablierapp/sablier/app/theme"
	"github.com/sablierapp/sablier/pkg/theme"
	"github.com/tniswong/go.rfcx/rfc7807"
	"net/http"
)
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/sablierapp/sablier/app/http/routes/models"
|
||||
"github.com/sablierapp/sablier/app/instance"
|
||||
"github.com/sablierapp/sablier/app/sessions"
|
||||
"github.com/sablierapp/sablier/app/theme"
|
||||
theme2 "github.com/sablierapp/sablier/pkg/theme"
|
||||
)
|
||||
|
||||
func StartDynamic(router *gin.RouterGroup, s *routes.ServeStrategy) {
|
||||
@@ -65,7 +65,7 @@ func StartDynamic(router *gin.RouterGroup, s *routes.ServeStrategy) {
|
||||
|
||||
AddSablierHeader(c, sessionState)
|
||||
|
||||
renderOptions := theme.Options{
|
||||
renderOptions := theme2.Options{
|
||||
DisplayName: request.DisplayName,
|
||||
ShowDetails: request.ShowDetails,
|
||||
SessionDuration: request.SessionDuration,
|
||||
@@ -76,7 +76,7 @@ func StartDynamic(router *gin.RouterGroup, s *routes.ServeStrategy) {
|
||||
buf := new(bytes.Buffer)
|
||||
writer := bufio.NewWriter(buf)
|
||||
err = s.Theme.Render(request.Theme, renderOptions, writer)
|
||||
var themeNotFound theme.ErrThemeNotFound
|
||||
var themeNotFound theme2.ErrThemeNotFound
|
||||
if errors.As(err, &themeNotFound) {
|
||||
AbortWithProblemDetail(c, ProblemThemeNotFound(themeNotFound))
|
||||
return
|
||||
@@ -90,7 +90,7 @@ func StartDynamic(router *gin.RouterGroup, s *routes.ServeStrategy) {
|
||||
})
|
||||
}
|
||||
|
||||
func sessionStateToRenderOptionsInstanceState(sessionState *sessions.SessionState) (instances []theme.Instance) {
|
||||
func sessionStateToRenderOptionsInstanceState(sessionState *sessions.SessionState) (instances []theme2.Instance) {
|
||||
if sessionState == nil {
|
||||
return
|
||||
}
|
||||
@@ -106,7 +106,7 @@ func sessionStateToRenderOptionsInstanceState(sessionState *sessions.SessionStat
|
||||
return
|
||||
}
|
||||
|
||||
func instanceStateToRenderOptionsRequestState(instanceState instance.State) theme.Instance {
|
||||
func instanceStateToRenderOptionsRequestState(instanceState instance.State) theme2.Instance {
|
||||
|
||||
var err error
|
||||
if instanceState.Message == "" {
|
||||
@@ -115,7 +115,7 @@ func instanceStateToRenderOptionsRequestState(instanceState instance.State) them
|
||||
err = errors.New(instanceState.Message)
|
||||
}
|
||||
|
||||
return theme.Instance{
|
||||
return theme2.Instance{
|
||||
Name: instanceState.Name,
|
||||
Status: instanceState.Status,
|
||||
CurrentReplicas: instanceState.CurrentReplicas,
|
||||
|
||||
pkg/provider/docker/container_inspect.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package docker

import (
	"context"
	"fmt"
	"github.com/sablierapp/sablier/app/instance"
	"log/slog"
)

func (p *DockerClassicProvider) GetState(ctx context.Context, name string) (instance.State, error) {
	spec, err := p.Client.ContainerInspect(ctx, name)
	if err != nil {
		return instance.State{}, fmt.Errorf("cannot inspect container: %w", err)
	}

	// "created", "running", "paused", "restarting", "removing", "exited", or "dead"
	switch spec.State.Status {
	case "created", "paused", "restarting", "removing":
		return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
	case "running":
		if spec.State.Health != nil {
			// // "starting", "healthy" or "unhealthy"
			if spec.State.Health.Status == "healthy" {
				return instance.ReadyInstanceState(name, p.desiredReplicas), nil
			} else if spec.State.Health.Status == "unhealthy" {
				return instance.UnrecoverableInstanceState(name, "container is unhealthy", p.desiredReplicas), nil
			} else {
				return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
			}
		}
		p.l.WarnContext(ctx, "container running without healthcheck, you should define a healthcheck on your container so that Sablier properly detects when the container is ready to handle requests.", slog.String("container", name))
		return instance.ReadyInstanceState(name, p.desiredReplicas), nil
	case "exited":
		if spec.State.ExitCode != 0 {
			return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container exited with code \"%d\"", spec.State.ExitCode), p.desiredReplicas), nil
		}
		return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
	case "dead":
		return instance.UnrecoverableInstanceState(name, "container in \"dead\" state cannot be restarted", p.desiredReplicas), nil
	default:
		return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container status \"%s\" not handled", spec.State.Status), p.desiredReplicas), nil
	}
}
pkg/provider/docker/container_inspect_test.go (new file, 280 lines)
@@ -0,0 +1,280 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/neilotoole/slogt"
|
||||
"github.com/sablierapp/sablier/app/instance"
|
||||
"github.com/sablierapp/sablier/pkg/provider/docker"
|
||||
"gotest.tools/v3/assert"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestDockerClassicProvider_GetState(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
type args struct {
|
||||
do func(dind *dindContainer) (string, error)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want instance.State
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
name: "created container",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
resp, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic"},
|
||||
Healthcheck: nil,
|
||||
})
|
||||
return resp.ID, err
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "running container without healthcheck",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic"},
|
||||
Healthcheck: nil,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.ID, dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 1,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Ready,
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "running container with \"starting\" health",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic", "-running", "-running-after", "1s", "-healthy", "true"},
|
||||
// Keep long interval so that the container is still in starting state
|
||||
Healthcheck: &container.HealthConfig{
|
||||
Test: []string{"CMD", "/mimic", "healthcheck"},
|
||||
Interval: time.Second,
|
||||
Timeout: 10 * time.Second,
|
||||
StartPeriod: 10 * time.Second,
|
||||
StartInterval: 10 * time.Second,
|
||||
Retries: 10,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.ID, dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "running container with \"unhealthy\" health",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic", "-running", "-running-after=1ms", "-healthy=false", "-healthy-after=1ms"},
|
||||
Healthcheck: &container.HealthConfig{
|
||||
Test: []string{"CMD", "/mimic", "healthcheck"},
|
||||
Timeout: time.Second,
|
||||
Interval: time.Millisecond,
|
||||
StartInterval: time.Millisecond,
|
||||
StartPeriod: time.Millisecond,
|
||||
Retries: 1,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
<-time.After(2 * time.Second)
|
||||
|
||||
return c.ID, nil
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Unrecoverable,
|
||||
Message: "container is unhealthy",
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "running container with \"healthy\" health",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic", "-running", "-running-after=1ms", "-healthy", "-healthy-after=1ms"},
|
||||
Healthcheck: &container.HealthConfig{
|
||||
Test: []string{"CMD", "/mimic", "healthcheck"},
|
||||
Interval: 100 * time.Millisecond,
|
||||
Timeout: time.Second,
|
||||
StartPeriod: time.Second,
|
||||
StartInterval: 100 * time.Millisecond,
|
||||
Retries: 10,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
<-time.After(2 * time.Second)
|
||||
|
||||
return c.ID, nil
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 1,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Ready,
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "paused container",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerPause(ctx, c.ID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.ID, nil
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "exited container with status code 0",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic", "-running=false", "-exit-code=0"},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
waitC, _ := dind.client.ContainerWait(ctx, c.ID, container.WaitConditionNotRunning)
|
||||
<-waitC
|
||||
|
||||
return c.ID, nil
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
{
|
||||
name: "nginx exited container state with status code 137",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Cmd: []string{"/mimic", "-running=false", "-exit-code=137"},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
waitC, _ := dind.client.ContainerWait(ctx, c.ID, container.WaitConditionNotRunning)
|
||||
<-waitC
|
||||
|
||||
return c.ID, nil
|
||||
},
|
||||
},
|
||||
want: instance.State{
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Unrecoverable,
|
||||
Message: "container exited with code \"137\"",
|
||||
},
|
||||
wantErr: nil,
|
||||
},
|
||||
}
|
||||
c := setupDinD(t, ctx)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
p, err := docker.NewDockerClassicProvider(ctx, c.client, slogt.New(t))
|
||||
|
||||
name, err := tt.args.do(c)
|
||||
assert.NilError(t, err)
|
||||
|
||||
tt.want.Name = name
|
||||
got, err := p.GetState(ctx, name)
|
||||
if !cmp.Equal(err, tt.wantErr) {
|
||||
t.Errorf("DockerClassicProvider.GetState() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
assert.DeepEqual(t, got, tt.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -14,16 +14,12 @@ import (

func (p *DockerClassicProvider) InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]types.Instance, error) {
	args := filters.NewArgs()
	for _, label := range options.Labels {
		args.Add("label", label)
		args.Add("label", fmt.Sprintf("%s=true", label))
	}
	args.Add("label", fmt.Sprintf("%s=true", discovery.LabelEnable))

	containers, err := p.Client.ContainerList(ctx, container.ListOptions{
		All: options.All,
		Filters: args,
	})

	if err != nil {
		return nil, err
	}
@@ -49,12 +45,34 @@ func containerToInstance(c dockertypes.Container) types.Instance {
	}

	return types.Instance{
		Name: strings.TrimPrefix(c.Names[0], "/"), // Containers name are reported with a leading slash
		Kind: "container",
		Status: c.Status,
		// Replicas: c.Status,
		// DesiredReplicas: 1,
		ScalingReplicas: 1,
		Group: group,
		Name: strings.TrimPrefix(c.Names[0], "/"), // Containers name are reported with a leading slash
		Group: group,
	}
}

func (p *DockerClassicProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
	args := filters.NewArgs()
	args.Add("label", fmt.Sprintf("%s=true", discovery.LabelEnable))

	containers, err := p.Client.ContainerList(ctx, container.ListOptions{
		All: true,
		Filters: args,
	})

	if err != nil {
		return nil, err
	}

	groups := make(map[string][]string)
	for _, c := range containers {
		groupName := c.Labels[discovery.LabelGroup]
		if len(groupName) == 0 {
			groupName = discovery.LabelGroupDefaultValue
		}
		group := groups[groupName]
		group = append(group, strings.TrimPrefix(c.Names[0], "/"))
		groups[groupName] = group
	}

	return groups, nil
}
pkg/provider/docker/container_list_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"github.com/neilotoole/slogt"
|
||||
"github.com/sablierapp/sablier/app/types"
|
||||
"github.com/sablierapp/sablier/pkg/provider"
|
||||
"github.com/sablierapp/sablier/pkg/provider/docker"
|
||||
"gotest.tools/v3/assert"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDockerClassicProvider_InstanceList(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
dind := setupDinD(t, ctx)
|
||||
p, err := docker.NewDockerClassicProvider(ctx, dind.client, slogt.New(t))
|
||||
assert.NilError(t, err)
|
||||
|
||||
c1, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Labels: map[string]string{
|
||||
"sablier.enable": "true",
|
||||
},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
|
||||
i1, err := dind.client.ContainerInspect(ctx, c1.ID)
|
||||
assert.NilError(t, err)
|
||||
|
||||
assert.NilError(t, err)
|
||||
|
||||
c2, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Labels: map[string]string{
|
||||
"sablier.enable": "true",
|
||||
"sablier.group": "my-group",
|
||||
},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
|
||||
i2, err := dind.client.ContainerInspect(ctx, c2.ID)
|
||||
assert.NilError(t, err)
|
||||
|
||||
got, err := p.InstanceList(ctx, provider.InstanceListOptions{
|
||||
All: true,
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
|
||||
want := []types.Instance{
|
||||
{
|
||||
Name: strings.TrimPrefix(i1.Name, "/"),
|
||||
Group: "default",
|
||||
},
|
||||
{
|
||||
Name: strings.TrimPrefix(i2.Name, "/"),
|
||||
Group: "my-group",
|
||||
},
|
||||
}
|
||||
// Assert go is equal to want
|
||||
// Sort both array to ensure they are equal
|
||||
sort.Slice(got, func(i, j int) bool {
|
||||
return strings.Compare(got[i].Name, got[j].Name) < 0
|
||||
})
|
||||
sort.Slice(want, func(i, j int) bool {
|
||||
return strings.Compare(want[i].Name, want[j].Name) < 0
|
||||
})
|
||||
assert.DeepEqual(t, got, want)
|
||||
}
|
||||
|
||||
func TestDockerClassicProvider_GetGroups(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
ctx := t.Context()
|
||||
dind := setupDinD(t, ctx)
|
||||
p, err := docker.NewDockerClassicProvider(ctx, dind.client, slogt.New(t))
|
||||
assert.NilError(t, err)
|
||||
|
||||
c1, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Labels: map[string]string{
|
||||
"sablier.enable": "true",
|
||||
},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
|
||||
i1, err := dind.client.ContainerInspect(ctx, c1.ID)
|
||||
assert.NilError(t, err)
|
||||
|
||||
assert.NilError(t, err)
|
||||
|
||||
c2, err := dind.CreateMimic(ctx, MimicOptions{
|
||||
Labels: map[string]string{
|
||||
"sablier.enable": "true",
|
||||
"sablier.group": "my-group",
|
||||
},
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
|
||||
i2, err := dind.client.ContainerInspect(ctx, c2.ID)
|
||||
assert.NilError(t, err)
|
||||
|
||||
got, err := p.GetGroups(ctx)
|
||||
assert.NilError(t, err)
|
||||
|
||||
want := map[string][]string{
|
||||
"default": {strings.TrimPrefix(i1.Name, "/")},
|
||||
"my-group": {strings.TrimPrefix(i2.Name, "/")},
|
||||
}
|
||||
|
||||
assert.DeepEqual(t, got, want)
|
||||
}
|
||||
pkg/provider/docker/container_start.go (new file, 16 lines)
@@ -0,0 +1,16 @@
package docker

import (
	"context"
	"fmt"
	"github.com/docker/docker/api/types/container"
)

func (p *DockerClassicProvider) Start(ctx context.Context, name string) error {
	// TODO: Start should block until the container is ready.
	err := p.Client.ContainerStart(ctx, name, container.StartOptions{})
	if err != nil {
		return fmt.Errorf("cannot start container %s: %w", name, err)
	}
	return nil
}
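The TODO above notes that Start does not yet block until the container is ready. A hypothetical follow-up, not part of this change, could poll the provider's own GetState until the instance reports Ready; a rough sketch (it would additionally need the time and app/instance imports):

// Hypothetical sketch, not in this PR: start a container and poll GetState
// until it becomes Ready, failing fast on an Unrecoverable state.
func (p *DockerClassicProvider) startAndWaitReady(ctx context.Context, name string) error {
	if err := p.Start(ctx, name); err != nil {
		return err
	}
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
			state, err := p.GetState(ctx, name)
			if err != nil {
				return err
			}
			switch state.Status {
			case instance.Ready:
				return nil
			case instance.Unrecoverable:
				return fmt.Errorf("container %s cannot become ready: %s", name, state.Message)
			}
		}
	}
}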
pkg/provider/docker/container_start_test.go (new file, 64 lines)
@@ -0,0 +1,64 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/neilotoole/slogt"
|
||||
"github.com/sablierapp/sablier/pkg/provider/docker"
|
||||
"gotest.tools/v3/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDockerClassicProvider_Start(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
type args struct {
|
||||
do func(dind *dindContainer) (string, error)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "non existing container start",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
return "non-existent", nil
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("cannot start container non-existent: Error response from daemon: No such container: non-existent"),
|
||||
},
|
||||
{
|
||||
name: "container start as expected",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{})
|
||||
return c.ID, err
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
c := setupDinD(t, ctx)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
p, err := docker.NewDockerClassicProvider(ctx, c.client, slogt.New(t))
|
||||
assert.NilError(t, err)
|
||||
|
||||
name, err := tt.args.do(c)
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = p.Start(t.Context(), name)
|
||||
if tt.err != nil {
|
||||
assert.Error(t, err, tt.err.Error())
|
||||
} else {
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
pkg/provider/docker/container_stop.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package docker

import (
	"context"
	"fmt"
	"github.com/docker/docker/api/types/container"
	"log/slog"
)

func (p *DockerClassicProvider) Stop(ctx context.Context, name string) error {
	p.l.DebugContext(ctx, "stopping container", slog.String("name", name))
	err := p.Client.ContainerStop(ctx, name, container.StopOptions{})
	if err != nil {
		p.l.ErrorContext(ctx, "cannot stop container", slog.String("name", name), slog.Any("error", err))
		return fmt.Errorf("cannot stop container %s: %w", name, err)
	}

	p.l.DebugContext(ctx, "waiting for container to stop", slog.String("name", name))
	waitC, errC := p.Client.ContainerWait(ctx, name, container.WaitConditionNotRunning)
	select {
	case <-waitC:
		p.l.DebugContext(ctx, "container stopped", slog.String("name", name))
		return nil
	case err := <-errC:
		p.l.ErrorContext(ctx, "cannot wait for container to stop", slog.String("name", name), slog.Any("error", err))
		return fmt.Errorf("cannot wait for container %s to stop: %w", name, err)
	case <-ctx.Done():
		p.l.ErrorContext(ctx, "context cancelled while waiting for container to stop", slog.String("name", name))
		return ctx.Err()
	}
}
pkg/provider/docker/container_stop_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/neilotoole/slogt"
|
||||
"github.com/sablierapp/sablier/pkg/provider/docker"
|
||||
"gotest.tools/v3/assert"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDockerClassicProvider_Stop(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
type args struct {
|
||||
do func(dind *dindContainer) (string, error)
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "non existing container stop",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
return "non-existent", nil
|
||||
},
|
||||
},
|
||||
err: fmt.Errorf("cannot stop container non-existent: Error response from daemon: No such container: non-existent"),
|
||||
},
|
||||
{
|
||||
name: "container stop as expected",
|
||||
args: args{
|
||||
do: func(dind *dindContainer) (string, error) {
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return c.ID, nil
|
||||
},
|
||||
},
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
c := setupDinD(t, ctx)
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
t.Parallel()
|
||||
p, err := docker.NewDockerClassicProvider(ctx, c.client, slogt.New(t))
|
||||
|
||||
name, err := tt.args.do(c)
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = p.Stop(t.Context(), name)
|
||||
if tt.err != nil {
|
||||
assert.Error(t, err, tt.err.Error())
|
||||
} else {
|
||||
assert.NilError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,21 +2,10 @@ package docker

import (
	"context"
	"errors"
	"fmt"
	"github.com/sablierapp/sablier/app/discovery"
	"github.com/sablierapp/sablier/pkg/provider"
	"io"
	"log/slog"
	"strings"

	"github.com/docker/docker/api/types/container"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"github.com/sablierapp/sablier/app/instance"
	"github.com/sablierapp/sablier/pkg/provider"
	"log/slog"
)

// Interface guard
@@ -28,12 +17,8 @@ type DockerClassicProvider struct {
	l *slog.Logger
}

func NewDockerClassicProvider(ctx context.Context, logger *slog.Logger) (*DockerClassicProvider, error) {
func NewDockerClassicProvider(ctx context.Context, cli *client.Client, logger *slog.Logger) (*DockerClassicProvider, error) {
	logger = logger.With(slog.String("provider", "docker"))
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		return nil, fmt.Errorf("cannot create docker client: %v", err)
	}

	serverVersion, err := cli.ServerVersion(ctx)
	if err != nil {
@@ -50,111 +35,3 @@ func NewDockerClassicProvider(ctx context.Context, logger *slog.Logger) (*Docker
		l: logger,
	}, nil
}
|
||||
func (p *DockerClassicProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
|
||||
args := filters.NewArgs()
|
||||
args.Add("label", fmt.Sprintf("%s=true", discovery.LabelEnable))
|
||||
|
||||
containers, err := p.Client.ContainerList(ctx, container.ListOptions{
|
||||
All: true,
|
||||
Filters: args,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
groups := make(map[string][]string)
|
||||
for _, c := range containers {
|
||||
groupName := c.Labels[discovery.LabelGroup]
|
||||
if len(groupName) == 0 {
|
||||
groupName = discovery.LabelGroupDefaultValue
|
||||
}
|
||||
group := groups[groupName]
|
||||
group = append(group, strings.TrimPrefix(c.Names[0], "/"))
|
||||
groups[groupName] = group
|
||||
}
|
||||
|
||||
return groups, nil
|
||||
}
|
||||
|
||||
func (p *DockerClassicProvider) Start(ctx context.Context, name string) error {
|
||||
return p.Client.ContainerStart(ctx, name, container.StartOptions{})
|
||||
}
|
||||
|
||||
func (p *DockerClassicProvider) Stop(ctx context.Context, name string) error {
|
||||
return p.Client.ContainerStop(ctx, name, container.StopOptions{})
|
||||
}
|
||||
|
||||
func (p *DockerClassicProvider) GetState(ctx context.Context, name string) (instance.State, error) {
|
||||
spec, err := p.Client.ContainerInspect(ctx, name)
|
||||
if err != nil {
|
||||
return instance.State{}, err
|
||||
}
|
||||
|
||||
// "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
switch spec.State.Status {
|
||||
case "created", "paused", "restarting", "removing":
|
||||
return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
|
||||
case "running":
|
||||
if spec.State.Health != nil {
|
||||
// // "starting", "healthy" or "unhealthy"
|
||||
if spec.State.Health.Status == "healthy" {
|
||||
return instance.ReadyInstanceState(name, p.desiredReplicas), nil
|
||||
} else if spec.State.Health.Status == "unhealthy" {
|
||||
if len(spec.State.Health.Log) >= 1 {
|
||||
lastLog := spec.State.Health.Log[len(spec.State.Health.Log)-1]
|
||||
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container is unhealthy: %s (%d)", lastLog.Output, lastLog.ExitCode), p.desiredReplicas), nil
|
||||
} else {
|
||||
return instance.UnrecoverableInstanceState(name, "container is unhealthy: no log available", p.desiredReplicas), nil
|
||||
}
|
||||
} else {
|
||||
return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
|
||||
}
|
||||
}
|
||||
p.l.WarnContext(ctx, "container running without healthcheck, you should define a healthcheck on your container so that Sablier properly detects when the container is ready to handle requests.", slog.String("container", name))
|
||||
return instance.ReadyInstanceState(name, p.desiredReplicas), nil
|
||||
case "exited":
|
||||
if spec.State.ExitCode != 0 {
|
||||
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container exited with code \"%d\"", spec.State.ExitCode), p.desiredReplicas), nil
|
||||
}
|
||||
return instance.NotReadyInstanceState(name, 0, p.desiredReplicas), nil
|
||||
case "dead":
|
||||
return instance.UnrecoverableInstanceState(name, "container in \"dead\" state cannot be restarted", p.desiredReplicas), nil
|
||||
default:
|
||||
return instance.UnrecoverableInstanceState(name, fmt.Sprintf("container status \"%s\" not handled", spec.State.Status), p.desiredReplicas), nil
|
||||
}
|
||||
}
|
||||
|
||||
func (p *DockerClassicProvider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
|
||||
msgs, errs := p.Client.Events(ctx, types.EventsOptions{
|
||||
Filters: filters.NewArgs(
|
||||
filters.Arg("scope", "local"),
|
||||
filters.Arg("type", string(events.ContainerEventType)),
|
||||
filters.Arg("event", "die"),
|
||||
),
|
||||
})
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-msgs:
|
||||
if !ok {
|
||||
p.l.ErrorContext(ctx, "event stream closed")
|
||||
return
|
||||
}
|
||||
// Send the container that has died to the channel
|
||||
instance <- strings.TrimPrefix(msg.Actor.Attributes["name"], "/")
|
||||
case err, ok := <-errs:
|
||||
if !ok {
|
||||
p.l.ErrorContext(ctx, "event stream closed")
|
||||
return
|
||||
}
|
||||
if errors.Is(err, io.EOF) {
|
||||
p.l.ErrorContext(ctx, "event stream closed")
|
||||
return
|
||||
}
|
||||
p.l.ErrorContext(ctx, "event stream error", slog.Any("error", err))
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,405 +0,0 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/neilotoole/slogt"
|
||||
"github.com/sablierapp/sablier/pkg/provider/mocks"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/sablierapp/sablier/app/instance"
|
||||
"github.com/stretchr/testify/mock"
|
||||
)
|
||||
|
||||
func setupProvider(t *testing.T, client client.APIClient) *DockerClassicProvider {
|
||||
t.Helper()
|
||||
return &DockerClassicProvider{
|
||||
Client: client,
|
||||
desiredReplicas: 1,
|
||||
l: slogt.New(t),
|
||||
}
|
||||
}
|
||||
|
||||
func TestDockerClassicProvider_GetState(t *testing.T) {
|
||||
type fields struct {
|
||||
Client *mocks.DockerAPIClientMock
|
||||
}
|
||||
type args struct {
|
||||
name string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want instance.State
|
||||
wantErr bool
|
||||
containerSpec types.ContainerJSON
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "nginx created container state",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.CreatedContainerSpec("nginx"),
|
||||
},
|
||||
{
|
||||
name: "nginx running container state without healthcheck",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 1,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Ready,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.RunningWithoutHealthcheckContainerSpec("nginx"),
|
||||
},
|
||||
{
|
||||
name: "nginx running container state with \"starting\" health",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.RunningWithHealthcheckContainerSpec("nginx", "starting"),
|
||||
},
|
||||
{
|
||||
name: "nginx running container state with \"unhealthy\" health",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Unrecoverable,
|
||||
Message: "container is unhealthy: curl http://localhost failed (1)",
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.RunningWithHealthcheckContainerSpec("nginx", "unhealthy"),
|
||||
},
|
||||
{
|
||||
name: "nginx running container state with \"healthy\" health",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 1,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Ready,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.RunningWithHealthcheckContainerSpec("nginx", "healthy"),
|
||||
},
|
||||
{
|
||||
name: "nginx paused container state",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.PausedContainerSpec("nginx"),
|
||||
},
|
||||
{
|
||||
name: "nginx restarting container state",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.RestartingContainerSpec("nginx"),
|
||||
},
|
||||
{
|
||||
name: "nginx removing container state",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.RemovingContainerSpec("nginx"),
|
||||
},
|
||||
{
|
||||
name: "nginx exited container state with status code 0",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.NotReady,
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.ExitedContainerSpec("nginx", 0),
|
||||
},
|
||||
{
|
||||
name: "nginx exited container state with status code 137",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Unrecoverable,
|
||||
Message: "container exited with code \"137\"",
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.ExitedContainerSpec("nginx", 137),
|
||||
},
|
||||
{
|
||||
name: "nginx dead container state",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{
|
||||
Name: "nginx",
|
||||
CurrentReplicas: 0,
|
||||
DesiredReplicas: 1,
|
||||
Status: instance.Unrecoverable,
|
||||
Message: "container in \"dead\" state cannot be restarted",
|
||||
},
|
||||
wantErr: false,
|
||||
containerSpec: mocks.DeadContainerSpec("nginx"),
|
||||
},
|
||||
{
|
||||
name: "container inspect has an error",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
want: instance.State{},
|
||||
wantErr: true,
|
||||
containerSpec: types.ContainerJSON{},
|
||||
err: fmt.Errorf("container with name \"nginx\" was not found"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
provider := setupProvider(t, tt.fields.Client)
|
||||
|
||||
tt.fields.Client.On("ContainerInspect", mock.Anything, mock.Anything).Return(tt.containerSpec, tt.err)
|
||||
|
||||
got, err := provider.GetState(context.Background(), tt.args.name)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("DockerClassicProvider.GetState() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("DockerClassicProvider.GetState() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDockerClassicProvider_Stop(t *testing.T) {
|
||||
type fields struct {
|
||||
Client *mocks.DockerAPIClientMock
|
||||
}
|
||||
type args struct {
|
||||
name string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "container stop has an error",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
wantErr: true,
|
||||
err: fmt.Errorf("container with name \"nginx\" was not found"),
|
||||
},
|
||||
{
|
||||
name: "container stop as expected",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
wantErr: false,
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
provider := setupProvider(t, tt.fields.Client)
|
||||
|
||||
tt.fields.Client.On("ContainerStop", mock.Anything, mock.Anything, mock.Anything).Return(tt.err)
|
||||
|
||||
err := provider.Stop(context.Background(), tt.args.name)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("DockerClassicProvider.Stop() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDockerClassicProvider_Start(t *testing.T) {
|
||||
type fields struct {
|
||||
Client *mocks.DockerAPIClientMock
|
||||
}
|
||||
type args struct {
|
||||
name string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
wantErr bool
|
||||
err error
|
||||
}{
|
||||
{
|
||||
name: "container start has an error",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
wantErr: true,
|
||||
err: fmt.Errorf("container with name \"nginx\" was not found"),
|
||||
},
|
||||
{
|
||||
name: "container start as expected",
|
||||
fields: fields{
|
||||
Client: mocks.NewDockerAPIClientMock(),
|
||||
},
|
||||
args: args{
|
||||
name: "nginx",
|
||||
},
|
||||
wantErr: false,
|
||||
err: nil,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
provider := setupProvider(t, tt.fields.Client)
|
||||
|
||||
tt.fields.Client.On("ContainerStart", mock.Anything, mock.Anything, mock.Anything).Return(tt.err)
|
||||
|
||||
err := provider.Start(context.Background(), tt.args.name)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("DockerClassicProvider.Start() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDockerClassicProvider_NotifyInstanceStopped(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want []string
|
||||
events []events.Message
|
||||
errors []error
|
||||
}{
|
||||
{
|
||||
name: "container nginx is stopped",
|
||||
want: []string{"nginx"},
|
||||
events: []events.Message{
|
||||
mocks.ContainerStoppedEvent("nginx"),
|
||||
},
|
||||
errors: []error{},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
provider := setupProvider(t, mocks.NewDockerAPIClientMockWithEvents(tt.events, tt.errors))
|
||||
|
||||
instanceC := make(chan string, 1)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
provider.NotifyInstanceStopped(ctx, instanceC)
|
||||
|
||||
var got []string
|
||||
|
||||
got = append(got, <-instanceC)
|
||||
cancel()
|
||||
close(instanceC)
|
||||
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("NotifyInstanceStopped() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
pkg/provider/docker/events.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package docker

import (
	"context"
	"errors"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/filters"
	"io"
	"log/slog"
	"strings"
)

func (p *DockerClassicProvider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
	msgs, errs := p.Client.Events(ctx, events.ListOptions{
		Filters: filters.NewArgs(
			filters.Arg("scope", "local"),
			filters.Arg("type", string(events.ContainerEventType)),
			filters.Arg("event", "die"),
		),
	})
	for {
		select {
		case msg, ok := <-msgs:
			if !ok {
				p.l.ErrorContext(ctx, "event stream closed")
				close(instance)
				return
			}
			// Send the container that has died to the channel
			instance <- strings.TrimPrefix(msg.Actor.Attributes["name"], "/")
		case err, ok := <-errs:
			if !ok {
				p.l.ErrorContext(ctx, "event stream closed")
				close(instance)
				return
			}
			if errors.Is(err, io.EOF) {
				p.l.ErrorContext(ctx, "event stream closed")
				close(instance)
				return
			}
			p.l.ErrorContext(ctx, "event stream error", slog.Any("error", err))
		case <-ctx.Done():
			close(instance)
			return
		}
	}
}
pkg/provider/docker/events_test.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/neilotoole/slogt"
|
||||
"github.com/sablierapp/sablier/pkg/provider/docker"
|
||||
"gotest.tools/v3/assert"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestDockerClassicProvider_NotifyInstanceStopped(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping test in short mode.")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)
|
||||
defer cancel()
|
||||
dind := setupDinD(t, ctx)
|
||||
p, err := docker.NewDockerClassicProvider(ctx, dind.client, slogt.New(t))
|
||||
assert.NilError(t, err)
|
||||
|
||||
c, err := dind.CreateMimic(ctx, MimicOptions{})
|
||||
assert.NilError(t, err)
|
||||
|
||||
inspected, err := dind.client.ContainerInspect(ctx, c.ID)
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = dind.client.ContainerStart(ctx, c.ID, container.StartOptions{})
|
||||
assert.NilError(t, err)
|
||||
|
||||
<-time.After(1 * time.Second)
|
||||
|
||||
waitC := make(chan string)
|
||||
go p.NotifyInstanceStopped(ctx, waitC)
|
||||
|
||||
err = dind.client.ContainerStop(ctx, c.ID, container.StopOptions{})
|
||||
assert.NilError(t, err)
|
||||
|
||||
name := <-waitC
|
||||
|
||||
// Docker container name is prefixed with a slash, but we don't use it
|
||||
assert.Equal(t, "/"+name, inspected.Name)
|
||||
}
|
||||
pkg/provider/docker/testcontainers_test.go (new file, 163 lines)
@@ -0,0 +1,163 @@
|
||||
package docker_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/testcontainers/testcontainers-go"
|
||||
"github.com/testcontainers/testcontainers-go/wait"
|
||||
"gotest.tools/v3/assert"
|
||||
"slices"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type dindContainer struct {
|
||||
testcontainers.Container
|
||||
client *client.Client
|
||||
t *testing.T
|
||||
}
|
||||
|
||||
type MimicOptions struct {
|
||||
Cmd []string
|
||||
Healthcheck *container.HealthConfig
|
||||
RestartPolicy container.RestartPolicy
|
||||
Labels map[string]string
|
||||
}
|
||||
|
||||
func (d *dindContainer) CreateMimic(ctx context.Context, opts MimicOptions) (container.CreateResponse, error) {
|
||||
if len(opts.Cmd) == 0 {
|
||||
opts.Cmd = []string{"/mimic", "-running", "-running-after=1s", "-healthy=false"}
|
||||
}
|
||||
|
||||
d.t.Log("Creating mimic container with options", opts)
|
||||
return d.client.ContainerCreate(ctx, &container.Config{
|
||||
Entrypoint: opts.Cmd,
|
||||
Image: "sablierapp/mimic:v0.3.1",
|
||||
Labels: opts.Labels,
|
||||
Healthcheck: opts.Healthcheck,
|
||||
}, &container.HostConfig{RestartPolicy: opts.RestartPolicy}, nil, nil, "")
|
||||
}
|
||||
|
||||
func setupDinD(t *testing.T, ctx context.Context) *dindContainer {
|
||||
t.Helper()
|
||||
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
|
||||
assert.NilError(t, err)
|
||||
|
||||
req := testcontainers.ContainerRequest{
|
||||
Image: "docker:dind",
|
||||
ExposedPorts: []string{"2375/tcp"},
|
||||
WaitingFor: wait.ForLog("API listen on [::]:2375"),
|
||||
Cmd: []string{
|
||||
"dockerd", "-H", "tcp://0.0.0.0:2375", "--tls=false",
|
||||
},
|
||||
Privileged: true,
|
||||
}
|
||||
c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
|
||||
ContainerRequest: req,
|
||||
Started: true,
|
||||
Logger: testcontainers.TestLogger(t),
|
||||
})
|
||||
assert.NilError(t, err)
|
||||
t.Cleanup(func() {
|
||||
testcontainers.CleanupContainer(t, c)
|
||||
})
|
||||
|
||||
ip, err := c.Host(ctx)
|
||||
assert.NilError(t, err)
|
||||
|
||||
mappedPort, err := c.MappedPort(ctx, "2375")
|
||||
assert.NilError(t, err)
|
||||
|
||||
host := fmt.Sprintf("http://%s:%s", ip, mappedPort.Port())
|
||||
dindCli, err := client.NewClientWithOpts(client.WithHost(host), client.WithAPIVersionNegotiation())
|
||||
assert.NilError(t, err)
|
||||
|
||||
err = addMimicToDind(ctx, cli, dindCli)
|
||||
assert.NilError(t, err)
|
||||
|
||||
return &dindContainer{
|
||||
Container: c,
|
||||
client: dindCli,
|
||||
t: t,
|
||||
}
|
||||
}
|
||||
|
||||
func searchMimicImage(ctx context.Context, cli *client.Client) (string, error) {
|
||||
images, err := cli.ImageList(ctx, image.ListOptions{})
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to list images: %w", err)
|
||||
}
|
||||
|
||||
for _, summary := range images {
|
||||
if slices.Contains(summary.RepoTags, "sablierapp/mimic:v0.3.1") {
|
||||
return summary.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func pullMimicImage(ctx context.Context, cli *client.Client) error {
|
||||
reader, err := cli.ImagePull(ctx, "sablierapp/mimic:v0.3.1", image.PullOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to pull image: %w", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
resp, err := cli.ImageLoad(ctx, reader, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load image: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func addMimicToDind(ctx context.Context, cli *client.Client, dindCli *client.Client) error {
|
||||
ID, err := searchMimicImage(ctx, cli)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to search for mimic image: %w", err)
|
||||
}
|
||||
|
||||
if ID == "" {
|
||||
err = pullMimicImage(ctx, cli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ID, err = searchMimicImage(ctx, cli)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to search for mimic image even though it's just been pulled without errors: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
reader, err := cli.ImageSave(ctx, []string{ID})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save image: %w", err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
resp, err := dindCli.ImageLoad(ctx, reader, true)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load image in docker in docker container: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
buf := new(bytes.Buffer)
|
||||
_, err = buf.ReadFrom(resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read from response body: %w", err)
|
||||
}
|
||||
|
||||
list, err := dindCli.ImageList(ctx, image.ListOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dindCli.ImageTag(ctx, list[0].ID, "sablierapp/mimic:v0.3.1")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -9,8 +9,6 @@ import (
|
||||
"github.com/sablierapp/sablier/app/discovery"
|
||||
"github.com/sablierapp/sablier/app/types"
|
||||
"github.com/sablierapp/sablier/pkg/provider"
|
||||
"log/slog"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func (p *DockerSwarmProvider) InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]types.Instance, error) {
|
||||
@@ -39,7 +37,6 @@ func (p *DockerSwarmProvider) InstanceList(ctx context.Context, options provider
|
||||
|
||||
func (p *DockerSwarmProvider) serviceToInstance(s swarm.Service) (i types.Instance) {
|
||||
var group string
|
||||
var replicas uint64
|
||||
|
||||
if _, ok := s.Spec.Labels[discovery.LabelEnable]; ok {
|
||||
if g, ok := s.Spec.Labels[discovery.LabelGroup]; ok {
|
||||
@@ -47,28 +44,10 @@ func (p *DockerSwarmProvider) serviceToInstance(s swarm.Service) (i types.Instan
|
||||
} else {
|
||||
group = discovery.LabelGroupDefaultValue
|
||||
}
|
||||
|
||||
if r, ok := s.Spec.Labels[discovery.LabelReplicas]; ok {
|
||||
atoi, err := strconv.Atoi(r)
|
||||
if err != nil {
|
||||
p.l.Warn("invalid replicas label value, using default replicas value", slog.Any("error", err), slog.String("instance", s.Spec.Name), slog.String("value", r))
|
||||
replicas = discovery.LabelReplicasDefaultValue
|
||||
} else {
|
||||
replicas = uint64(atoi)
|
||||
}
|
||||
} else {
|
||||
replicas = discovery.LabelReplicasDefaultValue
|
||||
}
|
||||
}
|
||||
|
||||
return types.Instance{
|
||||
Name: s.Spec.Name,
|
||||
Kind: "service",
|
||||
// TODO
|
||||
// Status: string(s.UpdateStatus.State),
|
||||
// Replicas: s.ServiceStatus.RunningTasks,
|
||||
// DesiredReplicas: s.ServiceStatus.DesiredTasks,
|
||||
ScalingReplicas: replicas,
|
||||
Group: group,
|
||||
Name: s.Spec.Name,
|
||||
Group: group,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,8 +8,6 @@ import (
	v1 "k8s.io/api/apps/v1"
	core_v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"log/slog"
	"strconv"
	"strings"
)

@@ -47,7 +45,6 @@ func (p *KubernetesProvider) deploymentList(ctx context.Context, options provide

func (p *KubernetesProvider) deploymentToInstance(d v1.Deployment) types.Instance {
	var group string
	var replicas uint64

	if _, ok := d.Labels[discovery.LabelEnable]; ok {
		if g, ok := d.Labels[discovery.LabelGroup]; ok {
@@ -55,30 +52,13 @@ func (p *KubernetesProvider) deploymentToInstance(d v1.Deployment) types.Instanc
		} else {
			group = discovery.LabelGroupDefaultValue
		}

		if r, ok := d.Labels[discovery.LabelReplicas]; ok {
			atoi, err := strconv.Atoi(r)
			if err != nil {
				p.l.Warn("invalid replicas label value, using default replicas value", slog.Any("error", err), slog.String("instance", d.Name), slog.String("value", r))
				replicas = discovery.LabelReplicasDefaultValue
			} else {
				replicas = uint64(atoi)
			}
		} else {
			replicas = discovery.LabelReplicasDefaultValue
		}
	}

	parsed := DeploymentName(d, ParseOptions{Delimiter: p.delimiter})

	return types.Instance{
		Name: parsed.Original,
		Kind: parsed.Kind,
		Status: d.Status.String(),
		Replicas: uint64(d.Status.Replicas),
		DesiredReplicas: uint64(*d.Spec.Replicas),
		ScalingReplicas: replicas,
		Group: group,
		Name: parsed.Original,
		Group: group,
	}
}

@@ -102,7 +82,6 @@ func (p *KubernetesProvider) statefulSetList(ctx context.Context, options provid

func (p *KubernetesProvider) statefulSetToInstance(ss v1.StatefulSet) types.Instance {
	var group string
	var replicas uint64

	if _, ok := ss.Labels[discovery.LabelEnable]; ok {
		if g, ok := ss.Labels[discovery.LabelGroup]; ok {
@@ -110,29 +89,12 @@ func (p *KubernetesProvider) statefulSetToInstance(ss v1.StatefulSet) types.Inst
		} else {
			group = discovery.LabelGroupDefaultValue
		}

		if r, ok := ss.Labels[discovery.LabelReplicas]; ok {
			atoi, err := strconv.Atoi(r)
			if err != nil {
				p.l.Warn("invalid replicas label value, using default replicas value", slog.Any("error", err), slog.String("instance", ss.Name), slog.String("value", r))
				replicas = discovery.LabelReplicasDefaultValue
			} else {
				replicas = uint64(atoi)
			}
		} else {
			replicas = discovery.LabelReplicasDefaultValue
		}
	}

	parsed := StatefulSetName(ss, ParseOptions{Delimiter: p.delimiter})

	return types.Instance{
		Name: parsed.Original,
		Kind: parsed.Kind,
		Status: ss.Status.String(),
		Replicas: uint64(ss.Status.Replicas),
		DesiredReplicas: uint64(*ss.Spec.Replicas),
		ScalingReplicas: replicas,
		Group: group,
		Name: parsed.Original,
		Group: group,
	}
}

@@ -2,15 +2,13 @@ package mocks

import (
	"context"
	"github.com/docker/docker/api/types/container"
	"io"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/events"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"github.com/stretchr/testify/mock"
	"io"
	appsv1 "k8s.io/api/apps/v1"
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -18,6 +16,9 @@ import (
	v1 "k8s.io/client-go/kubernetes/typed/apps/v1"
)

// DockerAPIClientMock is a mock for the Docker API client
//
// Deprecated: tests should use a testcontainer
type DockerAPIClientMock struct {
	// Will be sent through events
	messages []events.Message
@@ -68,136 +69,6 @@ func (client *DockerAPIClientMock) Events(ctx context.Context, options types.Eve
	return evnts, errors
}

func CreatedContainerSpec(name string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Running: false,
				Status: "created",
			},
		},
	}
}

func RunningWithoutHealthcheckContainerSpec(name string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Running: true,
				Status: "running",
			},
		},
	}
}

// RunningWithHealthcheckContainerSpec Status can be "starting", "healthy" or "unhealthy"
func RunningWithHealthcheckContainerSpec(name string, status string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Running: true,
				Status: "running",
				Health: &types.Health{
					Status: status,
					Log: []*types.HealthcheckResult{
						{
							Start: time.Now().Add(-5 * time.Second),
							End: time.Now(),
							Output: "curl http://localhost failed",
							ExitCode: 1,
						},
					},
				},
			},
		},
	}
}

func PausedContainerSpec(name string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Paused: true,
				Status: "paused",
			},
		},
	}
}

func RestartingContainerSpec(name string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Restarting: true,
				Status: "restarting",
			},
		},
	}
}

func RemovingContainerSpec(name string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Status: "removing",
			},
		},
	}
}

func ExitedContainerSpec(name string, exitCode int) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				ExitCode: exitCode,
				Status: "exited",
			},
		},
	}
}

func DeadContainerSpec(name string) types.ContainerJSON {
	return types.ContainerJSON{
		ContainerJSONBase: &types.ContainerJSONBase{
			Name: name,
			ID: name,
			State: &types.ContainerState{
				Dead: true,
				Status: "dead",
			},
		},
	}
}

func ContainerStoppedEvent(name string) events.Message {
	return events.Message{
		From: name,
		Scope: "local",
		Action: "stop",
		Type: "container",
		Actor: events.Actor{
			ID: "randomid",
			Attributes: map[string]string{
				"name": name,
			},
		},
	}
}

func (client *DockerAPIClientMock) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) {
	args := client.Mock.Called(ctx, serviceID, version, service, options)
	return args.Get(0).(swarm.ServiceUpdateResponse), args.Error(1)
@@ -301,6 +172,9 @@ type KubernetesAPIClientMock struct {
	kubernetes.Clientset
}

// AppsV1InterfaceMock is a mock
//
// Deprecated: tests should use a testcontainer
type AppsV1InterfaceMock struct {
	deployments *DeploymentMock
	statefulsets *StatefulSetsMock
@@ -308,6 +182,9 @@ type AppsV1InterfaceMock struct {
	v1.AppsV1Interface
}

// DeploymentMock is a mock
//
// Deprecated: tests should use a testcontainer
type DeploymentMock struct {
	v1.DeploymentInterface
	mock.Mock

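The mocks above are now marked deprecated in favor of testcontainers. As a rough, hedged illustration only (not this repository's actual test helpers), a provider test can start a Docker-in-Docker daemon with testcontainers-go and point a Docker client at it; the helper name setupDindDaemon, the docker:dind image configuration, and port 2375 are assumptions made for the sketch:

package docker_test

import (
	"context"
	"testing"

	"github.com/testcontainers/testcontainers-go"
	"github.com/testcontainers/testcontainers-go/wait"
)

// setupDindDaemon starts a privileged docker:dind container and returns the
// TCP address of its Docker daemon. Sketch only; details are assumptions.
func setupDindDaemon(t *testing.T) string {
	ctx := context.Background()
	c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
		ContainerRequest: testcontainers.ContainerRequest{
			Image:        "docker:dind",
			Privileged:   true,
			ExposedPorts: []string{"2375/tcp"},
			Env:          map[string]string{"DOCKER_TLS_CERTDIR": ""},
			Cmd:          []string{"dockerd", "--host=tcp://0.0.0.0:2375", "--tls=false"},
			WaitingFor:   wait.ForListeningPort("2375/tcp"),
		},
		Started: true,
	})
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = c.Terminate(ctx) })

	// Address to hand to client.NewClientWithOpts(client.WithHost(...)).
	host, err := c.PortEndpoint(ctx, "2375/tcp", "tcp")
	if err != nil {
		t.Fatal(err)
	}
	return host
}
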
@@ -7,6 +7,8 @@ import (
	"github.com/sablierapp/sablier/app/instance"
)

//go:generate mockgen -package providertest -source=provider.go -destination=providertest/mock_provider.go *

type Provider interface {
	Start(ctx context.Context, name string) error
	Stop(ctx context.Context, name string) error

129
pkg/provider/providertest/mock_provider.go
Normal file
@@ -0,0 +1,129 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: provider.go
//
// Generated by this command:
//
// mockgen -package providertest -source=provider.go -destination=providertest/mock_provider.go *
//

// Package providertest is a generated GoMock package.
package providertest

import (
	context "context"
	reflect "reflect"

	instance "github.com/sablierapp/sablier/app/instance"
	types "github.com/sablierapp/sablier/app/types"
	provider "github.com/sablierapp/sablier/pkg/provider"
	gomock "go.uber.org/mock/gomock"
)

// MockProvider is a mock of Provider interface.
type MockProvider struct {
	ctrl *gomock.Controller
	recorder *MockProviderMockRecorder
	isgomock struct{}
}

// MockProviderMockRecorder is the mock recorder for MockProvider.
type MockProviderMockRecorder struct {
	mock *MockProvider
}

// NewMockProvider creates a new mock instance.
func NewMockProvider(ctrl *gomock.Controller) *MockProvider {
	mock := &MockProvider{ctrl: ctrl}
	mock.recorder = &MockProviderMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockProvider) EXPECT() *MockProviderMockRecorder {
	return m.recorder
}

// GetGroups mocks base method.
func (m *MockProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetGroups", ctx)
	ret0, _ := ret[0].(map[string][]string)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetGroups indicates an expected call of GetGroups.
func (mr *MockProviderMockRecorder) GetGroups(ctx any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroups", reflect.TypeOf((*MockProvider)(nil).GetGroups), ctx)
}

// GetState mocks base method.
func (m *MockProvider) GetState(ctx context.Context, name string) (instance.State, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "GetState", ctx, name)
	ret0, _ := ret[0].(instance.State)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// GetState indicates an expected call of GetState.
func (mr *MockProviderMockRecorder) GetState(ctx, name any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockProvider)(nil).GetState), ctx, name)
}

// InstanceList mocks base method.
func (m *MockProvider) InstanceList(ctx context.Context, options provider.InstanceListOptions) ([]types.Instance, error) {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "InstanceList", ctx, options)
	ret0, _ := ret[0].([]types.Instance)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// InstanceList indicates an expected call of InstanceList.
func (mr *MockProviderMockRecorder) InstanceList(ctx, options any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InstanceList", reflect.TypeOf((*MockProvider)(nil).InstanceList), ctx, options)
}

// NotifyInstanceStopped mocks base method.
func (m *MockProvider) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
	m.ctrl.T.Helper()
	m.ctrl.Call(m, "NotifyInstanceStopped", ctx, instance)
}

// NotifyInstanceStopped indicates an expected call of NotifyInstanceStopped.
func (mr *MockProviderMockRecorder) NotifyInstanceStopped(ctx, instance any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NotifyInstanceStopped", reflect.TypeOf((*MockProvider)(nil).NotifyInstanceStopped), ctx, instance)
}

// Start mocks base method.
func (m *MockProvider) Start(ctx context.Context, name string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Start", ctx, name)
	ret0, _ := ret[0].(error)
	return ret0
}

// Start indicates an expected call of Start.
func (mr *MockProviderMockRecorder) Start(ctx, name any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockProvider)(nil).Start), ctx, name)
}

// Stop mocks base method.
func (m *MockProvider) Stop(ctx context.Context, name string) error {
	m.ctrl.T.Helper()
	ret := m.ctrl.Call(m, "Stop", ctx, name)
	ret0, _ := ret[0].(error)
	return ret0
}

// Stop indicates an expected call of Stop.
func (mr *MockProviderMockRecorder) Stop(ctx, name any) *gomock.Call {
	mr.mock.ctrl.T.Helper()
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockProvider)(nil).Stop), ctx, name)
}
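For reference, a minimal, hedged sketch of wiring the generated MockProvider above into a unit test; the test name and the "whoami" instance name are made up for illustration, and instance.State is assumed to be a plain struct whose zero value can stand in for a real state:

package providertest_test

import (
	"context"
	"testing"

	"github.com/sablierapp/sablier/app/instance"
	"github.com/sablierapp/sablier/pkg/provider/providertest"
	"go.uber.org/mock/gomock"
)

func TestProviderConsumerWithMock(t *testing.T) {
	ctrl := gomock.NewController(t)
	p := providertest.NewMockProvider(ctrl)

	// Expect exactly one GetState call and return a zero-value state with no error.
	p.EXPECT().
		GetState(gomock.Any(), "whoami").
		Return(instance.State{}, nil)

	if _, err := p.GetState(context.Background(), "whoami"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}
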
@@ -2,10 +2,10 @@ package theme_test

import (
	"github.com/neilotoole/slogt"
	"github.com/sablierapp/sablier/pkg/theme"
	"testing"
	"testing/fstest"

	"github.com/sablierapp/sablier/app/theme"
	"github.com/stretchr/testify/assert"
)

@@ -4,13 +4,13 @@ import (
	"bytes"
	"fmt"
	"github.com/neilotoole/slogt"
	"github.com/sablierapp/sablier/pkg/theme"
	"log/slog"
	"os"
	"testing"
	"testing/fstest"
	"time"

	"github.com/sablierapp/sablier/app/theme"
	"github.com/sablierapp/sablier/version"
)