mirror of https://github.com/sablierapp/sablier.git (synced 2025-12-21 13:23:03 +01:00)
feat(providers): add provider.auto-stop-on-startup argument (#346)
This feature adds the capability to stop unregistered running instances upon startup. Previously, you had to stop running instances manually or issue an initial request that would shut them down afterwards. With this change, all discovered instances are shut down at startup. Instances must be registered using labels, e.g. sablier.enable=true. Fixes #153
app/discovery/autostop.go (new file, 59 lines)
@@ -0,0 +1,59 @@
package discovery

import (
	"context"
	"github.com/acouvreur/sablier/app/providers"
	"github.com/acouvreur/sablier/pkg/arrays"
	log "github.com/sirupsen/logrus"
	"golang.org/x/sync/errgroup"
)

// StopAllUnregisteredInstances stops all auto-discovered running instances that are not yet registered
// as running instances by Sablier.
// By default, Sablier does not stop already running instances, meaning that you need to make an
// initial request in order to trigger the scaling to zero.
func StopAllUnregisteredInstances(ctx context.Context, provider providers.Provider, registered []string) error {
	log.Info("Stopping all unregistered running instances")

	log.Tracef("Retrieving all instances with label [%v=true]", LabelEnable)
	instances, err := provider.InstanceList(ctx, providers.InstanceListOptions{
		All:    false, // Only running containers
		Labels: []string{LabelEnable},
	})
	if err != nil {
		return err
	}

	log.Tracef("Found %v instances with label [%v=true]", len(instances), LabelEnable)
	names := make([]string, 0, len(instances))
	for _, instance := range instances {
		names = append(names, instance.Name)
	}

	unregistered := arrays.RemoveElements(names, registered)
	log.Tracef("Found %v unregistered instances", len(unregistered))

	waitGroup := errgroup.Group{}

	// Since Go 1.22, each iteration of a "for" loop creates new variables,
	// so capturing the loop variable in the closure below is free of the
	// accidental-sharing bugs that earlier Go versions were prone to.
	for _, name := range unregistered {
		waitGroup.Go(stopFunc(ctx, name, provider))
	}

	return waitGroup.Wait()
}

func stopFunc(ctx context.Context, name string, provider providers.Provider) func() error {
	return func() error {
		log.Tracef("Stopping %v...", name)
		_, err := provider.Stop(ctx, name)
		if err != nil {
			log.Errorf("Could not stop %v: %v", name, err)
			return err
		}
		log.Tracef("Successfully stopped %v", name)
		return nil
	}
}
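A note on the errgroup semantics used above: Wait returns the first non-nil error, but every stop function still runs to completion, so one failing instance does not prevent the others from being stopped. A minimal, self-contained sketch of that behavior (the instance names are hypothetical):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	for _, name := range []string{"instance1", "instance2", "instance3"} {
		g.Go(func() error {
			// With Go 1.22+ loop semantics, each iteration gets its own
			// `name`, so capturing it in the closure is safe.
			if name == "instance2" {
				return fmt.Errorf("could not stop %s", name)
			}
			fmt.Println("stopped", name)
			return nil
		})
	}
	// Wait blocks until every goroutine has returned, then yields the
	// first error encountered (or nil).
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err) // error: could not stop instance2
	}
}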
app/discovery/autostop_test.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package discovery_test

import (
	"context"
	"errors"
	"github.com/acouvreur/sablier/app/discovery"
	"github.com/acouvreur/sablier/app/instance"
	"github.com/acouvreur/sablier/app/providers"
	"github.com/acouvreur/sablier/app/providers/mock"
	"github.com/acouvreur/sablier/app/types"
	"testing"
)

func TestStopAllUnregisteredInstances(t *testing.T) {
	mockProvider := new(mock.ProviderMock)
	ctx := context.TODO()

	// Define instances and registered instances
	instances := []types.Instance{
		{Name: "instance1"},
		{Name: "instance2"},
		{Name: "instance3"},
	}
	registered := []string{"instance1"}

	// Set up expectations for InstanceList
	mockProvider.On("InstanceList", ctx, providers.InstanceListOptions{
		All:    false,
		Labels: []string{discovery.LabelEnable},
	}).Return(instances, nil)

	// Set up expectations for Stop
	mockProvider.On("Stop", ctx, "instance2").Return(instance.State{}, nil)
	mockProvider.On("Stop", ctx, "instance3").Return(instance.State{}, nil)

	// Call the function under test
	err := discovery.StopAllUnregisteredInstances(ctx, mockProvider, registered)
	if err != nil {
		t.Fatalf("Expected no error, but got %v", err)
	}

	// Check expectations
	mockProvider.AssertExpectations(t)
}

func TestStopAllUnregisteredInstances_WithError(t *testing.T) {
	mockProvider := new(mock.ProviderMock)
	ctx := context.TODO()

	// Define instances and registered instances
	instances := []types.Instance{
		{Name: "instance1"},
		{Name: "instance2"},
		{Name: "instance3"},
	}
	registered := []string{"instance1"}

	// Set up expectations for InstanceList
	mockProvider.On("InstanceList", ctx, providers.InstanceListOptions{
		All:    false,
		Labels: []string{discovery.LabelEnable},
	}).Return(instances, nil)

	// Set up expectations for Stop with error
	mockProvider.On("Stop", ctx, "instance2").Return(instance.State{}, errors.New("stop error"))
	mockProvider.On("Stop", ctx, "instance3").Return(instance.State{}, nil)

	// Call the function under test
	err := discovery.StopAllUnregisteredInstances(ctx, mockProvider, registered)
	if err == nil {
		t.Fatalf("Expected error, but got nil")
	}

	// Check expectations
	mockProvider.AssertExpectations(t)
}
app/discovery/types.go (new file, 18 lines)
@@ -0,0 +1,18 @@
package discovery

const (
	LabelEnable                      = "sablier.enable"
	LabelGroup                       = "sablier.group"
	LabelGroupDefaultValue           = "default"
	LabelReplicas                    = "sablier.replicas"
	LabelReplicasDefaultValue uint64 = 1
)

type Group struct {
	Name      string
	Instances []Instance
}

type Instance struct {
	Name string
}
@@ -1,9 +1,10 @@
-package providers
+package docker

import (
	"context"
	"errors"
	"fmt"
+	"github.com/acouvreur/sablier/app/discovery"
	"io"
	"strings"

@@ -33,7 +34,7 @@ func NewDockerClassicProvider() (*DockerClassicProvider, error) {
		return nil, fmt.Errorf("cannot connect to docker host: %v", err)
	}

-	log.Trace(fmt.Sprintf("connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion))
+	log.Tracef("connection established with docker %s (API %s)", serverVersion.Version, serverVersion.APIVersion)

	return &DockerClassicProvider{
		Client: cli,

@@ -43,7 +44,7 @@ func NewDockerClassicProvider() (*DockerClassicProvider, error) {

func (provider *DockerClassicProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
	args := filters.NewArgs()
-	args.Add("label", fmt.Sprintf("%s=true", enableLabel))
+	args.Add("label", fmt.Sprintf("%s=true", discovery.LabelEnable))

	containers, err := provider.Client.ContainerList(ctx, container.ListOptions{
		All: true,

@@ -56,9 +57,9 @@ func (provider *DockerClassicProvider) GetGroups(ctx context.Context) (map[strin

	groups := make(map[string][]string)
	for _, c := range containers {
-		groupName := c.Labels[groupLabel]
+		groupName := c.Labels[discovery.LabelGroup]
		if len(groupName) == 0 {
-			groupName = defaultGroupValue
+			groupName = discovery.LabelGroupDefaultValue
		}
		group := groups[groupName]
		group = append(group, strings.TrimPrefix(c.Names[0], "/"))

@@ -1,4 +1,4 @@
-package providers
+package docker

import (
	"context"
app/providers/docker/list.go (new file, 60 lines)
@@ -0,0 +1,60 @@
package docker

import (
	"context"
	"fmt"
	"github.com/acouvreur/sablier/app/discovery"
	"github.com/acouvreur/sablier/app/providers"
	"github.com/acouvreur/sablier/app/types"
	dockertypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"strings"
)

func (provider *DockerClassicProvider) InstanceList(ctx context.Context, options providers.InstanceListOptions) ([]types.Instance, error) {
	args := filters.NewArgs()
	for _, label := range options.Labels {
		args.Add("label", label)
		args.Add("label", fmt.Sprintf("%s=true", label))
	}

	containers, err := provider.Client.ContainerList(ctx, container.ListOptions{
		All:     options.All,
		Filters: args,
	})

	if err != nil {
		return nil, err
	}

	instances := make([]types.Instance, 0, len(containers))
	for _, c := range containers {
		instance := containerToInstance(c)
		instances = append(instances, instance)
	}

	return instances, nil
}

func containerToInstance(c dockertypes.Container) types.Instance {
	var group string

	if _, ok := c.Labels[discovery.LabelEnable]; ok {
		if g, ok := c.Labels[discovery.LabelGroup]; ok {
			group = g
		} else {
			group = discovery.LabelGroupDefaultValue
		}
	}

	return types.Instance{
		Name:   strings.TrimPrefix(c.Names[0], "/"), // Container names are reported with a leading slash
		Kind:   "container",
		Status: c.Status,
		// Replicas: c.Status,
		// DesiredReplicas: 1,
		ScalingReplicas: 1,
		Group:           group,
	}
}
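One subtlety in InstanceList above: for each label, both the bare key and key=true are added as filters, and Docker ANDs multiple label filters together, so a container must carry the label and have it set to "true". A rough standalone sketch of the same listing call (assumes a reachable Docker daemon; only stock Docker SDK calls are used):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}

	args := filters.NewArgs()
	args.Add("label", "sablier.enable")      // label key must be present (any value)
	args.Add("label", "sablier.enable=true") // label must equal "true"; filters are ANDed

	containers, err := cli.ContainerList(context.Background(), container.ListOptions{
		All:     false, // running containers only
		Filters: args,
	})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.Names[0], c.Status)
	}
}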
@@ -1,9 +1,10 @@
-package providers
+package dockerswarm

import (
	"context"
	"errors"
	"fmt"
+	"github.com/acouvreur/sablier/app/discovery"
	"io"
	"strings"

@@ -78,7 +79,7 @@ func (provider *DockerSwarmProvider) scale(ctx context.Context, name string, rep

func (provider *DockerSwarmProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
	filters := filters.NewArgs()
-	filters.Add("label", fmt.Sprintf("%s=true", enableLabel))
+	filters.Add("label", fmt.Sprintf("%s=true", discovery.LabelEnable))

	services, err := provider.Client.ServiceList(ctx, types.ServiceListOptions{
		Filters: filters,

@@ -90,9 +91,9 @@ func (provider *DockerSwarmProvider) GetGroups(ctx context.Context) (map[string]

	groups := make(map[string][]string)
	for _, service := range services {
-		groupName := service.Spec.Labels[groupLabel]
+		groupName := service.Spec.Labels[discovery.LabelGroup]
		if len(groupName) == 0 {
-			groupName = defaultGroupValue
+			groupName = discovery.LabelGroupDefaultValue
		}

		group := groups[groupName]

@@ -1,4 +1,4 @@
-package providers
+package dockerswarm

import (
	"context"
app/providers/dockerswarm/list.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package dockerswarm

import (
	"context"
	"fmt"
	"github.com/acouvreur/sablier/app/discovery"
	"github.com/acouvreur/sablier/app/providers"
	"github.com/acouvreur/sablier/app/types"
	dockertypes "github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	log "github.com/sirupsen/logrus"
	"strconv"
)

func (provider *DockerSwarmProvider) InstanceList(ctx context.Context, options providers.InstanceListOptions) ([]types.Instance, error) {
	args := filters.NewArgs()
	for _, label := range options.Labels {
		args.Add("label", label)
		args.Add("label", fmt.Sprintf("%s=true", label))
	}

	services, err := provider.Client.ServiceList(ctx, dockertypes.ServiceListOptions{
		Filters: args,
	})

	if err != nil {
		return nil, err
	}

	instances := make([]types.Instance, 0, len(services))
	for _, s := range services {
		instance := serviceToInstance(s)
		instances = append(instances, instance)
	}

	return instances, nil
}

func serviceToInstance(s swarm.Service) (i types.Instance) {
	var group string
	var replicas uint64

	if _, ok := s.Spec.Labels[discovery.LabelEnable]; ok {
		if g, ok := s.Spec.Labels[discovery.LabelGroup]; ok {
			group = g
		} else {
			group = discovery.LabelGroupDefaultValue
		}

		if r, ok := s.Spec.Labels[discovery.LabelReplicas]; ok {
			atoi, err := strconv.Atoi(r)
			if err != nil {
				log.Warnf("Defaulting to default replicas value, could not convert value \"%v\" to int: %v", r, err)
				replicas = discovery.LabelReplicasDefaultValue
			} else {
				replicas = uint64(atoi)
			}
		} else {
			replicas = discovery.LabelReplicasDefaultValue
		}
	}

	return types.Instance{
		Name: s.Spec.Name,
		Kind: "service",
		// TODO
		// Status: string(s.UpdateStatus.State),
		// Replicas: s.ServiceStatus.RunningTasks,
		// DesiredReplicas: s.ServiceStatus.DesiredTasks,
		ScalingReplicas: replicas,
		Group:           group,
	}
}
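The replicas-label handling in serviceToInstance (and repeated for the Kubernetes provider below) boils down to a small defaulting rule: missing or unparsable sablier.replicas labels fall back to the default of 1. A condensed sketch of just that logic, with a hypothetical helper name and the warning log omitted:

package sketch

import "strconv"

// parseReplicas is a hypothetical condensation of the defaulting logic
// used by serviceToInstance: absent or non-integer sablier.replicas
// labels yield discovery.LabelReplicasDefaultValue (1).
func parseReplicas(labels map[string]string) uint64 {
	if r, ok := labels["sablier.replicas"]; ok {
		if n, err := strconv.Atoi(r); err == nil {
			return uint64(n)
		}
	}
	return 1
}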
@@ -1,9 +1,10 @@
-package providers
+package kubernetes

import (
	"context"
	"errors"
	"fmt"
+	"github.com/acouvreur/sablier/app/discovery"
	"strconv"
	"strings"
	"time"

@@ -54,15 +55,6 @@ func (provider *KubernetesProvider) convertName(name string) (*Config, error) {
	}, nil
}

-func (provider *KubernetesProvider) convertStatefulset(ss *appsv1.StatefulSet, replicas int32) string {
-	return fmt.Sprintf("statefulset%s%s%s%s%s%d", provider.delimiter, ss.Namespace, provider.delimiter, ss.Name, provider.delimiter, replicas)
-}
-
-func (provider *KubernetesProvider) convertDeployment(d *appsv1.Deployment, replicas int32) string {
-	return fmt.Sprintf("deployment%s%s%s%s%s%d", provider.delimiter, d.Namespace, provider.delimiter, d.Name, provider.delimiter, replicas)
-}

type KubernetesProvider struct {
	Client    kubernetes.Interface
	delimiter string

@@ -112,7 +104,7 @@ func (provider *KubernetesProvider) Stop(ctx context.Context, name string) (inst

func (provider *KubernetesProvider) GetGroups(ctx context.Context) (map[string][]string, error) {
	deployments, err := provider.Client.AppsV1().Deployments(core_v1.NamespaceAll).List(ctx, metav1.ListOptions{
-		LabelSelector: enableLabel,
+		LabelSelector: discovery.LabelEnable,
	})

	if err != nil {

@@ -121,20 +113,19 @@ func (provider *KubernetesProvider) GetGroups(ctx context.Context) (map[string][

	groups := make(map[string][]string)
	for _, deployment := range deployments.Items {
-		groupName := deployment.Labels[groupLabel]
+		groupName := deployment.Labels[discovery.LabelGroup]
		if len(groupName) == 0 {
-			groupName = defaultGroupValue
+			groupName = discovery.LabelGroupDefaultValue
		}

		group := groups[groupName]
-		// TOOD: Use annotation for scale
-		name := provider.convertDeployment(&deployment, 1)
-		group = append(group, name)
+		parsed := DeploymentName(deployment, ParseOptions{Delimiter: provider.delimiter})
+		group = append(group, parsed.Original)
		groups[groupName] = group
	}

	statefulSets, err := provider.Client.AppsV1().StatefulSets(core_v1.NamespaceAll).List(ctx, metav1.ListOptions{
-		LabelSelector: enableLabel,
+		LabelSelector: discovery.LabelEnable,
	})

	if err != nil {

@@ -142,15 +133,14 @@ func (provider *KubernetesProvider) GetGroups(ctx context.Context) (map[string][
	}

	for _, statefulSet := range statefulSets.Items {
-		groupName := statefulSet.Labels[groupLabel]
+		groupName := statefulSet.Labels[discovery.LabelGroup]
		if len(groupName) == 0 {
-			groupName = defaultGroupValue
+			groupName = discovery.LabelGroupDefaultValue
		}

		group := groups[groupName]
-		// TOOD: Use annotation for scale
-		name := provider.convertStatefulset(&statefulSet, 1)
-		group = append(group, name)
+		parsed := StatefulSetName(statefulSet, ParseOptions{Delimiter: provider.delimiter})
+		group = append(group, parsed.Original)
		groups[groupName] = group
	}

@@ -249,12 +239,14 @@ func (provider *KubernetesProvider) watchDeployents(instance chan<- string) cach
		}

		if *newDeployment.Spec.Replicas == 0 {
-			instance <- provider.convertDeployment(newDeployment, *oldDeployment.Spec.Replicas)
+			parsed := DeploymentName(*newDeployment, ParseOptions{Delimiter: provider.delimiter})
+			instance <- parsed.Original
		}
	},
	DeleteFunc: func(obj interface{}) {
		deletedDeployment := obj.(*appsv1.Deployment)
-		instance <- provider.convertDeployment(deletedDeployment, *deletedDeployment.Spec.Replicas)
+		parsed := DeploymentName(*deletedDeployment, ParseOptions{Delimiter: provider.delimiter})
+		instance <- parsed.Original
	},
	}
	factory := informers.NewSharedInformerFactoryWithOptions(provider.Client, 2*time.Second, informers.WithNamespace(core_v1.NamespaceAll))

@@ -275,12 +267,14 @@ func (provider *KubernetesProvider) watchStatefulSets(instance chan<- string) ca
		}

		if *newStatefulSet.Spec.Replicas == 0 {
-			instance <- provider.convertStatefulset(newStatefulSet, *oldStatefulSet.Spec.Replicas)
+			parsed := StatefulSetName(*newStatefulSet, ParseOptions{Delimiter: provider.delimiter})
+			instance <- parsed.Original
		}
	},
	DeleteFunc: func(obj interface{}) {
		deletedStatefulSet := obj.(*appsv1.StatefulSet)
-		instance <- provider.convertStatefulset(deletedStatefulSet, *deletedStatefulSet.Spec.Replicas)
+		parsed := StatefulSetName(*deletedStatefulSet, ParseOptions{Delimiter: provider.delimiter})
+		instance <- parsed.Original
	},
	}
	factory := informers.NewSharedInformerFactoryWithOptions(provider.Client, 2*time.Second, informers.WithNamespace(core_v1.NamespaceAll))

@@ -1,4 +1,4 @@
-package providers
+package kubernetes

import (
	"context"
app/providers/kubernetes/list.go (new file, 138 lines)
@@ -0,0 +1,138 @@
package kubernetes

import (
	"context"
	"github.com/acouvreur/sablier/app/discovery"
	"github.com/acouvreur/sablier/app/providers"
	"github.com/acouvreur/sablier/app/types"
	log "github.com/sirupsen/logrus"
	v1 "k8s.io/api/apps/v1"
	core_v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"strconv"
	"strings"
)

func (provider *KubernetesProvider) InstanceList(ctx context.Context, options providers.InstanceListOptions) ([]types.Instance, error) {
	deployments, err := provider.deploymentList(ctx, options)
	if err != nil {
		return nil, err
	}

	statefulSets, err := provider.statefulSetList(ctx, options)
	if err != nil {
		return nil, err
	}

	return append(deployments, statefulSets...), nil
}

func (provider *KubernetesProvider) deploymentList(ctx context.Context, options providers.InstanceListOptions) ([]types.Instance, error) {
	deployments, err := provider.Client.AppsV1().Deployments(core_v1.NamespaceAll).List(ctx, metav1.ListOptions{
		LabelSelector: strings.Join(options.Labels, ","),
	})

	if err != nil {
		return nil, err
	}

	instances := make([]types.Instance, 0, len(deployments.Items))
	for _, d := range deployments.Items {
		instance := provider.deploymentToInstance(d)
		instances = append(instances, instance)
	}

	return instances, nil
}

func (provider *KubernetesProvider) deploymentToInstance(d v1.Deployment) types.Instance {
	var group string
	var replicas uint64

	if _, ok := d.Labels[discovery.LabelEnable]; ok {
		if g, ok := d.Labels[discovery.LabelGroup]; ok {
			group = g
		} else {
			group = discovery.LabelGroupDefaultValue
		}

		if r, ok := d.Labels[discovery.LabelReplicas]; ok {
			atoi, err := strconv.Atoi(r)
			if err != nil {
				log.Warnf("Defaulting to default replicas value, could not convert value \"%v\" to int: %v", r, err)
				replicas = discovery.LabelReplicasDefaultValue
			} else {
				replicas = uint64(atoi)
			}
		} else {
			replicas = discovery.LabelReplicasDefaultValue
		}
	}

	parsed := DeploymentName(d, ParseOptions{Delimiter: provider.delimiter})

	return types.Instance{
		Name:            parsed.Original,
		Kind:            parsed.Kind,
		Status:          d.Status.String(),
		Replicas:        uint64(d.Status.Replicas),
		DesiredReplicas: uint64(*d.Spec.Replicas),
		ScalingReplicas: replicas,
		Group:           group,
	}
}

func (provider *KubernetesProvider) statefulSetList(ctx context.Context, options providers.InstanceListOptions) ([]types.Instance, error) {
	statefulSets, err := provider.Client.AppsV1().StatefulSets(core_v1.NamespaceAll).List(ctx, metav1.ListOptions{
		LabelSelector: strings.Join(options.Labels, ","),
	})

	if err != nil {
		return nil, err
	}

	instances := make([]types.Instance, 0, len(statefulSets.Items))
	for _, ss := range statefulSets.Items {
		instance := provider.statefulSetToInstance(ss)
		instances = append(instances, instance)
	}

	return instances, nil
}

func (provider *KubernetesProvider) statefulSetToInstance(ss v1.StatefulSet) types.Instance {
	var group string
	var replicas uint64

	if _, ok := ss.Labels[discovery.LabelEnable]; ok {
		if g, ok := ss.Labels[discovery.LabelGroup]; ok {
			group = g
		} else {
			group = discovery.LabelGroupDefaultValue
		}

		if r, ok := ss.Labels[discovery.LabelReplicas]; ok {
			atoi, err := strconv.Atoi(r)
			if err != nil {
				log.Warnf("Defaulting to default replicas value, could not convert value \"%v\" to int: %v", r, err)
				replicas = discovery.LabelReplicasDefaultValue
			} else {
				replicas = uint64(atoi)
			}
		} else {
			replicas = discovery.LabelReplicasDefaultValue
		}
	}

	parsed := StatefulSetName(ss, ParseOptions{Delimiter: provider.delimiter})

	return types.Instance{
		Name:            parsed.Original,
		Kind:            parsed.Kind,
		Status:          ss.Status.String(),
		Replicas:        uint64(ss.Status.Replicas),
		DesiredReplicas: uint64(*ss.Spec.Replicas),
		ScalingReplicas: replicas,
		Group:           group,
	}
}
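Unlike the Docker providers, which pass each label as a separate filter, the Kubernetes lists join options.Labels with commas; in Kubernetes label-selector syntax a comma means logical AND, so the effect is the same. A sketch of the call shape (assumes a configured clientset; the function name is hypothetical):

package sketch

import (
	"context"
	"strings"

	core_v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listDeployments lists deployments in all namespaces matching every
// selector in labels, e.g. []string{"sablier.enable"} or
// []string{"sablier.enable=true", "sablier.group=E2E"}.
func listDeployments(ctx context.Context, client kubernetes.Interface, labels []string) error {
	_, err := client.AppsV1().Deployments(core_v1.NamespaceAll).List(ctx, metav1.ListOptions{
		// "a,b" selects only objects that match both a AND b.
		LabelSelector: strings.Join(labels, ","),
	})
	return err
}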
app/providers/kubernetes/parse_name.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package kubernetes

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/apps/v1"
)

type ParsedName struct {
	Original  string
	Kind      string // deployment or statefulset
	Namespace string
	Name      string
}

type ParseOptions struct {
	Delimiter string
}

func ParseName(name string, opts ParseOptions) (ParsedName, error) {
	split := strings.Split(name, opts.Delimiter)
	if len(split) < 3 {
		return ParsedName{}, fmt.Errorf("invalid name, should be: kind%snamespace%sname (have %s)", opts.Delimiter, opts.Delimiter, name)
	}

	return ParsedName{
		Original:  name,
		Kind:      split[0],
		Namespace: split[1],
		Name:      split[2],
	}, nil
}

func DeploymentName(deployment v1.Deployment, opts ParseOptions) ParsedName {
	kind := "deployment"
	namespace := deployment.Namespace
	name := deployment.Name
	// TODO: Use annotation for scale
	original := fmt.Sprintf("%s%s%s%s%s%s%d", kind, opts.Delimiter, namespace, opts.Delimiter, name, opts.Delimiter, 1)

	return ParsedName{
		Original:  original,
		Kind:      kind,
		Namespace: namespace,
		Name:      name,
	}
}

func StatefulSetName(statefulSet v1.StatefulSet, opts ParseOptions) ParsedName {
	kind := "statefulset"
	namespace := statefulSet.Namespace
	name := statefulSet.Name
	// TODO: Use annotation for scale
	original := fmt.Sprintf("%s%s%s%s%s%s%d", kind, opts.Delimiter, namespace, opts.Delimiter, name, opts.Delimiter, 1)

	return ParsedName{
		Original:  original,
		Kind:      kind,
		Namespace: namespace,
		Name:      name,
	}
}
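Note that ParseName tolerates trailing segments: DeploymentName and StatefulSetName emit kind, namespace, name, and a replicas count (four delimiter-separated parts), while ParseName only requires at least three and ignores everything past the name. That is why the encoded names round-trip through the tests below even though the replicas suffix is never parsed back.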
app/providers/kubernetes/parse_name_test.go (new file, 106 lines)
@@ -0,0 +1,106 @@
package kubernetes

import (
	v1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"testing"
)

func TestParseName(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		opts     ParseOptions
		expected ParsedName
		hasError bool
	}{
		{
			name:     "Valid name with default delimiter",
			input:    "deployment:namespace:name",
			opts:     ParseOptions{Delimiter: ":"},
			expected: ParsedName{Original: "deployment:namespace:name", Kind: "deployment", Namespace: "namespace", Name: "name"},
			hasError: false,
		},
		{
			name:     "Invalid name with missing parts",
			input:    "deployment:namespace",
			opts:     ParseOptions{Delimiter: ":"},
			expected: ParsedName{},
			hasError: true,
		},
		{
			name:     "Valid name with custom delimiter",
			input:    "statefulset#namespace#name",
			opts:     ParseOptions{Delimiter: "#"},
			expected: ParsedName{Original: "statefulset#namespace#name", Kind: "statefulset", Namespace: "namespace", Name: "name"},
			hasError: false,
		},
		{
			name:     "Invalid name with incorrect delimiter",
			input:    "statefulset:namespace:name",
			opts:     ParseOptions{Delimiter: "#"},
			expected: ParsedName{},
			hasError: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result, err := ParseName(tt.input, tt.opts)
			if tt.hasError {
				if err == nil {
					t.Errorf("expected error but got nil")
				}
			} else {
				if err != nil {
					t.Errorf("expected no error but got %v", err)
				}
				if result != tt.expected {
					t.Errorf("expected %v but got %v", tt.expected, result)
				}
			}
		})
	}
}

func TestDeploymentName(t *testing.T) {
	deployment := v1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "test-namespace",
			Name:      "test-deployment",
		},
	}
	opts := ParseOptions{Delimiter: ":"}
	expected := ParsedName{
		Original:  "deployment:test-namespace:test-deployment:1",
		Kind:      "deployment",
		Namespace: "test-namespace",
		Name:      "test-deployment",
	}

	result := DeploymentName(deployment, opts)
	if result != expected {
		t.Errorf("expected %v but got %v", expected, result)
	}
}

func TestStatefulSetName(t *testing.T) {
	statefulSet := v1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "test-namespace",
			Name:      "test-statefulset",
		},
	}
	opts := ParseOptions{Delimiter: ":"}
	expected := ParsedName{
		Original:  "statefulset:test-namespace:test-statefulset:1",
		Kind:      "statefulset",
		Namespace: "test-namespace",
		Name:      "test-statefulset",
	}

	result := StatefulSetName(statefulSet, opts)
	if result != expected {
		t.Errorf("expected %v but got %v", expected, result)
	}
}
app/providers/mock/mock.go (new file, 39 lines)
@@ -0,0 +1,39 @@
package mock

import (
	"context"
	"github.com/acouvreur/sablier/app/instance"
	"github.com/acouvreur/sablier/app/providers"
	"github.com/acouvreur/sablier/app/types"
	"github.com/stretchr/testify/mock"
)

// ProviderMock is a structure that allows defining the behavior of a Provider
type ProviderMock struct {
	mock.Mock
}

func (m *ProviderMock) Start(ctx context.Context, name string) (instance.State, error) {
	args := m.Called(ctx, name)
	return args.Get(0).(instance.State), args.Error(1)
}
func (m *ProviderMock) Stop(ctx context.Context, name string) (instance.State, error) {
	args := m.Called(ctx, name)
	return args.Get(0).(instance.State), args.Error(1)
}
func (m *ProviderMock) GetState(ctx context.Context, name string) (instance.State, error) {
	args := m.Called(ctx, name)
	return args.Get(0).(instance.State), args.Error(1)
}
func (m *ProviderMock) GetGroups(ctx context.Context) (map[string][]string, error) {
	args := m.Called(ctx)
	return args.Get(0).(map[string][]string), args.Error(1)
}
func (m *ProviderMock) InstanceList(ctx context.Context, options providers.InstanceListOptions) ([]types.Instance, error) {
	args := m.Called(ctx, options)
	return args.Get(0).([]types.Instance), args.Error(1)
}

func (m *ProviderMock) NotifyInstanceStopped(ctx context.Context, instance chan<- string) {
	m.Called(ctx, instance)
}
@@ -2,37 +2,17 @@ package providers

import (
	"context"
-	"fmt"
+	"github.com/acouvreur/sablier/app/types"

	"github.com/acouvreur/sablier/app/instance"
-	"github.com/acouvreur/sablier/config"
)

-const enableLabel = "sablier.enable"
-const groupLabel = "sablier.group"
-const defaultGroupValue = "default"

type Provider interface {
	Start(ctx context.Context, name string) (instance.State, error)
	Stop(ctx context.Context, name string) (instance.State, error)
	GetState(ctx context.Context, name string) (instance.State, error)
	GetGroups(ctx context.Context) (map[string][]string, error)
+	InstanceList(ctx context.Context, options InstanceListOptions) ([]types.Instance, error)

	NotifyInstanceStopped(ctx context.Context, instance chan<- string)
}

-func NewProvider(config config.Provider) (Provider, error) {
-	if err := config.IsValid(); err != nil {
-		return nil, err
-	}
-
-	switch config.Name {
-	case "swarm", "docker_swarm":
-		return NewDockerSwarmProvider()
-	case "docker":
-		return NewDockerClassicProvider()
-	case "kubernetes":
-		return NewKubernetesProvider(config.Kubernetes)
-	}
-	return nil, fmt.Errorf("unimplemented provider %s", config.Name)
-}
app/providers/types.go (new file, 6 lines)
@@ -0,0 +1,6 @@
package providers

type InstanceListOptions struct {
	All    bool
	Labels []string
}
@@ -2,6 +2,11 @@ package app

import (
	"context"
	"fmt"
	"github.com/acouvreur/sablier/app/discovery"
	"github.com/acouvreur/sablier/app/providers/docker"
	"github.com/acouvreur/sablier/app/providers/dockerswarm"
	"github.com/acouvreur/sablier/app/providers/kubernetes"
	"os"

	"github.com/acouvreur/sablier/app/http"

@@ -29,7 +34,7 @@ func Start(conf config.Config) error {

	log.Info(version.Info())

-	provider, err := providers.NewProvider(conf.Provider)
+	provider, err := NewProvider(conf.Provider)
	if err != nil {
		return err
	}

@@ -51,6 +56,13 @@ func Start(conf config.Config) error {
		loadSessions(storage, sessionsManager)
	}

+	if conf.Provider.AutoStopOnStartup {
+		err := discovery.StopAllUnregisteredInstances(context.Background(), provider, store.Keys())
+		if err != nil {
+			log.Warnf("Stopping unregistered instances had an error: %v", err)
+		}
+	}

	var t *theme.Themes

	if conf.Strategy.Dynamic.CustomThemesPath != "" {

@@ -110,3 +122,19 @@ func saveSessions(storage storage.Storage, sessions sessions.Manager) {
		log.Error("error saving sessions", err)
	}
}

+func NewProvider(config config.Provider) (providers.Provider, error) {
+	if err := config.IsValid(); err != nil {
+		return nil, err
+	}
+
+	switch config.Name {
+	case "swarm", "docker_swarm":
+		return dockerswarm.NewDockerSwarmProvider()
+	case "docker":
+		return docker.NewDockerClassicProvider()
+	case "kubernetes":
+		return kubernetes.NewKubernetesProvider(config.Kubernetes)
+	}
+	return nil, fmt.Errorf("unimplemented provider %s", config.Name)
+}
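Two details worth calling out in the wiring above: the registered list passed to StopAllUnregisteredInstances comes from store.Keys(), i.e. the instance names with sessions restored from storage, so instances that legitimately have active sessions across a restart are spared from the sweep; and a failure during the sweep is only logged via log.Warnf, so startup proceeds even if some instances could not be stopped.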
app/types/instance.go (new file, 11 lines)
@@ -0,0 +1,11 @@
package types

type Instance struct {
	Name            string
	Kind            string
	Status          string
	Replicas        uint64
	DesiredReplicas uint64
	ScalingReplicas uint64
	Group           string
}
app/types/session.go (new file, 1 line)
@@ -0,0 +1 @@
package types
@@ -46,6 +46,8 @@ It provides an integrations with multiple reverse proxies and different loading

	// Provider flags
	startCmd.Flags().StringVar(&conf.Provider.Name, "provider.name", "docker", fmt.Sprintf("Provider to use to manage containers %v", config.GetProviders()))
	viper.BindPFlag("provider.name", startCmd.Flags().Lookup("provider.name"))
+	startCmd.Flags().BoolVar(&conf.Provider.AutoStopOnStartup, "provider.auto-stop-on-startup", true, "")
+	viper.BindPFlag("provider.auto-stop-on-startup", startCmd.Flags().Lookup("provider.auto-stop-on-startup"))
	startCmd.Flags().Float32Var(&conf.Provider.Kubernetes.QPS, "provider.kubernetes.qps", 5, "QPS limit for K8S API access client-side throttling")
	viper.BindPFlag("provider.kubernetes.qps", startCmd.Flags().Lookup("provider.kubernetes.qps"))
	startCmd.Flags().IntVar(&conf.Provider.Kubernetes.Burst, "provider.kubernetes.burst", 10, "Maximum burst for K8S API access client-side throttling")
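As the testdata changes below show, the new option is configurable through all three of Sablier's configuration channels: the CLI flag --provider.auto-stop-on-startup=false, the environment variable PROVIDER_AUTOSTOPONSTARTUP=false, and the auto-stop-on-startup key under provider: in the YAML config file. It defaults to true.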
@@ -4,7 +4,6 @@ import (
	"bufio"
	"bytes"
	"encoding/json"
-	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

@@ -21,7 +20,7 @@ func TestDefault(t *testing.T) {
	testDir, err := os.Getwd()
	require.NoError(t, err, "error getting the current working directory")

-	wantConfig, err := ioutil.ReadFile(filepath.Join(testDir, "testdata", "config_default.json"))
+	wantConfig, err := os.ReadFile(filepath.Join(testDir, "testdata", "config_default.json"))
	require.NoError(t, err, "error reading test config file")

	// CHANGE `startCmd` behavior to only print the config, this is for testing purposes only

@@ -51,7 +50,7 @@ func TestPrecedence(t *testing.T) {
	newStartCommand = mockStartCommand

	t.Run("config file", func(t *testing.T) {
-		wantConfig, err := ioutil.ReadFile(filepath.Join(testDir, "testdata", "config_yaml_wanted.json"))
+		wantConfig, err := os.ReadFile(filepath.Join(testDir, "testdata", "config_yaml_wanted.json"))
		require.NoError(t, err, "error reading test config file")

		conf = config.NewConfig()

@@ -95,7 +94,7 @@ func TestPrecedence(t *testing.T) {
		setEnvsFromFile(filepath.Join(testDir, "testdata", "config.env"))
		defer unsetEnvsFromFile(filepath.Join(testDir, "testdata", "config.env"))

-		wantConfig, err := ioutil.ReadFile(filepath.Join(testDir, "testdata", "config_cli_wanted.json"))
+		wantConfig, err := os.ReadFile(filepath.Join(testDir, "testdata", "config_cli_wanted.json"))
		require.NoError(t, err, "error reading test config file")

		cmd := NewRootCommand()
cmd/testdata/config.env (vendored, 1 line changed)
@@ -1,4 +1,5 @@
PROVIDER_NAME=envvar
+PROVIDER_AUTOSTOPONSTARTUP=false
PROVIDER_KUBERNETES_QPS=16
PROVIDER_KUBERNETES_BURST=32
PROVIDER_KUBERNETES_DELIMITER=/
cmd/testdata/config.yml (vendored, 1 line changed)
@@ -1,5 +1,6 @@
provider:
  name: configfile
+ auto-stop-on-startup: false
  kubernetes:
    qps: 64
    burst: 128
cmd/testdata/config_cli_wanted.json (vendored, 1 line changed)
@@ -8,6 +8,7 @@
  },
  "Provider": {
    "Name": "cli",
+   "AutoStopOnStartup": false,
    "Kubernetes": {
      "QPS": 256,
      "Burst": 512,
cmd/testdata/config_default.json (vendored, 1 line changed)
@@ -8,6 +8,7 @@
  },
  "Provider": {
    "Name": "docker",
+   "AutoStopOnStartup": true,
    "Kubernetes": {
      "QPS": 5,
      "Burst": 10,
cmd/testdata/config_env_wanted.json (vendored, 1 line changed)
@@ -8,6 +8,7 @@
  },
  "Provider": {
    "Name": "envvar",
+   "AutoStopOnStartup": false,
    "Kubernetes": {
      "QPS": 16,
      "Burst": 32,
cmd/testdata/config_yaml_wanted.json (vendored, 1 line changed)
@@ -8,6 +8,7 @@
  },
  "Provider": {
    "Name": "configfile",
+   "AutoStopOnStartup": false,
    "Kubernetes": {
      "QPS": 64,
      "Burst": 128,
@@ -8,8 +8,9 @@ import (

type Provider struct {
	// The provider name to use
	// It can be either docker, swarm or kubernetes. Defaults to "docker"
-	Name       string `mapstructure:"NAME" yaml:"provider,omitempty" default:"docker"`
-	Kubernetes Kubernetes
+	Name              string `mapstructure:"NAME" yaml:"name,omitempty" default:"docker"`
+	AutoStopOnStartup bool   `yaml:"auto-stop-on-startup,omitempty" default:"true"`
+	Kubernetes        Kubernetes
}

type Kubernetes struct {
@@ -137,3 +137,35 @@ func Test_Healthy(t *testing.T) {
		Status(http.StatusNotFound).
		Body().Contains(`nginx/`)
}

+func Test_Group(t *testing.T) {
+	e := httpexpect.Default(t, "http://localhost:8080/")
+
+	e.GET("/group").
+		Expect().
+		Status(http.StatusOK).
+		Body().
+		Contains(`Group E2E`).
+		Contains(`Your instance(s) will stop after 1 minute of inactivity`)
+
+	e.GET("/group").
+		WithMaxRetries(10).
+		WithRetryDelay(time.Second, time.Second*2).
+		WithRetryPolicy(httpexpect.RetryCustomHandler).
+		WithCustomHandler(func(resp *http.Response, _ error) bool {
+			if resp.Body != nil {
+				// Check body if available, etc.
+				body, err := io.ReadAll(resp.Body)
+				defer resp.Body.Close()
+				if err != nil {
+					return true
+				}
+				return !strings.Contains(string(body), "Host: localhost:8080")
+			}
+			return false
+		}).
+		Expect().
+		Status(http.StatusOK).
+		Body().Contains(`Host: localhost:8080`)
+}
go.mod (1 line changed)
@@ -16,6 +16,7 @@ require (
	github.com/spf13/pflag v1.0.5
	github.com/spf13/viper v1.19.0
	github.com/stretchr/testify v1.9.0
+	golang.org/x/sync v0.6.0
	gotest.tools/v3 v3.5.1
	k8s.io/api v0.30.2
	k8s.io/apimachinery v0.30.2
go.sum (4 lines changed)
@@ -29,10 +29,6 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU=
-github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v27.0.2+incompatible h1:mNhCtgXNV1fIRns102grG7rdzIsGGCq1OlOD0KunZos=
-github.com/docker/docker v27.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
go.work.sum (17 lines changed)
@@ -339,18 +339,35 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2 h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457 h1:zf5N6UOrA487eEFacMePxjXAJctxKmyjKUsjA11Uzuk=
golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224 h1:Ug9qvr1myri/zFN6xL17LSCBGFDnphBBhzmILHsM5TY=
pkg/arrays/remove_elements.go (new file, 21 lines)
@@ -0,0 +1,21 @@
package arrays

// RemoveElements returns a new slice containing all elements from `allElements` that are not in `elementsToRemove`
func RemoveElements(allElements, elementsToRemove []string) []string {
	// Create a map to store elements to remove for quick lookup
	removeMap := make(map[string]struct{}, len(elementsToRemove))
	for _, elem := range elementsToRemove {
		removeMap[elem] = struct{}{}
	}

	// Create a slice to store the result
	result := make([]string, 0, len(allElements)) // Preallocate memory based on the size of allElements
	for _, elem := range allElements {
		// Check if the element is not in the removeMap
		if _, found := removeMap[elem]; !found {
			result = append(result, elem)
		}
	}

	return result
}
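RemoveElements is effectively a set difference that preserves the order of the first slice, running in O(len(allElements) + len(elementsToRemove)) thanks to the lookup map. A small usage sketch with hypothetical instance names:

package main

import (
	"fmt"

	"github.com/acouvreur/sablier/pkg/arrays"
)

func main() {
	discovered := []string{"whoami", "nginx", "api"}
	registered := []string{"api"}

	// Keeps discovery order; duplicates in the first slice survive
	// unless they are also listed for removal.
	unregistered := arrays.RemoveElements(discovered, registered)
	fmt.Println(unregistered) // [whoami nginx]
}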
pkg/arrays/remove_elements_test.go (new file, 30 lines)
@@ -0,0 +1,30 @@
package arrays

import (
	"reflect"
	"testing"
)

func TestRemoveElements(t *testing.T) {
	tests := []struct {
		allElements      []string
		elementsToRemove []string
		expected         []string
	}{
		{[]string{"apple", "banana", "cherry", "date", "fig", "grape"}, []string{"banana", "date", "grape"}, []string{"apple", "cherry", "fig"}},
		{[]string{"apple", "banana", "cherry"}, []string{"date", "fig", "grape"}, []string{"apple", "banana", "cherry"}}, // No elements to remove are present
		{[]string{"apple", "banana", "cherry", "date"}, []string{}, []string{"apple", "banana", "cherry", "date"}},       // No elements to remove
		{[]string{}, []string{"apple", "banana", "cherry"}, []string{}},                                                  // Empty allElements slice
		{[]string{"apple", "banana", "banana", "cherry", "cherry", "date"}, []string{"banana", "cherry"}, []string{"apple", "date"}}, // Duplicate elements in allElements
		{[]string{"apple", "apple", "apple", "apple"}, []string{"apple"}, []string{}},                                                // All elements are removed
	}

	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			result := RemoveElements(tt.allElements, tt.elementsToRemove)
			if !reflect.DeepEqual(result, tt.expected) {
				t.Errorf("RemoveElements(%v, %v) = %v; want %v", tt.allElements, tt.elementsToRemove, result, tt.expected)
			}
		})
	}
}
@@ -57,4 +57,16 @@
		}
		reverse_proxy nginx:80
	}

+	route /group {
+		sablier http://sablier:10000 {
+			group E2E
+			session_duration 1m
+			dynamic {
+				display_name Group E2E
+				theme hacker-terminal
+			}
+		}
+		reverse_proxy whoami:80
+	}
}
@@ -20,9 +20,15 @@ services:

  whoami:
    image: containous/whoami:v1.5.0
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E

  nginx:
    image: nginx:1.27.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E
@@ -35,5 +35,6 @@ run_docker_classic_test Test_Dynamic
run_docker_classic_test Test_Blocking
run_docker_classic_test Test_Multiple
run_docker_classic_test Test_Healthy
+run_docker_classic_test Test_Group

exit $errors
@@ -57,4 +57,16 @@
		}
		reverse_proxy nginx:80
	}

+	route /group {
+		sablier http://sablier:10000 {
+			group E2E
+			session_duration 1m
+			dynamic {
+				display_name Group E2E
+				theme hacker-terminal
+			}
+		}
+		reverse_proxy whoami:80
+	}
}
@@ -26,6 +26,9 @@ services:
  whoami:
    image: containous/whoami:v1.5.0
    deploy:
+     labels:
+       - sablier.enable=true
+       - sablier.group=E2E
      replicas: 0

  nginx:

@@ -34,4 +37,7 @@ services:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
    deploy:
+     labels:
+       - sablier.enable=true
+       - sablier.group=E2E
      replicas: 0
@@ -48,5 +48,6 @@ run_docker_swarm_test Test_Dynamic
run_docker_swarm_test Test_Blocking
run_docker_swarm_test Test_Multiple
run_docker_swarm_test Test_Healthy
+run_docker_swarm_test Test_Group

exit $errors
@@ -19,4 +19,16 @@
		sablier url=http://tasks.sablier:10000 names=e2e-nginx-1 session_duration=1m dynamic.display_name=Healthy-Nginx dynamic.theme=hacker-terminal
		reverse_proxy nginx:80
	}

+	route /group {
+		sablier url=http://tasks.sablier:10000 {
+			group E2E
+			session_duration 1m
+			dynamic {
+				display_name Group E2E
+				theme hacker-terminal
+			}
+		}
+		reverse_proxy whoami:80
+	}
}
@@ -24,9 +24,15 @@ services:

  whoami:
    image: containous/whoami:v1.5.0
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E

  nginx:
    image: nginx:1.27.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E
@@ -79,4 +79,14 @@ server {
		set $sablierDynamicTheme hacker-terminal;
		js_content sablier.call;
	}

+	location /group {
+		set $sablierDynamicShowDetails true;
+		set $sablierDynamicRefreshFrequency 5s;
+		set $sablierNginxInternalRedirect @whoami;
+		set $sablierGroup E2E;
+		set $sablierDynamicName "Group E2E";
+		set $sablierDynamicTheme hacker-terminal;
+		js_content sablier.call;
+	}
}
@@ -10,7 +10,6 @@ docker version

prepare_docker_classic() {
	docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME up -d
-	docker compose -f $DOCKER_COMPOSE_FILE -p $DOCKER_COMPOSE_PROJECT_NAME stop whoami nginx
}

destroy_docker_classic() {

@@ -35,5 +34,6 @@ run_docker_classic_test Test_Dynamic
run_docker_classic_test Test_Blocking
run_docker_classic_test Test_Multiple
run_docker_classic_test Test_Healthy
+run_docker_classic_test Test_Group

exit $errors
@@ -28,6 +28,9 @@ services:
  whoami:
    image: containous/whoami:v1.5.0
    deploy:
+     labels:
+       - sablier.enable=true
+       - sablier.group=E2E
      replicas: 0

  nginx:

@@ -36,4 +39,7 @@ services:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
    deploy:
+     labels:
+       - sablier.enable=true
+       - sablier.group=E2E
      replicas: 0
@@ -79,4 +79,14 @@ server {
		set $sablierDynamicTheme hacker-terminal;
		js_content sablier.call;
	}

+	location /group {
+		set $sablierDynamicShowDetails true;
+		set $sablierDynamicRefreshFrequency 5s;
+		set $sablierNginxInternalRedirect @whoami;
+		set $sablierGroup E2E;
+		set $sablierDynamicName "Group E2E";
+		set $sablierDynamicTheme hacker-terminal;
+		js_content sablier.call;
+	}
}
@@ -47,5 +47,6 @@ run_docker_swarm_test Test_Dynamic
run_docker_swarm_test Test_Blocking
run_docker_swarm_test Test_Multiple
run_docker_swarm_test Test_Healthy
+run_docker_swarm_test Test_Group

exit $errors
@@ -4,6 +4,8 @@ metadata:
  name: whoami-deployment
  labels:
    app: whoami
+   sablier.enable: "true"
+   sablier.group: E2E
spec:
  replicas: 0
  selector:

@@ -36,6 +38,8 @@ metadata:
  name: nginx-deployment
  labels:
    app: nginx
+   sablier.enable: "true"
+   sablier.group: E2E
spec:
  replicas: 0
  selector:

@@ -98,4 +102,23 @@ spec:
            service:
              name: nginx-service
              port:
                number: 80
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: group-ingress
+  annotations:
+    kubernetes.io/ingress.class: traefik
+spec:
+  rules:
+    - host: localhost
+      http:
+        paths:
+          - path: /group
+            pathType: Prefix
+            backend:
+              service:
+                name: nginx-service
+                port:
+                  number: 80
@@ -16,7 +16,6 @@ spec:
      app: sablier
  spec:
    serviceAccountName: sablier
-   serviceAccount: sablier
    containers:
      - name: sablier
        image: acouvreur/sablier:local
@@ -43,4 +43,13 @@ routes:
      type: roundrobin
      nodes:
        "nginx:80": 1

+ - uri: "/group"
+   plugins:
+     proxywasm_sablier_plugin:
+       conf: '{ "sablier_url": "sablier:10000", "group": "E2E", "session_duration": "1m", "dynamic": { "display_name": "Group E2E" } }'
+   upstream:
+     type: roundrobin
+     nodes:
+       "whoami:80": 1
#END
@@ -20,9 +20,15 @@ services:

  whoami:
    image: containous/whoami:v1.5.0
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E

  nginx:
    image: nginx:1.27.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E
@@ -35,5 +35,6 @@ run_docker_classic_test Test_Dynamic
run_docker_classic_test Test_Blocking
run_docker_classic_test Test_Multiple
run_docker_classic_test Test_Healthy
+run_docker_classic_test Test_Group

exit $errors
@@ -19,9 +19,15 @@ services:

  whoami:
    image: containous/whoami:v1.5.0
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E

  nginx:
    image: nginx:1.27.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
+   labels:
+     - sablier.enable=true
+     - sablier.group=E2E
@@ -69,6 +69,16 @@ static_resources:
              config: # Note this config field could not be empty because the xDS API requirement.
                "@type": type.googleapis.com/google.protobuf.Empty # Empty as a placeholder.
              is_optional: true
+         - match:
+             path: "/group"
+           route:
+             cluster: whoami
+           typed_per_filter_config:
+             sablier-wasm-group:
+               "@type": type.googleapis.com/envoy.config.route.v3.FilterConfig
+               config: # Note this config field could not be empty because the xDS API requirement.
+                 "@type": type.googleapis.com/google.protobuf.Empty # Empty as a placeholder.
+               is_optional: true

          http_filters:
            - name: sablier-wasm-whoami-dynamic
@@ -184,6 +194,34 @@ static_resources:
|
||||
local:
|
||||
filename: "/etc/sablierproxywasm.wasm"
|
||||
configuration: { }
|
||||
- name: sablier-wasm-group
|
||||
disabled: true
|
||||
typed_config:
|
||||
"@type": type.googleapis.com/udpa.type.v1.TypedStruct
|
||||
type_url: type.googleapis.com/envoy.extensions.filters.http.wasm.v3.Wasm
|
||||
value:
|
||||
config:
|
||||
name: "sablier-wasm-group"
|
||||
root_id: "sablier-wasm-group"
|
||||
configuration:
|
||||
"@type": "type.googleapis.com/google.protobuf.StringValue"
|
||||
value: |
|
||||
{
|
||||
"sablier_url": "sablier:10000",
|
||||
"cluster": "sablier",
|
||||
"group": "E2E",
|
||||
"session_duration": "1m",
|
||||
"dynamic": {
|
||||
"display_name": "Group E2E"
|
||||
}
|
||||
}
|
||||
vm_config:
|
||||
runtime: "envoy.wasm.runtime.v8"
|
||||
vm_id: "vm.sablier.sablier-wasm-group"
|
||||
code:
|
||||
local:
|
||||
filename: "/etc/sablierproxywasm.wasm"
|
||||
configuration: { }
|
||||
- name: envoy.filters.http.router
|
||||
typed_config:
|
||||
"@type": type.googleapis.com/envoy.extensions.filters.http.router.v3.Router
|
||||
|
||||
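The Envoy wiring comes in two halves: `sablier-wasm-group` is declared `disabled: true` in the listener's `http_filters`, and the `/group` route re-enables it via `typed_per_filter_config`, so the Wasm plugin only runs on that path. One way to confirm the filter landed (assuming the admin interface is exposed on its conventional 9901 port, which this diff does not show):

    curl -s http://localhost:9901/config_dump | grep sablier-wasm-group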
@@ -35,5 +35,6 @@ run_docker_classic_test Test_Dynamic
run_docker_classic_test Test_Blocking
run_docker_classic_test Test_Multiple
run_docker_classic_test Test_Healthy
run_docker_classic_test Test_Group

exit $errors
@@ -20,9 +20,15 @@ services:

  whoami:
    image: containous/whoami:v1.5.0
    labels:
      - sablier.enable=true
      - sablier.group=E2E

  nginx:
    image: nginx:1.27.0
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost"]
      interval: 5s
      interval: 5s
    labels:
      - sablier.enable=true
      - sablier.group=E2E
@@ -58,5 +58,13 @@ http {
            proxy_pass http://$proxy_pass_host;
            proxy_set_header Host localhost:8080; # e2e test compliance
        }

        location /group {
            proxy_wasm proxywasm_sablier_plugin '{ "sablier_url": "sablier:10000", "group": "E2E", "session_duration": "1m", "dynamic": { "display_name": "Group E2E" } }';

            set $proxy_pass_host whoami:80$request_uri;
            proxy_pass http://$proxy_pass_host;
            proxy_set_header Host localhost:8080; # e2e test compliance
        }
    }
}
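The `set $proxy_pass_host ...` / `proxy_pass http://$proxy_pass_host` indirection is deliberate: when `proxy_pass` uses a variable, nginx resolves the upstream name per request instead of at startup, so the proxy can boot while `whoami` is still scaled to zero. Variable-based `proxy_pass` requires a `resolver`; inside a Docker network that is the embedded DNS server, along these lines (a sketch, not part of this diff, which presumably declares its own):

    resolver 127.0.0.11 valid=5s;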
@@ -35,5 +35,6 @@ run_docker_classic_test Test_Dynamic
run_docker_classic_test Test_Blocking
run_docker_classic_test Test_Multiple
run_docker_classic_test Test_Healthy
run_docker_classic_test Test_Group

exit $errors
@@ -47,12 +47,19 @@ services:
      - traefik.http.middlewares.healthy.plugin.sablier.sablierUrl=http://sablier:10000
      - traefik.http.middlewares.healthy.plugin.sablier.sessionDuration=1m
      - traefik.http.middlewares.healthy.plugin.sablier.dynamic.displayName=Healthy Nginx
      # Group Middleware
      - traefik.http.middlewares.group.plugin.sablier.group=E2E
      - traefik.http.middlewares.group.plugin.sablier.sablierUrl=http://sablier:10000
      - traefik.http.middlewares.group.plugin.sablier.sessionDuration=1m
      - traefik.http.middlewares.group.plugin.sablier.dynamic.displayName=Group E2E

  whoami:
    image: containous/whoami:v1.5.0
    # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik
    # The route doesn't exist anymore. Use dynamic-config.yml file instead.
    # labels:
    labels:
      - sablier.enable=true
      - sablier.group=E2E
      # - traefik.enable
      # - traefik.http.routers.whoami.rule=PathPrefix(`/whoami`)
      # - traefik.http.routers.whoami.middlewares=ondemand
@@ -64,7 +71,9 @@ services:
      interval: 5s
    # Cannot use labels because as soon as the container is stopped, the labels are not treated by Traefik
    # The route doesn't exist anymore. Use dynamic-config.yml file instead.
    # labels:
    labels:
      - sablier.enable=true
      - sablier.group=E2E
      # - traefik.enable
      # - traefik.http.routers.nginx.rule=PathPrefix(`/nginx`)
      # - traefik.http.routers.nginx.middlewares=ondemand
@@ -48,4 +48,12 @@ http:
        - "http"
      middlewares:
        - healthy@docker
      service: "nginx"
      service: "nginx"

    group:
      rule: PathPrefix(`/group`)
      entryPoints:
        - "http"
      middlewares:
        - group@docker
      service: "whoami"
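With the `group` router and `group@docker` middleware in place, the whole chain can be smoke-tested from the host (8080 being the published port these e2e stacks map onto Traefik's entrypoint):

    curl -v http://localhost:8080/group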
@@ -35,5 +35,6 @@ run_docker_classic_test Test_Dynamic
run_docker_classic_test Test_Blocking
run_docker_classic_test Test_Multiple
run_docker_classic_test Test_Healthy
run_docker_classic_test Test_Group

exit $errors
@@ -2,13 +2,13 @@ version: "3.7"

services:
  traefik:
    image: traefik:2.9.1
    image: traefik:v3.0.4
    command:
      - --experimental.localPlugins.sablier.moduleName=github.com/acouvreur/sablier
      - --entryPoints.http.address=:80
      - --providers.docker=true
      - --providers.docker.swarmmode=true
      - --providers.docker.swarmModeRefreshSeconds=1 # Default is 15s
      - --providers.swarm=true
      - --providers.swarm.refreshSeconds=1 # Default is 15s
      - --providers.swarm.allowemptyservices=true
    ports:
      - target: 80
        published: 8080
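The flag swap tracks Traefik v3, where Swarm support moved out of the Docker provider (`providers.docker.swarmmode`) into a standalone `providers.swarm` provider; `allowemptyservices=true` keeps a service in the routing table even at 0/0 replicas, which is what lets Sablier wake it on demand. The same settings in static-file form, as a sketch of the equivalent YAML (not part of this diff):

    providers:
      swarm:
        refreshSeconds: 1
        allowEmptyServices: true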
@@ -54,6 +54,11 @@ services:
      - traefik.http.middlewares.healthy.plugin.sablier.sablierUrl=http://tasks.sablier:10000
      - traefik.http.middlewares.healthy.plugin.sablier.sessionDuration=1m
      - traefik.http.middlewares.healthy.plugin.sablier.dynamic.displayName=Healthy Nginx
      # Group Middleware
      - traefik.http.middlewares.group.plugin.sablier.group=E2E
      - traefik.http.middlewares.group.plugin.sablier.sablierUrl=http://tasks.sablier:10000
      - traefik.http.middlewares.group.plugin.sablier.sessionDuration=1m
      - traefik.http.middlewares.group.plugin.sablier.dynamic.displayName=Group E2E
      - traefik.http.services.sablier.loadbalancer.server.port=10000

  whoami:
@@ -61,19 +66,24 @@ services:
    deploy:
      replicas: 0
      labels:
        - sablier.enable=true
        - sablier.group=E2E
        - traefik.enable=true
        # If you do not use the swarm load balancer, traefik will evict the service from its pool
        # as soon as the service is 0/0. If you do not set that, fallback to dynamic-config.yml file usage.
        - traefik.docker.lbswarm=true
        - traefik.http.routers.whoami-dynamic.middlewares=dynamic@docker
        - traefik.http.routers.whoami-dynamic.middlewares=dynamic@swarm
        - traefik.http.routers.whoami-dynamic.rule=PathPrefix(`/dynamic/whoami`)
        - traefik.http.routers.whoami-dynamic.service=whoami
        - traefik.http.routers.whoami-blocking.middlewares=blocking@docker
        - traefik.http.routers.whoami-blocking.middlewares=blocking@swarm
        - traefik.http.routers.whoami-blocking.rule=PathPrefix(`/blocking/whoami`)
        - traefik.http.routers.whoami-blocking.service=whoami
        - traefik.http.routers.whoami-multiple.middlewares=multiple@docker
        - traefik.http.routers.whoami-multiple.middlewares=multiple@swarm
        - traefik.http.routers.whoami-multiple.rule=PathPrefix(`/multiple/whoami`)
        - traefik.http.routers.whoami-multiple.service=whoami
        - traefik.http.routers.whoami-group.middlewares=group@swarm
        - traefik.http.routers.whoami-group.rule=PathPrefix(`/group`)
        - traefik.http.routers.whoami-group.service=whoami
        - traefik.http.services.whoami.loadbalancer.server.port=80

  nginx:
@@ -84,14 +94,16 @@ services:
    deploy:
      replicas: 0
      labels:
        - sablier.enable=true
        - sablier.group=E2E
        - traefik.enable=true
        # If you do not use the swarm load balancer, traefik will evict the service from its pool
        # as soon as the service is 0/0. If you do not set that, fallback to dynamic-config.yml file usage.
        - traefik.docker.lbswarm=true
        - traefik.http.routers.nginx-multiple.middlewares=multiple@docker
        - traefik.http.routers.nginx-multiple.middlewares=multiple@swarm
        - traefik.http.routers.nginx-multiple.rule=PathPrefix(`/multiple/nginx`)
        - traefik.http.routers.nginx-multiple.service=nginx
        - traefik.http.routers.nginx-healthy.middlewares=healthy@docker
        - traefik.http.routers.nginx-healthy.middlewares=healthy@swarm
        - traefik.http.routers.nginx-healthy.rule=PathPrefix(`/healthy/nginx`)
        - traefik.http.routers.nginx-healthy.service=nginx
        - traefik.http.services.nginx.loadbalancer.server.port=80

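Because the provider is now named `swarm`, every middleware reference changes its suffix in step: `dynamic@docker` becomes `dynamic@swarm`, and the new `group` middleware is only ever referenced as `group@swarm`. In Traefik, the `name@provider` form pins which configuration source owns the middleware, so the general shape of such a label is (hypothetical router and middleware names):

    - traefik.http.routers.myapp.middlewares=mymiddleware@swarm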
@@ -47,5 +47,6 @@ run_docker_swarm_test Test_Dynamic
run_docker_swarm_test Test_Blocking
run_docker_swarm_test Test_Multiple
run_docker_swarm_test Test_Healthy
run_docker_swarm_test Test_Group

exit $errors
@@ -1,8 +1,8 @@
version: '3'
services:
  server:
    image: "rancher/k3s:v1.23.12-k3s1"
    command: server --no-deploy traefik
    image: "rancher/k3s:v1.30.2-k3s1"
    command: server --disable=traefik
    tmpfs:
      - /run
      - /var/run

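The k3s bump forces the companion flag change: `--no-deploy` was deprecated and eventually removed from k3s, with `--disable=<component>` as its replacement, so the bundled Traefik stays off and the Helm-installed one below is the only ingress controller. The mapping, for reference:

    # old: server --no-deploy traefik
    # new: server --disable=traefik   (accepts a comma-separated component list)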
@@ -4,6 +4,8 @@ metadata:
  name: whoami-deployment
  labels:
    app: whoami
    sablier.enable: "true"
    sablier.group: "E2E"
spec:
  replicas: 0
  selector:
@@ -30,7 +32,7 @@ spec:
  selector:
    app: whoami
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: dynamic
@@ -44,7 +46,7 @@ spec:
    dynamic:
      displayName: 'Dynamic Whoami'
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: blocking
@@ -59,7 +61,7 @@ spec:
      timeout: 30s

---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: multiple
@@ -73,12 +75,26 @@ spec:
    dynamic:
      displayName: 'Multiple Whoami'
---
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: group
  namespace: default
spec:
  plugin:
    sablier:
      group: E2E
      sablierUrl: 'http://sablier:10000'
      sessionDuration: 1m
      dynamic:
        displayName: 'Group E2E'
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: whoami-dynamic-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.middlewares: default-dynamic@kubernetescrd
spec:
  rules:
@@ -98,7 +114,7 @@ kind: Ingress
metadata:
  name: whoami-blocking-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.middlewares: default-blocking@kubernetescrd
spec:
  rules:
@@ -118,7 +134,7 @@ kind: Ingress
metadata:
  name: whoami-multiple-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.middlewares: default-multiple@kubernetescrd
spec:
  rules:
@@ -133,13 +149,14 @@ spec:
        port:
          number: 80
---
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
    sablier.enable: "true"
    sablier.group: "E2E"
spec:
  replicas: 0
  selector:
@@ -166,7 +183,7 @@ spec:
  selector:
    app: nginx
---
apiVersion: traefik.containo.us/v1alpha1
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
  name: healthy
@@ -185,7 +202,7 @@ kind: Ingress
metadata:
  name: nginx-multiple-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.middlewares: default-multiple@kubernetescrd
spec:
  rules:
@@ -205,7 +222,7 @@ kind: Ingress
metadata:
  name: nginx-healthy-ingress
  annotations:
    kubernetes.io/ingress.class: traefik
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.middlewares: default-healthy@kubernetescrd
spec:
  rules:
@@ -218,4 +235,24 @@ spec:
        service:
          name: nginx-service
          port:
            number: 80
            number: 80
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: group-ingress
  annotations:
    traefik.ingress.kubernetes.io/router.entrypoints: web
    traefik.ingress.kubernetes.io/router.middlewares: default-group@kubernetescrd
spec:
  rules:
    - host: localhost
      http:
        paths:
          - path: /group
            pathType: Prefix
            backend:
              service:
                name: whoami-service
                port:
                  number: 80
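Two migrations overlap in this manifest: every Middleware moves from the retired `traefik.containo.us/v1alpha1` API group to `traefik.io/v1alpha1` (the CRDs shipped with Traefik v3), and the new `group` Middleware is attached through the `default-group@kubernetescrd` annotation, Traefik's `<namespace>-<name>@kubernetescrd` reference form. Assuming the v3 CRDs are installed, the new objects can be inspected with (illustrative):

    kubectl get middlewares.traefik.io -n default
    kubectl get ingress group-ingress -n default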
@@ -16,7 +16,6 @@ spec:
        app: sablier
    spec:
      serviceAccountName: sablier
      serviceAccount: sablier
      containers:
        - name: sablier
          image: acouvreur/sablier:local

@@ -25,7 +25,7 @@ destroy_kubernetes() {
prepare_traefik() {
  helm repo add traefik https://traefik.github.io/charts
  helm repo update
  helm install traefik --version 27.0.2 traefik/traefik -f values.yaml --namespace kube-system
  helm install traefik --version 28.3.0 traefik/traefik -f values.yaml --namespace kube-system
}

prepare_deployment() {
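The chart bump is the Kubernetes side of the same upgrade: the 28.x line of the traefik/traefik chart ships Traefik v3, matching the `traefik.io/v1alpha1` CRDs and the image bump used elsewhere in this commit. To see which app version a given chart release carries (illustrative):

    helm search repo traefik/traefik --versions | head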
prepare_deployment() {
|
||||
@@ -68,5 +68,6 @@ run_kubernetes_deployment_test Test_Dynamic
|
||||
run_kubernetes_deployment_test Test_Blocking
|
||||
run_kubernetes_deployment_test Test_Multiple
|
||||
run_kubernetes_deployment_test Test_Healthy
|
||||
run_kubernetes_deployment_test Test_Group
|
||||
|
||||
exit $errors
|
||||
|
||||