diff --git a/cmd/managementapi/wire.go b/cmd/managementapi/wire.go index 2ac490c74..e6176d244 100644 --- a/cmd/managementapi/wire.go +++ b/cmd/managementapi/wire.go @@ -53,7 +53,6 @@ func initializeManagementMux(ctx context.Context, conf config.Config) (*runtime. providers.ProvideDefinitionConstructors, // services - service.NewSchedulerManagerConfig, service.NewSchedulerManager, service.NewOperationManager, diff --git a/cmd/managementapi/wire_gen.go b/cmd/managementapi/wire_gen.go index 22532f956..8624d92dc 100644 --- a/cmd/managementapi/wire_gen.go +++ b/cmd/managementapi/wire_gen.go @@ -50,11 +50,7 @@ func initializeManagementMux(ctx context.Context, conf config.Config) (*runtime. if err != nil { return nil, err } - schedulerManagerConfig, err := service.NewSchedulerManagerConfig(conf) - if err != nil { - return nil, err - } - schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulerManagerConfig) + schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) schedulersHandler := handlers.ProvideSchedulersHandler(schedulerManager) operationsHandler := handlers.ProvideOperationsHandler(operationManager) serveMux := provideManagementMux(ctx, schedulersHandler, operationsHandler) diff --git a/cmd/runtimewatcher/wire.go b/cmd/runtimewatcher/wire.go index f7639ff5e..a6272fb67 100644 --- a/cmd/runtimewatcher/wire.go +++ b/cmd/runtimewatcher/wire.go @@ -31,6 +31,7 @@ import ( "github.com/topfreegames/maestro/internal/core/services/events" "github.com/topfreegames/maestro/internal/core/services/workers" "github.com/topfreegames/maestro/internal/core/worker" + workerconfigs "github.com/topfreegames/maestro/internal/core/worker/config" "github.com/topfreegames/maestro/internal/core/worker/runtimewatcher" "github.com/topfreegames/maestro/internal/service" ) @@ -42,16 +43,24 @@ func provideRuntimeWatcherBuilder() *worker.WorkerBuilder { } } +func provideRuntimeWatcherConfig(c config.Config) *workerconfigs.RuntimeWatcherConfig { + return &workerconfigs.RuntimeWatcherConfig{ + DisruptionWorkerIntervalSeconds: c.GetDuration("runtimeWatcher.disruptionWorker.intervalSeconds"), + DisruptionSafetyPercentage: c.GetFloat64("runtimeWatcher.disruptionWorker.safetyPercentage"), + } +} + var WorkerOptionsSet = wire.NewSet( service.NewRuntimeKubernetes, + service.NewRoomStorageRedis, RoomManagerSet, - wire.Struct(new(worker.WorkerOptions), "RoomManager", "Runtime")) + provideRuntimeWatcherConfig, + wire.Struct(new(worker.WorkerOptions), "Runtime", "RoomStorage", "RoomManager", "RuntimeWatcherConfig")) var RoomManagerSet = wire.NewSet( service.NewSchedulerStoragePg, service.NewClockTime, service.NewPortAllocatorRandom, - service.NewRoomStorageRedis, service.NewGameRoomInstanceStorageRedis, service.NewSchedulerCacheRedis, service.NewRoomManagerConfig, diff --git a/cmd/runtimewatcher/wire_gen.go b/cmd/runtimewatcher/wire_gen.go index 67e028115..c9e33e7be 100644 --- a/cmd/runtimewatcher/wire_gen.go +++ b/cmd/runtimewatcher/wire_gen.go @@ -12,6 +12,7 @@ import ( "github.com/topfreegames/maestro/internal/core/services/events" "github.com/topfreegames/maestro/internal/core/services/workers" "github.com/topfreegames/maestro/internal/core/worker" + config2 "github.com/topfreegames/maestro/internal/core/worker/config" "github.com/topfreegames/maestro/internal/core/worker/runtimewatcher" "github.com/topfreegames/maestro/internal/service" ) @@ -24,8 +25,7 @@ func initializeRuntimeWatcher(c config.Config) 
(*workers.WorkersManager, error) if err != nil { return nil, err } - clock := service.NewClockTime() - portAllocator, err := service.NewPortAllocatorRandom(c) + runtime, err := service.NewRuntimeKubernetes(c) if err != nil { return nil, err } @@ -33,11 +33,12 @@ func initializeRuntimeWatcher(c config.Config) (*workers.WorkersManager, error) if err != nil { return nil, err } - gameRoomInstanceStorage, err := service.NewGameRoomInstanceStorageRedis(c) + clock := service.NewClockTime() + portAllocator, err := service.NewPortAllocatorRandom(c) if err != nil { return nil, err } - runtime, err := service.NewRuntimeKubernetes(c) + gameRoomInstanceStorage, err := service.NewGameRoomInstanceStorageRedis(c) if err != nil { return nil, err } @@ -59,9 +60,12 @@ func initializeRuntimeWatcher(c config.Config) (*workers.WorkersManager, error) return nil, err } roomManager := service.NewRoomManager(clock, portAllocator, roomStorage, gameRoomInstanceStorage, runtime, eventsService, roomManagerConfig) + runtimeWatcherConfig := provideRuntimeWatcherConfig(c) workerOptions := &worker.WorkerOptions{ - RoomManager: roomManager, - Runtime: runtime, + Runtime: runtime, + RoomStorage: roomStorage, + RoomManager: roomManager, + RuntimeWatcherConfig: runtimeWatcherConfig, } workersManager := workers.NewWorkersManager(workerBuilder, c, schedulerStorage, workerOptions) return workersManager, nil @@ -76,6 +80,14 @@ func provideRuntimeWatcherBuilder() *worker.WorkerBuilder { } } -var WorkerOptionsSet = wire.NewSet(service.NewRuntimeKubernetes, RoomManagerSet, wire.Struct(new(worker.WorkerOptions), "RoomManager", "Runtime")) +func provideRuntimeWatcherConfig(c config.Config) *config2.RuntimeWatcherConfig { + return &config2.RuntimeWatcherConfig{ + DisruptionWorkerIntervalSeconds: c.GetDuration("runtimeWatcher.disruptionWorker.intervalSeconds"), + DisruptionSafetyPercentage: c.GetFloat64("runtimeWatcher.disruptionWorker.safetyPercentage"), + } +} + +var WorkerOptionsSet = wire.NewSet(service.NewRuntimeKubernetes, service.NewRoomStorageRedis, RoomManagerSet, + provideRuntimeWatcherConfig, wire.Struct(new(worker.WorkerOptions), "Runtime", "RoomStorage", "RoomManager", "RuntimeWatcherConfig")) -var RoomManagerSet = wire.NewSet(service.NewSchedulerStoragePg, service.NewClockTime, service.NewPortAllocatorRandom, service.NewRoomStorageRedis, service.NewGameRoomInstanceStorageRedis, service.NewSchedulerCacheRedis, service.NewRoomManagerConfig, service.NewRoomManager, service.NewEventsForwarder, events.NewEventsForwarderService, service.NewEventsForwarderServiceConfig) +var RoomManagerSet = wire.NewSet(service.NewSchedulerStoragePg, service.NewClockTime, service.NewPortAllocatorRandom, service.NewGameRoomInstanceStorageRedis, service.NewSchedulerCacheRedis, service.NewRoomManagerConfig, service.NewRoomManager, service.NewEventsForwarder, events.NewEventsForwarderService, service.NewEventsForwarderServiceConfig) diff --git a/cmd/worker/wire.go b/cmd/worker/wire.go index ee693ccaf..dfa4f11a9 100644 --- a/cmd/worker/wire.go +++ b/cmd/worker/wire.go @@ -70,7 +70,6 @@ func initializeWorker(c config.Config, builder *worker.WorkerBuilder) (*workerss worker.ProvideWorkerOptions, workersservice.NewWorkersManager, service.NewSchedulerManager, - service.NewSchedulerManagerConfig, ) return &workersservice.WorkersManager{}, nil diff --git a/cmd/worker/wire_gen.go b/cmd/worker/wire_gen.go index ea1da74ad..b6de22f7d 100644 --- a/cmd/worker/wire_gen.go +++ b/cmd/worker/wire_gen.go @@ -75,11 +75,7 @@ func initializeWorker(c config.Config, builder 
*worker.WorkerBuilder) (*workers. return nil, err } roomManager := service.NewRoomManager(clock, portAllocator, roomStorage, gameRoomInstanceStorage, runtime, eventsService, roomManagerConfig) - schedulerManagerConfig, err := service.NewSchedulerManagerConfig(c) - if err != nil { - return nil, err - } - schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulerManagerConfig) + schedulerManager := service.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) policyMap := service.NewPolicyMap(roomStorage) autoscaler := service.NewAutoscaler(policyMap) newversionConfig := service.NewCreateSchedulerVersionConfig(c) diff --git a/config/config.yaml b/config/config.yaml index 161291f6c..fd01fd068 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -79,8 +79,6 @@ operations: limit: 1000 services: - schedulerManager: - defaultPdbMaxUnavailable: "5%" roomManager: roomPingTimeoutMillis: 240000 roomInitializationTimeoutMillis: 120000 diff --git a/docs/reference/Scheduler.md b/docs/reference/Scheduler.md index d85e393de..d30f742a9 100644 --- a/docs/reference/Scheduler.md +++ b/docs/reference/Scheduler.md @@ -203,8 +203,7 @@ autoscaling: "metadata": {} } } - ], - "pdbMaxUnavailable": "5%", + ], "autoscaling": { "enabled": true, "min": 10, @@ -235,7 +234,6 @@ forwarders: Forwarders autoscaling: Autoscaling spec: Spec annotation: Map -pdbMaxUnavailable: String ``` - **Name**: Scheduler name. This name is unique and will be the same name used for the kubernetes namespace. It's @@ -256,7 +254,6 @@ pdbMaxUnavailable: String used by them, limits and images. More info [here](#spec). - **annotations**: Allows annotations for the scheduler's game room. Know more about annotations on Kubernetes [here](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations) -- **pdbMaxUnavailable**: Defines the disruption budget for game rooms. Optional and defaults to 5%. Value can be defined as a string representing the % between 0 and 100, "15%", or a raw number of rooms "100". ### PortRange The **PortRange** is used to select a random port for a GRU between **start** and **end**. @@ -406,43 +403,4 @@ It is represented as: - **name**: Name of the port. Facilitates on recognition; - **protocol**: Port protocol. Can be UDP, TCP or SCTP.; - **port**: The port exposed. -- **hostPortRange**: The [port range](#portrange) for the port to be allocated in the host. Mutually exclusive with the port range configured in the root structure. - -#### PDB Max Unavailable - -A string value that defines the disruption budget of Game Rooms from a specific scheduler. -Maestro will create a [PDB Resource](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) -to prevent evictions drastically impacting availability of the Game Rooms. - -By default this value is set to 5%, so at worst runtime can evit 5% of the pods. There is no way to control -what pods will be evicted - if it prefers ready, pending, etc. - -The configuration can be specified with this order of precedence: - -1. Value specified in Scheduler's definition - -```json -{ - "pdbMaxUnavailable": "10%" -} -``` - -2. Value specified in the ENV VAR: - -```shell -MAESTRO_SERVICES_SCHEDULERMANAGER_DEFAULTPDBMAXUNAVAILABLE="10%" -``` - -3. Value specified in the [config.yaml](../../config/config.yaml): - -```yaml -services: - schedulerManager: - defaultPdbMaxUnavailable: "5%" -``` - -4. Value specified in [code](../../internal/core/entities/pdb/pdb.go) that defaults to 5%: - -```go -const DefaultPdbMaxUnavailablePercentage = "5%" -``` +- **hostPortRange**: The [port range](#portrange) for the port to be allocated in the host. Mutually exclusive with the port range configured in the root structure. \ No newline at end of file diff --git a/internal/adapters/runtime/kubernetes/scheduler.go b/internal/adapters/runtime/kubernetes/scheduler.go index 8b051b8e3..2ac5ee082 100644 --- a/internal/adapters/runtime/kubernetes/scheduler.go +++ b/internal/adapters/runtime/kubernetes/scheduler.go @@ -27,18 +27,19 @@ import ( "strconv" "github.com/topfreegames/maestro/internal/core/entities" - pdbEntity "github.com/topfreegames/maestro/internal/core/entities/pdb" "github.com/topfreegames/maestro/internal/core/ports/errors" "go.uber.org/zap" v1 "k8s.io/api/core/v1" v1Policy "k8s.io/api/policy/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) const ( - MajorKubeVersionPDB int = 1 - MinorKubeVersionPDB int = 21 + DefaultDisruptionSafetyPercentage float64 = 0.05 + MajorKubeVersionPDB int = 1 + MinorKubeVersionPDB int = 21 ) func (k *kubernetes) isPDBSupported() bool { @@ -95,7 +96,10 @@ func (k *kubernetes) createPDBFromScheduler(ctx context.Context, scheduler *enti }, }, Spec: v1Policy.PodDisruptionBudgetSpec{ - MaxUnavailable: pdbEntity.ConvertStrToSpec(scheduler.PdbMaxUnavailable),
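+ // Creation is intentionally permissive: minAvailable starts at zero and is only raised later by MitigateDisruption, based on occupied rooms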
+ MinAvailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(0), + }, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "maestro-scheduler": scheduler.Name, @@ -169,8 +173,29 @@ func (k *kubernetes) DeleteScheduler(ctx context.Context, scheduler *entities.Sc return nil } -func (k *kubernetes) UpdateScheduler(ctx context.Context, scheduler *entities.Scheduler) error { - // Check if PDB exists, if not, create it +func (k *kubernetes) MitigateDisruption( + ctx context.Context, + scheduler *entities.Scheduler, + roomAmount int, + safetyPercentage float64, +) error { + if scheduler == nil { + return errors.NewErrInvalidArgument("empty pointer received for scheduler, cannot mitigate disruptions") + } + + incSafetyPercentage := 1.0 + if safetyPercentage < DefaultDisruptionSafetyPercentage { + k.logger.Warn( + "invalid safety percentage, using default percentage", + zap.Float64("safetyPercentage", safetyPercentage), + zap.Float64("DefaultDisruptionSafetyPercentage", DefaultDisruptionSafetyPercentage), + ) + safetyPercentage = DefaultDisruptionSafetyPercentage + } + incSafetyPercentage += safetyPercentage
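+ // e.g. with the default 5% margin, mitigating for 100 occupied rooms sets minAvailable to int32(100 * 1.05) = 105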
+ + // For Kubernetes, mitigating disruptions means updating the current PDB + // minAvailable to the number of occupied rooms plus the safety margin pdb, err := k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) if err != nil && !kerrors.IsNotFound(err) { // Non-recoverable errors @@ -184,8 +209,31 @@ func (k *kubernetes) UpdateScheduler(ctx context.Context, scheduler *entities.Sc } } + var currentPdbMinAvailable int32 + // a pre-existing PDB may use MaxUnavailable, in which case MinAvailable is nil + if pdb.Spec.MinAvailable != nil { + currentPdbMinAvailable = pdb.Spec.MinAvailable.IntVal + } + + if currentPdbMinAvailable == int32(float64(roomAmount)*incSafetyPercentage) { + return nil + } + + // In theory, the PDB object can change in the runtime between the initial + // fetch/creation at the beginning of this function and the update below. This + // should never happen in production, since the worker is the only agent managing + // PDBs in the namespace, but tests running parallel cases showed intermittent + // failures, so it is safer to re-fetch the PDB object right before updating it + pdb, err = k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) + if err != nil || pdb == nil { + return errors.NewErrUnexpected("non-recoverable error when getting PDB for scheduler '%s': %s", scheduler.Name, err) + } pdb.Spec = v1Policy.PodDisruptionBudgetSpec{ - MaxUnavailable: pdbEntity.ConvertStrToSpec(scheduler.PdbMaxUnavailable), + MinAvailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(float64(roomAmount) * incSafetyPercentage), + }, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ "maestro-scheduler": scheduler.Name, @@ -195,7 +243,7 @@ func (k *kubernetes) UpdateScheduler(ctx context.Context, scheduler *entities.Sc _, err = k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Update(ctx, pdb, metav1.UpdateOptions{}) if err != nil { - return errors.NewErrUnexpected("error updating PDB for scheduler '%s': %s", scheduler.Name, err) + return errors.NewErrUnexpected("error updating PDB to mitigate disruptions for scheduler '%s': %s", scheduler.Name, err) } return nil diff --git a/internal/adapters/runtime/kubernetes/scheduler_test.go b/internal/adapters/runtime/kubernetes/scheduler_test.go index ce4c30248..232541968 100644 --- a/internal/adapters/runtime/kubernetes/scheduler_test.go +++ b/internal/adapters/runtime/kubernetes/scheduler_test.go @@ -28,14 +28,18 @@ package kubernetes import ( "context" "testing" + "time" "github.com/stretchr/testify/require" "github.com/topfreegames/maestro/internal/core/entities" + "github.com/topfreegames/maestro/internal/core/entities/autoscaling" "github.com/topfreegames/maestro/internal/core/ports/errors" "github.com/topfreegames/maestro/test" v1 "k8s.io/api/core/v1" + v1Policy "k8s.io/api/policy/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" ) func TestSchedulerCreation(t *testing.T) { @@ -44,7 +48,7 @@ func TestSchedulerCreation(t *testing.T) { kubernetesRuntime := New(client, KubernetesConfig{}) t.Run("create single scheduler", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "single-scheduler-test", PdbMaxUnavailable: "5%"} + scheduler := &entities.Scheduler{Name: "single-scheduler-test"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) require.NoError(t, err) @@ -53,7 +57,7 @@ }) t.Run("fail to create scheduler with the same name", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "conflict-scheduler-test", PdbMaxUnavailable: "5%"} + scheduler := &entities.Scheduler{Name: "conflict-scheduler-test"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) require.NoError(t, err) @@ -69,7 +73,7 @@ func TestSchedulerDeletion(t *testing.T) { kubernetesRuntime := New(client, KubernetesConfig{}) t.Run("delete scheduler", func(t *testing.T) { - scheduler := &entities.Scheduler{Name: "delete-scheduler-test", PdbMaxUnavailable: "5%"} + scheduler := &entities.Scheduler{Name: "delete-scheduler-test"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) require.NoError(t, err) @@ -82,7 +86,7 @@ }) t.Run("fail to delete inexistent scheduler", func(t *testing.T) { - scheduler := 
&entities.Scheduler{Name: "delete-inexistent-scheduler-test", PdbMaxUnavailable: "5%"} + scheduler := &entities.Scheduler{Name: "delete-inexistent-scheduler-test"} err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) require.Error(t, err) require.ErrorIs(t, err, errors.ErrNotFound) @@ -100,7 +104,7 @@ func TestPDBCreationAndDeletion(t *testing.T) { t.SkipNow() } - scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-no-autoscaling", PdbMaxUnavailable: "5%"} + scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-no-autoscaling"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) if err != nil { require.ErrorIs(t, errors.ErrAlreadyExists, err) @@ -119,20 +123,61 @@ func TestPDBCreationAndDeletion(t *testing.T) { require.NotNil(t, pdb.Spec) require.NotNil(t, pdb.Spec.Selector) require.Equal(t, pdb.Name, scheduler.Name) - require.Equal(t, pdb.Spec.MaxUnavailable.StrVal, "5%") + require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) require.Contains(t, pdb.Spec.Selector.MatchLabels, "maestro-scheduler") require.Contains(t, pdb.Spec.Selector.MatchLabels["maestro-scheduler"], scheduler.Name) require.Contains(t, pdb.Labels, "app.kubernetes.io/managed-by") require.Contains(t, pdb.Labels["app.kubernetes.io/managed-by"], "maestro") }) + t.Run("pdb should not use scheduler min as minAvailable", func(t *testing.T) { + if !kubernetesRuntime.isPDBSupported() { + t.Log("Kubernetes version does not support PDB, skipping") + t.SkipNow() + } + + scheduler := &entities.Scheduler{ + Name: "scheduler-pdb-test-with-autoscaling", + Autoscaling: &autoscaling.Autoscaling{ + Enabled: true, + Min: 2, + Max: 3, + Policy: autoscaling.Policy{ + Type: autoscaling.RoomOccupancy, + Parameters: autoscaling.PolicyParameters{ + RoomOccupancy: &autoscaling.RoomOccupancyParams{ + ReadyTarget: 0.1, + }, + }, + }, + }, + } + err := kubernetesRuntime.CreateScheduler(ctx, scheduler) + if err != nil { + require.ErrorIs(t, errors.ErrAlreadyExists, err) + } + + defer func() { + err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) + if err != nil { + require.ErrorIs(t, errors.ErrNotFound, err) + } + }() + + pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, pdb) + require.Equal(t, pdb.Name, scheduler.Name) + require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) + }) + t.Run("delete pdb on scheduler deletion", func(t *testing.T) { if !kubernetesRuntime.isPDBSupported() { t.Log("Kubernetes version does not support PDB, skipping") t.SkipNow() } - scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-delete", PdbMaxUnavailable: "5%"} + scheduler := &entities.Scheduler{Name: "scheduler-pdb-test-delete"} err := kubernetesRuntime.CreateScheduler(ctx, scheduler) if err != nil { require.ErrorIs(t, errors.ErrAlreadyExists, err) @@ -142,6 +187,7 @@ func TestPDBCreationAndDeletion(t *testing.T) { require.NoError(t, err) require.NotNil(t, pdb) require.Equal(t, pdb.Name, scheduler.Name) + require.Equal(t, pdb.Spec.MinAvailable.IntVal, int32(0)) err = kubernetesRuntime.DeleteScheduler(ctx, scheduler) if err != nil { @@ -152,3 +198,193 @@ func TestPDBCreationAndDeletion(t *testing.T) { require.True(t, kerrors.IsNotFound(err)) }) } + +func TestMitigateDisruption(t *testing.T) { + ctx := context.Background() + client := test.GetKubernetesClientSet(t, kubernetesContainer) + kubernetesRuntime := New(client) + + t.Run("should not mitigate disruption if scheduler is nil", func(t *testing.T) { + err := 
+ err = kubernetesRuntime.MitigateDisruption(ctx, scheduler, occupiedRooms, 0.0) + require.NoError(t, err) + + pdb, err = client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, pdb) + require.Equal(t, pdb.Name, scheduler.Name) + + incSafetyPercentage := 1.0 + DefaultDisruptionSafetyPercentage + newRoomAmount := int32(float64(occupiedRooms) * incSafetyPercentage) + require.Equal(t, pdb.Spec.MinAvailable.IntVal, newRoomAmount) + }) + + t.Run("should default safety percentage if invalid value", func(t *testing.T) { + if !kubernetesRuntime.isPDBSupported() { + t.Log("Kubernetes version does not support PDB, skipping") + t.SkipNow() + } + + scheduler := &entities.Scheduler{ + Name: "scheduler-pdb-mitigation-default", + Autoscaling: &autoscaling.Autoscaling{ + Enabled: true, + Min: 100, + Max: 200, + Policy: autoscaling.Policy{ + Type: autoscaling.RoomOccupancy, + Parameters: autoscaling.PolicyParameters{ + RoomOccupancy: &autoscaling.RoomOccupancyParams{ + ReadyTarget: 0.1, + }, + }, + }, + }, + } + err := kubernetesRuntime.CreateScheduler(ctx, scheduler) + if err != nil { + require.ErrorIs(t, errors.ErrAlreadyExists, err) + } + + defer func() { + err := kubernetesRuntime.DeleteScheduler(ctx, scheduler) + if err != nil { + require.ErrorIs(t, errors.ErrNotFound, err) + } + }() + + time.Sleep(time.Millisecond * 100) + pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, pdb) + require.Equal(t, pdb.Name, scheduler.Name) + require.Equal(t, 
pdb.Spec.MinAvailable.IntVal, int32(0)) + + newValue := 100 + err = kubernetesRuntime.MitigateDisruption(ctx, scheduler, newValue, 0.0) + require.NoError(t, err) + + time.Sleep(time.Millisecond * 100) + pdb, err = client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, pdb) + require.Equal(t, pdb.Name, scheduler.Name) + + incSafetyPercentage := 1.0 + DefaultDisruptionSafetyPercentage + require.Equal(t, int32(float64(newValue)*incSafetyPercentage), pdb.Spec.MinAvailable.IntVal) + }) + + t.Run("should clear maxUnavailable and set minAvailable if existing PDB uses maxUnavailable", func(t *testing.T) { + if !kubernetesRuntime.isPDBSupported() { + t.Log("Kubernetes version does not support PDB, skipping") + t.SkipNow() + } + scheduler := &entities.Scheduler{ + Name: "scheduler-pdb-max-unavailable", + } + pdbSpec := &v1Policy.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: scheduler.Name, + Labels: map[string]string{ + "app.kubernetes.io/managed-by": "maestro", + }, + }, + Spec: v1Policy.PodDisruptionBudgetSpec{ + MaxUnavailable: &intstr.IntOrString{ + Type: intstr.Int, + IntVal: int32(10), + }, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "maestro-scheduler": scheduler.Name, + }, + }, + }, + } + namespace := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: scheduler.Name, + }, + } + + _, err := client.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{}) + require.NoError(t, err) + + pdb, err := client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Create(ctx, pdbSpec, metav1.CreateOptions{}) + require.NoError(t, err) + require.NotNil(t, pdb) + require.Equal(t, pdb.Spec.MaxUnavailable.IntVal, int32(10)) + + occupiedRooms := 100 + err = kubernetesRuntime.MitigateDisruption(ctx, scheduler, occupiedRooms, 0.0) + require.NoError(t, err) + + pdb, err = client.PolicyV1().PodDisruptionBudgets(scheduler.Name).Get(ctx, scheduler.Name, metav1.GetOptions{}) + require.NoError(t, err) + require.NotNil(t, pdb) + require.Equal(t, pdb.Name, scheduler.Name) + + incSafetyPercentage := 1.0 + DefaultDisruptionSafetyPercentage + newRoomAmount := int32(float64(occupiedRooms) * incSafetyPercentage) + require.Equal(t, pdb.Spec.MinAvailable.IntVal, newRoomAmount) + require.Nil(t, pdb.Spec.MaxUnavailable) + + }) +} diff --git a/internal/adapters/storage/postgres/scheduler/db_scheduler.go b/internal/adapters/storage/postgres/scheduler/db_scheduler.go index 8322d8b83..6b5c30811 100644 --- a/internal/adapters/storage/postgres/scheduler/db_scheduler.go +++ b/internal/adapters/storage/postgres/scheduler/db_scheduler.go @@ -57,7 +57,6 @@ type schedulerInfo struct { TerminationGracePeriod time.Duration Toleration string Affinity string - PdbMaxUnavailable string Containers []game_room.Container PortRange *port.PortRange MaxSurge string @@ -79,7 +78,6 @@ func NewDBScheduler(scheduler *entities.Scheduler) *Scheduler { MaxSurge: scheduler.MaxSurge, RoomsReplicas: scheduler.RoomsReplicas, Forwarders: scheduler.Forwarders, - PdbMaxUnavailable: scheduler.PdbMaxUnavailable, Autoscaling: scheduler.Autoscaling, Annotations: scheduler.Annotations, Labels: scheduler.Labels, @@ -115,14 +113,13 @@ func (s *Scheduler) ToScheduler() (*entities.Scheduler, error) { Affinity: info.Affinity, Containers: info.Containers, }, - PortRange: info.PortRange, - RollbackVersion: s.RollbackVersion, - CreatedAt: s.CreatedAt.Time, - LastDownscaleAt: info.LastDownscaleAt, - MaxSurge: info.MaxSurge, - 
RoomsReplicas: info.RoomsReplicas, - Forwarders: info.Forwarders, - PdbMaxUnavailable: info.PdbMaxUnavailable, - Autoscaling: info.Autoscaling, + PortRange: info.PortRange, + RollbackVersion: s.RollbackVersion, + CreatedAt: s.CreatedAt.Time, + LastDownscaleAt: info.LastDownscaleAt, + MaxSurge: info.MaxSurge, + RoomsReplicas: info.RoomsReplicas, + Forwarders: info.Forwarders, + Autoscaling: info.Autoscaling, }, nil } diff --git a/internal/api/handlers/requestadapters/schedulers.go b/internal/api/handlers/requestadapters/schedulers.go index 40496d2ca..e5f20761b 100644 --- a/internal/api/handlers/requestadapters/schedulers.go +++ b/internal/api/handlers/requestadapters/schedulers.go @@ -72,10 +72,6 @@ func FromApiPatchSchedulerRequestToChangeMap(request *api.PatchSchedulerRequest) patchMap[patch.LabelSchedulerForwarders] = fromApiForwarders(request.GetForwarders()) } - if request.PdbMaxUnavailable != "" { - patchMap[patch.LabelPDBMaxUnavailable] = request.GetPdbMaxUnavailable() - } - if request.Annotations != nil { patchMap[patch.LabelAnnotations] = request.GetAnnotations() } @@ -130,7 +126,6 @@ func FromApiCreateSchedulerRequestToEntity(request *api.CreateSchedulerRequest) int(request.GetRoomsReplicas()), schedulerAutoscaling, fromApiForwarders(request.GetForwarders()), - request.GetPdbMaxUnavailable(), request.GetAnnotations(), request.GetLabels(), ) @@ -168,7 +163,6 @@ func FromApiNewSchedulerVersionRequestToEntity(request *api.NewSchedulerVersionR int(request.GetRoomsReplicas()), schedulerAutoscaling, fromApiForwarders(request.GetForwarders()), - request.GetPdbMaxUnavailable(), request.GetAnnotations(), request.GetLabels(), ) @@ -182,19 +176,18 @@ func FromEntitySchedulerToResponse(entity *entities.Scheduler) (*api.Scheduler, } return &api.Scheduler{ - Name: entity.Name, - Game: entity.Game, - State: entity.State, - PortRange: getPortRange(entity.PortRange), - CreatedAt: timestamppb.New(entity.CreatedAt), - MaxSurge: entity.MaxSurge, - RoomsReplicas: int32(entity.RoomsReplicas), - Spec: getSpec(entity.Spec), - Autoscaling: getAutoscaling(entity.Autoscaling), - Forwarders: forwarders, - PdbMaxUnavailable: entity.PdbMaxUnavailable, - Annotations: entity.Annotations, - Labels: entity.Labels, + Name: entity.Name, + Game: entity.Game, + State: entity.State, + PortRange: getPortRange(entity.PortRange), + CreatedAt: timestamppb.New(entity.CreatedAt), + MaxSurge: entity.MaxSurge, + RoomsReplicas: int32(entity.RoomsReplicas), + Spec: getSpec(entity.Spec), + Autoscaling: getAutoscaling(entity.Autoscaling), + Forwarders: forwarders, + Annotations: entity.Annotations, + Labels: entity.Labels, }, nil } diff --git a/internal/api/handlers/requestadapters/schedulers_test.go b/internal/api/handlers/requestadapters/schedulers_test.go index a823a8a57..8720734ff 100644 --- a/internal/api/handlers/requestadapters/schedulers_test.go +++ b/internal/api/handlers/requestadapters/schedulers_test.go @@ -330,7 +330,6 @@ func TestFromApiPatchSchedulerRequestToChangeMap(t *testing.T) { }, }, }, - { Title: "only autoscaling should convert api.PatchSchedulerRequest to change map", Input: Input{ @@ -574,9 +573,8 @@ func TestFromApiCreateSchedulerRequestToEntity(t *testing.T) { }, }, }, - PdbMaxUnavailable: "42%", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -676,9 +674,8 @@ func TestFromApiCreateSchedulerRequestToEntity(t *testing.T) { }, }, }, - PdbMaxUnavailable: "42%", - Annotations: map[string]string{}, - 
Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, @@ -1231,9 +1228,8 @@ func TestFromApiNewSchedulerVersionRequestToEntity(t *testing.T) { }, }, }, - PdbMaxUnavailable: "12", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -1333,9 +1329,8 @@ func TestFromApiNewSchedulerVersionRequestToEntity(t *testing.T) { }, }, }, - PdbMaxUnavailable: "12", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, @@ -1468,9 +1463,8 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - PdbMaxUnavailable: "10%", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -1569,9 +1563,8 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - PdbMaxUnavailable: "10%", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, @@ -1651,9 +1644,8 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - PdbMaxUnavailable: "", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, Output: Output{ @@ -1738,9 +1730,8 @@ func TestFromEntitySchedulerToResponse(t *testing.T) { }, }, }, - PdbMaxUnavailable: "", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, }, }, }, diff --git a/internal/api/handlers/schedulers_handler_test.go b/internal/api/handlers/schedulers_handler_test.go index e407d53db..4bc50f0b1 100644 --- a/internal/api/handlers/schedulers_handler_test.go +++ b/internal/api/handlers/schedulers_handler_test.go @@ -69,7 +69,7 @@ func TestListSchedulers(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), &filters.SchedulerFilter{Name: schedulerName, Game: game, Version: version}).Return([]*entities.Scheduler{ { @@ -115,7 +115,7 @@ func TestListSchedulers(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return([]*entities.Scheduler{}, nil) @@ -146,7 +146,7 @@ func TestListSchedulers(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, 
fmt.Errorf("GetSchedulersWithFilter error")) @@ -203,7 +203,7 @@ func TestGetScheduler(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(nil, schedulerCache, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(nil, schedulerCache, nil, nil) scheduler := &entities.Scheduler{ Name: "zooba-us", @@ -272,9 +272,8 @@ func TestGetScheduler(t *testing.T) { }, }, }, - PdbMaxUnavailable: "10%", - Annotations: map[string]string{}, - Labels: map[string]string{}, + Annotations: map[string]string{}, + Labels: map[string]string{}, } schedulerCache.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(scheduler, nil) @@ -304,7 +303,7 @@ func TestGetScheduler(t *testing.T) { schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil) schedulerCache.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("scheduler NonExistentSchedule not found")) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("scheduler NonExistentSchedule not found")) @@ -337,7 +336,7 @@ func TestGetScheduler(t *testing.T) { schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, nil, nil) schedulerCache.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrInvalidArgument("Error")) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrInvalidArgument("Error")) @@ -372,7 +371,7 @@ func TestGetSchedulerVersions(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) createdAtV1, _ := time.Parse(time.RFC3339Nano, "2020-01-01T00:00:00.001Z") createdAtV2, _ := time.Parse(time.RFC3339Nano, "2020-01-01T00:00:00.001Z") @@ -413,7 +412,7 @@ func TestGetSchedulerVersions(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) schedulerStorage.EXPECT().GetSchedulerVersions(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("scheduler NonExistentScheduler not found")) @@ -439,7 +438,7 @@ func TestGetSchedulerVersions(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := 
schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) schedulerStorage.EXPECT().GetSchedulerVersions(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrInvalidArgument("Error")) @@ -476,7 +475,7 @@ func TestCreateScheduler(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) scheduler := &entities.Scheduler{ Name: "scheduler-name-1", @@ -543,9 +542,8 @@ func TestCreateScheduler(t *testing.T) { }, }, }, - PdbMaxUnavailable: "10%", - Annotations: map[string]string{"imageregistry": "https://docker.hub.com/"}, - Labels: map[string]string{"scheduler": "scheduler-name"}, + Annotations: map[string]string{"imageregistry": "https://docker.hub.com/"}, + Labels: map[string]string{"scheduler": "scheduler-name"}, } schedulerStorage.EXPECT().CreateScheduler(gomock.Any(), gomock.Any()).Do( @@ -590,7 +588,7 @@ func TestCreateScheduler(t *testing.T) { }) t.Run("with failure", func(t *testing.T) { - schedulerManager := schedulers.NewSchedulerManager(nil, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(nil, nil, nil, nil) mux := runtime.NewServeMux() err := api.RegisterSchedulersServiceHandlerServer(context.Background(), mux, ProvideSchedulersHandler(schedulerManager)) @@ -636,7 +634,7 @@ func TestCreateScheduler(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) schedulerStorage.EXPECT().CreateScheduler(gomock.Any(), gomock.Any()).Return(errors.NewErrAlreadyExists("error creating scheduler %s: name already exists", "scheduler")) @@ -682,7 +680,7 @@ func TestNewSchedulerVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(&operation.Operation{ID: "id-1"}, nil) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), "scheduler-name-1").Return(currentScheduler, nil) @@ -713,7 +711,7 @@ func TestNewSchedulerVersion(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := 
schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), "scheduler-name-1").Return(nil, errors.NewErrNotFound("err")) @@ -746,7 +744,7 @@ func TestNewSchedulerVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(nil, errors.NewErrUnexpected("storage offline")) schedulerStorage.EXPECT().GetScheduler(gomock.Any(), "scheduler-name-1").Return(currentScheduler, nil) @@ -782,7 +780,7 @@ func TestSwitchActiveVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(&operation.Operation{ID: "id-1"}, nil) @@ -811,7 +809,7 @@ func TestSwitchActiveVersion(t *testing.T) { operationManager := mock.NewMockOperationManager(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) schedulerCache := mockports.NewMockSchedulerCache(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) operationManager.EXPECT().CreateOperation(gomock.Any(), "scheduler-name-1", gomock.Any()).Return(nil, errors.NewErrUnexpected("internal error")) @@ -836,7 +834,7 @@ func TestGetSchedulersInfo(t *testing.T) { schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) roomStorage := mockports.NewMockRoomStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, roomStorage) scheduler := newValidScheduler() scheduler.Autoscaling = &autoscaling.Autoscaling{ @@ -873,7 +871,7 @@ func TestGetSchedulersInfo(t *testing.T) { t.Run("with valid request and no scheduler and game rooms found", func(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) 
schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("err")) mux := runtime.NewServeMux() @@ -897,7 +895,7 @@ func TestGetSchedulersInfo(t *testing.T) { t.Run("with unknown error", func(t *testing.T) { mockCtrl := gomock.NewController(t) schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, nil, nil) schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrUnexpected("exception")) mux := runtime.NewServeMux() @@ -1094,7 +1092,7 @@ func TestPatchScheduler(t *testing.T) { schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl) operationManager := mock.NewMockOperationManager(mockCtrl) - schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, operationManager, nil, schedulers.SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"}) + schedulerManager := schedulers.NewSchedulerManager(schedulerStorage, nil, operationManager, nil) schedulerStorage.EXPECT(). GetScheduler(gomock.Any(), "scheduler-name-1"). diff --git a/internal/core/entities/pdb/pdb.go b/internal/core/entities/pdb/pdb.go deleted file mode 100644 index 98c5d39e4..000000000 --- a/internal/core/entities/pdb/pdb.go +++ /dev/null @@ -1,53 +0,0 @@ -// MIT License -// -// Copyright (c) 2021 TFG Co -// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package pdb - -import ( - "strconv" - "strings" - - "k8s.io/apimachinery/pkg/util/intstr" -) - -const DefaultPdbMaxUnavailablePercentage = "5%" - -func ConvertStrToSpec(val string) *intstr.IntOrString { - if val == "" { - return nil - } - if strings.HasSuffix(val, "%") { - return &intstr.IntOrString{ - Type: intstr.String, - StrVal: val, - } - } else { - intVal, err := strconv.Atoi(val) - if err != nil { - return nil - } - return &intstr.IntOrString{ - Type: intstr.Int, - IntVal: int32(intVal), - } - } -} diff --git a/internal/core/entities/scheduler.go b/internal/core/entities/scheduler.go index 08f48db50..b98c50eea 100644 --- a/internal/core/entities/scheduler.go +++ b/internal/core/entities/scheduler.go @@ -61,21 +61,20 @@ var ( // Scheduler represents one of the basic maestro structs. // It holds GameRooms specifications, as well as optional events forwarders. 
type Scheduler struct { - Name string `validate:"required,kube_resource_name"` - Game string `validate:"required"` - State string `validate:"required"` - RollbackVersion string - Spec game_room.Spec - Autoscaling *autoscaling.Autoscaling - PortRange *port.PortRange - RoomsReplicas int `validate:"min=0"` - CreatedAt time.Time - LastDownscaleAt time.Time - MaxSurge string `validate:"required,max_surge"` - Forwarders []*forwarder.Forwarder `validate:"dive"` - PdbMaxUnavailable string `validate:"pdb_max_unavailable"` - Annotations map[string]string - Labels map[string]string + Name string `validate:"required,kube_resource_name"` + Game string `validate:"required"` + State string `validate:"required"` + RollbackVersion string + Spec game_room.Spec + Autoscaling *autoscaling.Autoscaling + PortRange *port.PortRange + RoomsReplicas int `validate:"min=0"` + CreatedAt time.Time + LastDownscaleAt time.Time + MaxSurge string `validate:"required,max_surge"` + Forwarders []*forwarder.Forwarder `validate:"dive"` + Annotations map[string]string + Labels map[string]string } // NewScheduler instantiate a new scheduler struct. @@ -89,23 +88,21 @@ func NewScheduler( roomsReplicas int, autoscaling *autoscaling.Autoscaling, forwarders []*forwarder.Forwarder, - pdbMaxUnavailable string, annotations map[string]string, labels map[string]string, ) (*Scheduler, error) { scheduler := &Scheduler{ - Name: name, - Game: game, - State: state, - Spec: spec, - PortRange: portRange, - MaxSurge: maxSurge, - RoomsReplicas: roomsReplicas, - Autoscaling: autoscaling, - Forwarders: forwarders, - PdbMaxUnavailable: pdbMaxUnavailable, - Annotations: annotations, - Labels: labels, + Name: name, + Game: game, + State: state, + Spec: spec, + PortRange: portRange, + MaxSurge: maxSurge, + RoomsReplicas: roomsReplicas, + Autoscaling: autoscaling, + Forwarders: forwarders, + Annotations: annotations, + Labels: labels, } return scheduler, scheduler.Validate() } @@ -163,7 +160,6 @@ func (s *Scheduler) IsMajorVersion(newScheduler *Scheduler) bool { "MaxSurge", "RoomsReplicas", "Autoscaling", - "PdbMaxUnavailable", ), ) } diff --git a/internal/core/entities/scheduler_test.go b/internal/core/entities/scheduler_test.go index 78d47dd1f..9e59bb8e1 100644 --- a/internal/core/entities/scheduler_test.go +++ b/internal/core/entities/scheduler_test.go @@ -93,7 +93,6 @@ func TestNewScheduler(t *testing.T) { forwarders := []*forwarder.Forwarder{fwd} annotations := map[string]string{"imageregistry": "https://hub.docker.com/"} labels := map[string]string{"scheduler": "scheduler-name"} - pdbMaxUnavailable := "5%" t.Run("with success when create valid scheduler", func(t *testing.T) { scheduler, err := entities.NewScheduler( @@ -106,23 +105,21 @@ func TestNewScheduler(t *testing.T) { roomsReplicas, nil, forwarders, - pdbMaxUnavailable, annotations, labels) expectedScheduler := &entities.Scheduler{ - Name: name, - Game: game, - MaxSurge: maxSurge, - State: entities.StateCreating, - Spec: spec, - PortRange: portRange, - RoomsReplicas: roomsReplicas, - Autoscaling: nil, - Forwarders: forwarders, - PdbMaxUnavailable: pdbMaxUnavailable, - Annotations: annotations, - Labels: labels, + Name: name, + Game: game, + MaxSurge: maxSurge, + State: entities.StateCreating, + Spec: spec, + PortRange: portRange, + RoomsReplicas: roomsReplicas, + Autoscaling: nil, + Forwarders: forwarders, + Annotations: annotations, + Labels: labels, } require.NoError(t, err) @@ -139,10 +136,7 @@ func TestNewScheduler(t *testing.T) { portRange, roomsReplicas, nil, - forwarders, - 
pdbMaxUnavailable, - annotations, - labels) + forwarders, annotations, labels) require.Error(t, err) }) @@ -166,10 +160,7 @@ func TestNewScheduler(t *testing.T) { ), 0, nil, - forwarders, - pdbMaxUnavailable, - annotations, - labels) + forwarders, annotations, labels) require.Error(t, err) }) @@ -193,10 +184,7 @@ func TestNewScheduler(t *testing.T) { ), -1, nil, - forwarders, - pdbMaxUnavailable, - annotations, - labels) + forwarders, annotations, labels) require.Error(t, err) }) diff --git a/internal/core/operations/providers/operation_providers.go b/internal/core/operations/providers/operation_providers.go index 26aec3d74..bda7ce910 100644 --- a/internal/core/operations/providers/operation_providers.go +++ b/internal/core/operations/providers/operation_providers.go @@ -96,7 +96,7 @@ func ProvideExecutors( executors[removerooms.OperationName] = removerooms.NewExecutor(roomManager, roomStorage, operationManager, schedulerManager) executors[test.OperationName] = test.NewExecutor() executors[switchversion.OperationName] = switchversion.NewExecutor(schedulerManager, operationManager) - executors[newversion.OperationName] = newversion.NewExecutor(runtime, roomManager, schedulerManager, operationManager, newSchedulerVersionConfig) + executors[newversion.OperationName] = newversion.NewExecutor(roomManager, schedulerManager, operationManager, newSchedulerVersionConfig) executors[healthcontroller.OperationName] = healthcontroller.NewExecutor(roomStorage, roomManager, instanceStorage, schedulerStorage, operationManager, autoscaler, healthControllerConfig) executors[storagecleanup.OperationName] = storagecleanup.NewExecutor(operationStorage) executors[deletescheduler.OperationName] = deletescheduler.NewExecutor(schedulerStorage, schedulerCache, instanceStorage, operationStorage, operationManager, runtime) diff --git a/internal/core/operations/schedulers/create/executor.go b/internal/core/operations/schedulers/create/executor.go index e3c4e34a5..c5584bb05 100644 --- a/internal/core/operations/schedulers/create/executor.go +++ b/internal/core/operations/schedulers/create/executor.go @@ -62,7 +62,8 @@ func (e *Executor) Execute(ctx context.Context, op *operation.Operation, definit if !ok { return fmt.Errorf("invalid operation definition for %s operation", e.Name()) } - err := e.runtime.CreateScheduler(ctx, &entities.Scheduler{Name: op.SchedulerName, PdbMaxUnavailable: opDef.NewScheduler.PdbMaxUnavailable}) + + err := e.runtime.CreateScheduler(ctx, &entities.Scheduler{Name: op.SchedulerName}) if err != nil { logger.Error("error creating scheduler in runtime", zap.Error(err)) createSchedulerErr := fmt.Errorf("error creating scheduler in runtime: %w", err) diff --git a/internal/core/operations/schedulers/newversion/executor.go b/internal/core/operations/schedulers/newversion/executor.go index 9838330e2..e2f78f6f4 100644 --- a/internal/core/operations/schedulers/newversion/executor.go +++ b/internal/core/operations/schedulers/newversion/executor.go @@ -50,7 +50,6 @@ type Config struct { // Executor holds the dependecies to execute the operation to create a new scheduler version. type Executor struct { - runtime ports.Runtime roomManager ports.RoomManager schedulerManager ports.SchedulerManager operationManager ports.OperationManager @@ -61,9 +60,8 @@ type Executor struct { var _ operations.Executor = (*Executor)(nil) // NewExecutor instantiate a new scheduler version executor. 
-func NewExecutor(runtime ports.Runtime, roomManager ports.RoomManager, schedulerManager ports.SchedulerManager, operationManager ports.OperationManager, config Config) *Executor { +func NewExecutor(roomManager ports.RoomManager, schedulerManager ports.SchedulerManager, operationManager ports.OperationManager, config Config) *Executor { return &Executor{ - runtime: runtime, roomManager: roomManager, schedulerManager: schedulerManager, operationManager: operationManager, @@ -126,13 +124,6 @@ func (ex *Executor) Execute(ctx context.Context, op *operation.Operation, defini return err } - if newScheduler.PdbMaxUnavailable != currentActiveScheduler.PdbMaxUnavailable { - err = ex.runtime.UpdateScheduler(ctx, newScheduler) - if err != nil { - logger.Warn("error updating scheduler PDB", zap.Error(err)) - } - } - ex.operationManager.AppendOperationEventToExecutionHistory(ctx, op, fmt.Sprintf(enqueuedSwitchVersionMessageTemplate, switchOpID)) logger.Sugar().Infof("new scheduler version created: %s, is major: %t", newScheduler.Spec.Version, isSchedulerMajorVersion) logger.Sugar().Infof("%s operation succeded, %s operation enqueued to continue scheduler update process, switching to version %s", opDef.Name(), switchversion.OperationName, newScheduler.Spec.Version) diff --git a/internal/core/operations/schedulers/newversion/executor_test.go b/internal/core/operations/schedulers/newversion/executor_test.go index 93417a428..abb9ea587 100644 --- a/internal/core/operations/schedulers/newversion/executor_test.go +++ b/internal/core/operations/schedulers/newversion/executor_test.go @@ -69,14 +69,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) - runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 1, } - executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}, {Version: "v1.1.0"}, {Version: "v1.2.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -121,14 +120,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) - runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := newversion.Config{ RoomInitializationTimeout: time.Duration(120000), RoomValidationAttempts: 3, } - executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config) + executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config) schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}, {Version: "v1.1.0"}, {Version: "v1.2.0"}} gameRoom := &game_room.GameRoom{ID: "id-1"} @@ -176,14 +174,13 @@ func TestExecutor_Execute(t *testing.T) { roomManager := mockports.NewMockRoomManager(mockCtrl) schedulerManager := mockports.NewMockSchedulerManager(mockCtrl) operationsManager := mockports.NewMockOperationManager(mockCtrl) - runtime := mockports.NewMockRuntime(mockCtrl) switchOpID := "switch-active-version-op-id" config := 
		config := newversion.Config{
			RoomInitializationTimeout: time.Duration(120000),
			RoomValidationAttempts:    1,
		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}}
 		gameRoom := &game_room.GameRoom{ID: "id-1"}
@@ -228,14 +225,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		switchOpID := "switch-active-version-op-id"
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.1.0"}, {Version: "v1.2.0"}, {Version: "v1.3.0"}}
 		gameRoom := &game_room.GameRoom{ID: "id-1"}
@@ -280,14 +276,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		switchOpID := "switch-active-version-op-id"
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.1.0"}, {Version: "v1.2.0"}, {Version: "v1.3.0"}}
 		gameRoom := &game_room.GameRoom{ID: "id-1"}
@@ -331,13 +326,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
 		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return([]*entities.SchedulerVersion{}, errors.NewErrUnexpected("some_error"))
@@ -364,14 +358,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v-----"}}
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
 		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil)
@@ -398,13 +391,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}}
 		newSchedulerWithNewVersion := newScheduler
@@ -441,13 +433,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    3,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}}
 		newSchedulerWithNewVersion := newScheduler
@@ -489,14 +480,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    3,
 		}
 		ctx, cancelFn := context.WithCancel(context.Background())
 
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}, {Version: "v1.1.0"}, {Version: "v1.2.0"}}
 
@@ -532,13 +522,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}}
 		newSchedulerWithNewVersion := newScheduler
@@ -577,13 +566,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}}
 		newSchedulerWithNewVersion := newScheduler
@@ -629,13 +617,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v1.2.0"}}
 		newSchedulerWithNewVersion := newScheduler
@@ -676,14 +663,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		switchOpID := "switch-active-version-op-id"
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v3.1.0"}, {Version: "v4.2.0"}}
 
@@ -721,14 +707,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		switchOpID := "switch-active-version-op-id"
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v1.3.0"}, {Version: "v1.5.0"}}
 
@@ -766,14 +751,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		switchOpID := "switch-active-version-op-id"
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v2.0.0"}, {Version: "v2.1.0"}, {Version: "v3.5.0"}}
 
@@ -810,13 +794,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
 		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return([]*entities.SchedulerVersion{}, errors.NewErrUnexpected("some_error"))
@@ -842,14 +825,13 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		schedulerVersions := []*entities.SchedulerVersion{{Version: "v-----"}}
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
 		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil)
@@ -875,13 +857,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 		newSchedulerWithNewVersion := newScheduler
 		newSchedulerWithNewVersion.Spec.Version = "v1.1.0"
 		newSchedulerWithNewVersion.RollbackVersion = "v1.0.0"
@@ -913,13 +894,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 		newSchedulerWithNewVersion := newScheduler
 		newSchedulerWithNewVersion.Spec.Version = "v1.1.0"
 		newSchedulerWithNewVersion.RollbackVersion = "v1.0.0"
@@ -945,13 +925,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 		newSchedulerWithNewVersion := newScheduler
 		newSchedulerWithNewVersion.Spec.Version = "v1.1.0"
 		newSchedulerWithNewVersion.RollbackVersion = "v1.0.0"
@@ -977,13 +956,12 @@ func TestExecutor_Execute(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 
 		// mocks for SchedulerManager GetActiveScheduler method
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
@@ -1016,13 +994,12 @@ func TestExecutor_Rollback(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 		executor.AddValidationRoomID(newScheduler.Name, &game_room.GameRoom{ID: "room1"})
 		roomManager.EXPECT().DeleteRoom(gomock.Any(), gomock.Any(), remove.NewVersionRollback).Return(nil)
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(&newScheduler, nil)
@@ -1046,13 +1023,12 @@ func TestExecutor_Rollback(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 		executor.AddValidationRoomID(newScheduler.Name, &game_room.GameRoom{ID: "room1"})
 		roomManager.EXPECT().DeleteRoom(gomock.Any(), gomock.Any(), remove.NewVersionRollback).Return(errors.NewErrUnexpected("some error"))
 		result := executor.Rollback(context.Background(), op, operationDef, nil)
@@ -1074,13 +1050,12 @@ func TestExecutor_Rollback(t *testing.T) {
 		roomManager := mockports.NewMockRoomManager(mockCtrl)
 		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
 		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
 		config := newversion.Config{
 			RoomInitializationTimeout: time.Duration(120000),
 			RoomValidationAttempts:    1,
 		}
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
+		executor := newversion.NewExecutor(roomManager, schedulerManager, operationsManager, config)
 		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(&newScheduler, nil)
 		schedulerManager.EXPECT().UpdateScheduler(gomock.Any(), &newScheduler).Return(nil)
 		result := executor.Rollback(context.Background(), op, operationDef, nil)
@@ -1088,84 +1063,6 @@ func TestExecutor_Rollback(t *testing.T) {
 		require.Nil(t, result)
 	})
 
-	t.Run("should call runtime to update max unavailable if it has changed", func(t *testing.T) {
-		mockCtrl := gomock.NewController(t)
-
-		currentActiveScheduler := newValidSchedulerWithImageVersion("image-v1")
-		currentActiveScheduler.PdbMaxUnavailable = "5%"
-		newScheduler := *newValidSchedulerWithImageVersion("image-v1")
-		newScheduler.PdbMaxUnavailable = "10%"
-		op := &operation.Operation{
-			ID:             "123",
-			Status:         operation.StatusInProgress,
-			DefinitionName: newversion.OperationName,
-			SchedulerName:  newScheduler.Name,
-		}
-		operationDef := &newversion.Definition{NewScheduler: &newScheduler}
-		roomManager := mockports.NewMockRoomManager(mockCtrl)
-		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
-		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
-		switchOpID := "switch-active-version-op-id"
-		config := newversion.Config{
-			RoomInitializationTimeout: time.Duration(120000),
-			RoomValidationAttempts:    1,
-		}
-		schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}}
-
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
-
-		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
-		schedulerManager.EXPECT().UpdateScheduler(gomock.Any(), currentActiveScheduler).Return(nil)
-		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil)
-		schedulerManager.EXPECT().CreateNewSchedulerVersionAndEnqueueSwitchVersion(gomock.Any(), gomock.Any()).Return(switchOpID, nil)
-		runtime.EXPECT().UpdateScheduler(gomock.Any(), &newScheduler).Return(nil)
-		operationsManager.EXPECT().AppendOperationEventToExecutionHistory(gomock.Any(), op, fmt.Sprintf("enqueued switch active version operation with id: %s", switchOpID))
-
-		result := executor.Execute(context.Background(), op, operationDef)
-
-		require.Nil(t, result)
-	})
-
-	t.Run("should not call runtime to update max unavailable if it has not changed", func(t *testing.T) {
-		mockCtrl := gomock.NewController(t)
-
-		currentActiveScheduler := newValidSchedulerWithImageVersion("image-v1")
-		currentActiveScheduler.PdbMaxUnavailable = "5%"
-		newScheduler := *newValidSchedulerWithImageVersion("image-v1")
-		newScheduler.PdbMaxUnavailable = "5%"
-		newScheduler.MaxSurge = "42"
-		op := &operation.Operation{
-			ID:             "123",
-			Status:         operation.StatusInProgress,
-			DefinitionName: newversion.OperationName,
-			SchedulerName:  newScheduler.Name,
-		}
-		operationDef := &newversion.Definition{NewScheduler: &newScheduler}
-		roomManager := mockports.NewMockRoomManager(mockCtrl)
-		schedulerManager := mockports.NewMockSchedulerManager(mockCtrl)
-		operationsManager := mockports.NewMockOperationManager(mockCtrl)
-		runtime := mockports.NewMockRuntime(mockCtrl)
-		switchOpID := "switch-active-version-op-id"
-		config := newversion.Config{
-			RoomInitializationTimeout: time.Duration(120000),
-			RoomValidationAttempts:    1,
-		}
-		schedulerVersions := []*entities.SchedulerVersion{{Version: "v1.0.0"}}
-
-		executor := newversion.NewExecutor(runtime, roomManager, schedulerManager, operationsManager, config)
-
-		schedulerManager.EXPECT().GetActiveScheduler(gomock.Any(), newScheduler.Name).Return(currentActiveScheduler, nil)
-		schedulerManager.EXPECT().UpdateScheduler(gomock.Any(), currentActiveScheduler).Return(nil)
-		schedulerManager.EXPECT().GetSchedulerVersions(gomock.Any(), newScheduler.Name).Return(schedulerVersions, nil)
-		schedulerManager.EXPECT().CreateNewSchedulerVersionAndEnqueueSwitchVersion(gomock.Any(), gomock.Any()).Return(switchOpID, nil)
-		runtime.EXPECT().UpdateScheduler(gomock.Any(), &newScheduler).Times(0)
-		operationsManager.EXPECT().AppendOperationEventToExecutionHistory(gomock.Any(), op, fmt.Sprintf("enqueued switch active version operation with id: %s", switchOpID))
-
-		result := executor.Execute(context.Background(), op, operationDef)
-
-		require.Nil(t, result)
-	})
 }
 
 func newValidSchedulerWithImageVersion(imageVersion string) *entities.Scheduler {
diff --git a/internal/core/ports/mock/runtime_mock.go b/internal/core/ports/mock/runtime_mock.go
index 2cd83809e..c70dbc8d8 100644
--- a/internal/core/ports/mock/runtime_mock.go
+++ b/internal/core/ports/mock/runtime_mock.go
@@ -109,18 +109,18 @@ func (mr *MockRuntimeMockRecorder) DeleteScheduler(ctx, scheduler interface{}) *
 	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteScheduler", reflect.TypeOf((*MockRuntime)(nil).DeleteScheduler), ctx, scheduler)
 }
 
-// UpdateScheduler mocks base method.
-func (m *MockRuntime) UpdateScheduler(ctx context.Context, scheduler *entities.Scheduler) error {
+// MitigateDisruption mocks base method.
+func (m *MockRuntime) MitigateDisruption(ctx context.Context, scheduler *entities.Scheduler, roomAmount int, safetyPercentage float64) error {
 	m.ctrl.T.Helper()
-	ret := m.ctrl.Call(m, "UpdateScheduler", ctx, scheduler)
+	ret := m.ctrl.Call(m, "MitigateDisruption", ctx, scheduler, roomAmount, safetyPercentage)
 	ret0, _ := ret[0].(error)
 	return ret0
 }
 
-// UpdateScheduler indicates an expected call of UpdateScheduler.
-func (mr *MockRuntimeMockRecorder) UpdateScheduler(ctx, scheduler interface{}) *gomock.Call {
+// MitigateDisruption indicates an expected call of MitigateDisruption.
+func (mr *MockRuntimeMockRecorder) MitigateDisruption(ctx, scheduler, roomAmount, safetyPercentage interface{}) *gomock.Call {
 	mr.mock.ctrl.T.Helper()
-	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateScheduler", reflect.TypeOf((*MockRuntime)(nil).UpdateScheduler), ctx, scheduler)
+	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MitigateDisruption", reflect.TypeOf((*MockRuntime)(nil).MitigateDisruption), ctx, scheduler, roomAmount, safetyPercentage)
 }
 
 // WatchGameRoomInstances mocks base method.
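Note: with the regenerated mock above, tests now assert disruption mitigation through the new method instead of UpdateScheduler. A minimal sketch, assuming an existing gomock controller and scheduler fixture (the argument values here are illustrative, not taken from this change):

	runtime := mock.NewMockRuntime(mockCtrl)
	// Expect one mitigation call protecting 40 occupied rooms with a 5% safety margin.
	runtime.EXPECT().
		MitigateDisruption(gomock.Any(), scheduler, 40, 0.05).
		Return(nil)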
diff --git a/internal/core/ports/runtime.go b/internal/core/ports/runtime.go
index 2a3f64906..39f5e22f0 100644
--- a/internal/core/ports/runtime.go
+++ b/internal/core/ports/runtime.go
@@ -36,8 +36,6 @@ type Runtime interface {
 	CreateScheduler(ctx context.Context, scheduler *entities.Scheduler) error
 	// DeleteScheduler Deletes a scheduler on the runtime.
 	DeleteScheduler(ctx context.Context, scheduler *entities.Scheduler) error
-	// UpdateScheduler Updates the scheduler on the runtime.
-	UpdateScheduler(ctx context.Context, scheduler *entities.Scheduler) error
 	// CreateGameRoomInstance Creates a game room instance on the runtime using
 	// the specification provided.
 	CreateGameRoomInstance(ctx context.Context, scheduler *entities.Scheduler, gameRoomName string, spec game_room.Spec) (*game_room.Instance, error)
@@ -47,6 +45,8 @@ type Runtime interface {
 	WatchGameRoomInstances(ctx context.Context, scheduler *entities.Scheduler) (RuntimeWatcher, error)
 	// CreateGameRoomName Creates a name to the room.
 	CreateGameRoomName(ctx context.Context, scheduler entities.Scheduler) (string, error)
+	// MitigateDisruption applies changes to the runtime to mitigate disruptions, based on the current number of rooms.
+	MitigateDisruption(ctx context.Context, scheduler *entities.Scheduler, roomAmount int, safetyPercentage float64) error
 }
 
 // RuntimeWatcher defines a process of watcher, it will have a chan with the
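The Kubernetes implementation of MitigateDisruption is not shown in this part of the diff; the sketch below is only a guess at its shape, assuming the runtime keeps one PodDisruptionBudget per scheduler namespace and pads the occupied-room count by the safety percentage. The receiver type, clientSet field, and the minAvailable formula are assumptions; the client-go packages used are k8s.io/api/policy/v1 (policyv1), k8s.io/apimachinery/pkg/apis/meta/v1 (metav1), and k8s.io/apimachinery/pkg/util/intstr.

	// Hypothetical sketch, not code from this change.
	func (k *kubernetesRuntime) MitigateDisruption(ctx context.Context, scheduler *entities.Scheduler, roomAmount int, safetyPercentage float64) error {
		// Keep the occupied rooms, plus a safety margin, protected from voluntary evictions.
		minAvailable := intstr.FromInt(int(math.Ceil(float64(roomAmount) * (1 + safetyPercentage))))
		pdb := &policyv1.PodDisruptionBudget{
			ObjectMeta: metav1.ObjectMeta{Name: scheduler.Name, Namespace: scheduler.Name},
			Spec:       policyv1.PodDisruptionBudgetSpec{MinAvailable: &minAvailable},
		}
		_, err := k.clientSet.PolicyV1().PodDisruptionBudgets(scheduler.Name).Update(ctx, pdb, metav1.UpdateOptions{})
		return err
	}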
diff --git a/internal/core/services/schedulers/patch/patch_scheduler.go b/internal/core/services/schedulers/patch/patch_scheduler.go
index fea77c679..4824b8889 100644
--- a/internal/core/services/schedulers/patch/patch_scheduler.go
+++ b/internal/core/services/schedulers/patch/patch_scheduler.go
@@ -49,8 +49,7 @@ const (
 	LabelAutoscaling = "autoscaling"
 	// LabelSchedulerForwarders is the forwarders key in the patch map.
 	LabelSchedulerForwarders = "forwarders"
-	// LabelPDBMaxUnavailable is the PDB's maxUnavailable spec key in the patch map.
-	LabelPDBMaxUnavailable = "pdbMaxUnavailable"
+
 	// LabelSpecTerminationGracePeriod is the termination grace period key in the patch map.
 	LabelSpecTerminationGracePeriod = "termination_grace_period"
 	// LabelSpecContainers is the containers key in the patch map.
@@ -115,10 +114,6 @@ func PatchScheduler(scheduler entities.Scheduler, patchMap map[string]interface{
 		}
 	}
 
-	if _, ok := patchMap[LabelPDBMaxUnavailable]; ok {
-		scheduler.PdbMaxUnavailable = fmt.Sprint(patchMap[LabelPDBMaxUnavailable])
-	}
-
 	if _, ok := patchMap[LabelSchedulerSpec]; ok {
 		var patchSpecMap map[string]interface{}
 		if patchSpecMap, ok = patchMap[LabelSchedulerSpec].(map[string]interface{}); !ok {
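With pdbMaxUnavailable removed from the patch map, callers patch only the remaining keys. A minimal usage sketch of PatchScheduler, assuming it returns the patched scheduler and an error (the key values are illustrative):

	// Patch only the termination grace period inside the spec.
	patchMap := map[string]interface{}{
		LabelSchedulerSpec: map[string]interface{}{
			LabelSpecTerminationGracePeriod: 30,
		},
	}
	patched, err := PatchScheduler(*scheduler, patchMap)
	if err != nil {
		return err
	}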
diff --git a/internal/core/services/schedulers/scheduler_manager.go b/internal/core/services/schedulers/scheduler_manager.go
index 94bd2db05..2b0ebb5f8 100644
--- a/internal/core/services/schedulers/scheduler_manager.go
+++ b/internal/core/services/schedulers/scheduler_manager.go
@@ -48,19 +48,17 @@ type SchedulerManager struct {
 	schedulerCache   ports.SchedulerCache
 	operationManager ports.OperationManager
 	roomStorage      ports.RoomStorage
-	config           SchedulerManagerConfig
 	logger           *zap.Logger
 }
 
 var _ ports.SchedulerManager = (*SchedulerManager)(nil)
 
-func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage, config SchedulerManagerConfig) *SchedulerManager {
+func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage) *SchedulerManager {
 	return &SchedulerManager{
 		schedulerStorage: schedulerStorage,
 		operationManager: operationManager,
 		schedulerCache:   schedulerCache,
 		roomStorage:      roomStorage,
-		config:           config,
 		logger:           zap.L().With(zap.String(logs.LogFieldComponent, "service"), zap.String(logs.LogFieldServiceName, "scheduler_manager")),
 	}
 }
@@ -91,10 +89,6 @@ func (s *SchedulerManager) CreateScheduler(ctx context.Context, scheduler *entit
 		return nil, fmt.Errorf("failing in creating schedule: %w", err)
 	}
 
-	if scheduler.PdbMaxUnavailable == "" {
-		scheduler.PdbMaxUnavailable = s.config.DefaultPdbMaxUnavailable
-	}
-
 	err = s.schedulerStorage.CreateScheduler(ctx, scheduler)
 	if err != nil {
 		return nil, err
@@ -164,10 +158,6 @@ func (s *SchedulerManager) PatchSchedulerAndCreateNewSchedulerVersionOperation(c
 		return nil, portsErrors.NewErrInvalidArgument("error patching scheduler: %s", err.Error())
 	}
 
-	if scheduler.PdbMaxUnavailable == "" {
-		scheduler.PdbMaxUnavailable = s.config.DefaultPdbMaxUnavailable
-	}
-
 	if err := scheduler.Validate(); err != nil {
 		return nil, portsErrors.NewErrInvalidArgument("invalid patched scheduler: %s", err.Error())
 	}
@@ -204,9 +194,6 @@ func (s *SchedulerManager) EnqueueNewSchedulerVersionOperation(ctx context.Conte
 	}
 	scheduler.Spec.Version = currentScheduler.Spec.Version
 
-	if scheduler.PdbMaxUnavailable == "" {
-		scheduler.PdbMaxUnavailable = s.config.DefaultPdbMaxUnavailable
-	}
 	err = scheduler.Validate()
 	if err != nil {
 		return nil, err
diff --git a/internal/core/services/schedulers/scheduler_manager_test.go b/internal/core/services/schedulers/scheduler_manager_test.go
index 5d9edfeba..fe7bcc020 100644
--- a/internal/core/services/schedulers/scheduler_manager_test.go
+++ b/internal/core/services/schedulers/scheduler_manager_test.go
@@ -69,7 +69,7 @@ func TestCreateScheduler(t *testing.T) {
 	operationManager := mock.NewMockOperationManager(mockCtrl)
 	roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 	schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 	t.Run("with valid scheduler it returns no error when creating it", func(t *testing.T) {
 		scheduler := newValidScheduler()
@@ -128,7 +128,7 @@ func TestCreateNewSchedulerVersion(t *testing.T) {
 	operationManager := mock.NewMockOperationManager(mockCtrl)
 	roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 	schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+	schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 	t.Run("with valid scheduler it returns no error when creating it", func(t *testing.T) {
 		scheduler := newValidScheduler()
@@ -174,7 +174,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(&operation.Operation{}, nil)
 		schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil)
@@ -195,7 +195,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil)
@@ -213,7 +213,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.NewErrUnexpected("some_error"))
@@ -230,7 +230,7 @@ func TestEnqueueNewSchedulerVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil)
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(nil, errors.NewErrUnexpected("storage offline"))
@@ -262,7 +262,7 @@ func TestEnqueueSwitchActiveVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(&operation.Operation{}, nil)
@@ -285,7 +285,7 @@ func TestEnqueueSwitchActiveVersionOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, gomock.Any()).Return(nil, errors.NewErrUnexpected("storage offline"))
@@ -314,7 +314,7 @@ func TestDeleteSchedulerOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, opDef).Return(&operation.Operation{}, nil)
 		schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil)
@@ -335,7 +335,7 @@ func TestDeleteSchedulerOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, opDef).Return(&operation.Operation{}, nil)
 		schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.ErrNotFound)
@@ -357,7 +357,7 @@ func TestDeleteSchedulerOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		operationManager.EXPECT().CreateOperation(ctx, scheduler.Name, opDef).Return(nil, errors.NewErrUnexpected("storage offline"))
 		schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(scheduler, nil)
@@ -377,7 +377,7 @@ func TestDeleteSchedulerOperation(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerCache.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.ErrNotFound)
 		schedulerStorage.EXPECT().GetScheduler(ctx, scheduler.Name).Return(nil, errors.ErrNotFound)
@@ -401,7 +401,7 @@ func TestGetSchedulerVersions(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetSchedulerVersions(ctx, scheduler.Name).Return(schedulerVersionList, nil)
@@ -419,7 +419,7 @@ func TestGetSchedulerVersions(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetSchedulerVersions(ctx, scheduler.Name).Return(nil, errors.NewErrNotFound("scheduler not found"))
@@ -440,7 +440,7 @@ func TestGetSchedulerByVersion(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetSchedulerWithFilter(ctx, &filters.SchedulerFilter{
 			Name: scheduler.Name,
@@ -460,7 +460,7 @@ func TestGetSchedulerByVersion(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetSchedulerWithFilter(ctx, &filters.SchedulerFilter{
 			Name: scheduler.Name,
@@ -484,7 +484,7 @@ func TestGetScheduler(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerFilter := &filters.SchedulerFilter{
 			Name: scheduler.Name,
@@ -506,7 +506,7 @@ func TestGetScheduler(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerFilter := &filters.SchedulerFilter{
 			Name: scheduler.Name,
@@ -533,7 +533,7 @@ func TestGetSchedulersWithFilter(t *testing.T) {
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulers := []*entities.Scheduler{scheduler}
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetSchedulersWithFilter(ctx, gomock.Any()).Return(schedulers, nil)
@@ -550,7 +550,7 @@ func TestGetSchedulersWithFilter(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().GetSchedulersWithFilter(ctx, gomock.Any()).Return(nil, errors.NewErrUnexpected("some error"))
@@ -576,7 +576,7 @@ func TestUpdateScheduler(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().UpdateScheduler(ctx, scheduler).Return(nil)
 		schedulerCache.EXPECT().DeleteScheduler(ctx, scheduler.Name).Return(nil)
@@ -594,7 +594,7 @@ func TestUpdateScheduler(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().UpdateScheduler(ctx, scheduler).Return(nil)
 		schedulerCache.EXPECT().DeleteScheduler(ctx, scheduler.Name).Return(errors.NewErrUnexpected("error"))
@@ -612,7 +612,7 @@ func TestUpdateScheduler(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		schedulerStorage.EXPECT().UpdateScheduler(ctx, scheduler).Return(errors.NewErrUnexpected("error"))
@@ -627,7 +627,7 @@ func TestUpdateScheduler(t *testing.T) {
 		operationManager := mock.NewMockOperationManager(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
 		schedulerCache := mockports.NewMockSchedulerCache(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage)
 
 		err := schedulerManager.CreateNewSchedulerVersion(ctx, scheduler)
@@ -641,7 +641,7 @@ func TestGetSchedulersInfo(t *testing.T) {
 		ctx := context.Background()
 		schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage)
 		schedulerFilter := filters.SchedulerFilter{Game: "Tennis-Clash"}
 		scheduler := newValidScheduler()
 		schedulers := []*entities.Scheduler{scheduler}
@@ -666,7 +666,7 @@ func TestGetSchedulersInfo(t *testing.T) {
 		ctx := context.Background()
 		schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage)
 		schedulerFilter := filters.SchedulerFilter{Game: "Tennis-Clash"}
 
 		schedulerStorage.EXPECT().GetSchedulersWithFilter(gomock.Any(), gomock.Any()).Return(nil, errors.NewErrNotFound("err"))
@@ -682,7 +682,7 @@ func TestGetSchedulersInfo(t *testing.T) {
 		ctx := context.Background()
 		schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, roomStorage)
 		schedulerFilter := filters.SchedulerFilter{Game: "Tennis-Clash"}
 		scheduler := newValidScheduler()
 		schedulers := []*entities.Scheduler{scheduler}
@@ -701,7 +701,7 @@ func TestNewSchedulerInfo(t *testing.T) {
 	t.Run("with valid request it returns a scheduler and game rooms information (no autoscaling)", func(t *testing.T) {
 		ctx := context.Background()
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(10, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(15, nil)
@@ -721,7 +721,7 @@ func TestNewSchedulerInfo(t *testing.T) {
 	t.Run("with valid request it returns a scheduler and game rooms information and autoscaling", func(t *testing.T) {
 		ctx := context.Background()
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(10, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(15, nil)
@@ -750,7 +750,7 @@ func TestNewSchedulerInfo(t *testing.T) {
 	t.Run("it returns with error when couldn't get game rooms information in ready state", func(t *testing.T) {
 		ctx := context.Background()
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, errors.NewErrUnexpected("err"))
 
 		scheduler := newValidScheduler()
@@ -763,7 +763,7 @@ func TestNewSchedulerInfo(t *testing.T) {
 	t.Run("it returns with error when couldn't get game rooms information in pending state", func(t *testing.T) {
 		ctx := context.Background()
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, errors.NewErrUnexpected("err"))
 		scheduler := newValidScheduler()
@@ -777,7 +777,7 @@ func TestNewSchedulerInfo(t *testing.T) {
 	t.Run("it returns with error when couldn't get game rooms information in occupied state", func(t *testing.T) {
 		ctx := context.Background()
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, errors.NewErrUnexpected("err"))
@@ -792,7 +792,7 @@ func TestNewSchedulerInfo(t *testing.T) {
 	t.Run("it returns with error when couldn't get game rooms information in terminating state", func(t *testing.T) {
 		ctx := context.Background()
 		roomStorage := mockports.NewMockRoomStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(nil, nil, nil, roomStorage)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
 		roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(5, nil)
@@ -813,7 +813,7 @@ func TestDeleteScheduler(t *testing.T) {
 		scheduler := newValidScheduler()
 		ctx := context.Background()
 		schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil)
 
 		schedulerStorage.EXPECT().GetScheduler(gomock.Any(), schedulerName).Return(scheduler, nil)
 		schedulerStorage.EXPECT().DeleteScheduler(gomock.Any(), ports.TransactionID(""), scheduler).Return(nil)
@@ -826,7 +826,7 @@ func TestDeleteScheduler(t *testing.T) {
 		schedulerName := "scheduler-name"
 		ctx := context.Background()
 		schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil)
 
 		schedulerStorage.EXPECT().GetScheduler(gomock.Any(), schedulerName).Return(nil, errors.NewErrNotFound("err"))
 
 		err := schedulerManager.DeleteScheduler(ctx, schedulerName)
@@ -841,7 +841,7 @@ func TestDeleteScheduler(t *testing.T) {
 		scheduler := newValidScheduler()
 		ctx := context.Background()
 		schedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
-		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+		schedulerManager := NewSchedulerManager(schedulerStorage, nil, nil, nil)
 
 		schedulerStorage.EXPECT().GetScheduler(gomock.Any(), schedulerName).Return(scheduler, nil)
 		schedulerStorage.EXPECT().DeleteScheduler(gomock.Any(), ports.TransactionID(""), scheduler).Return(errors.NewErrUnexpected("err"))
@@ -887,7 +887,6 @@ func TestPatchSchedulerAndSwitchActiveVersionOperation(t *testing.T) {
 				GetSchedulerError: nil,
 				ChangedSchedulerFunction: func() *entities.Scheduler {
 					scheduler.MaxSurge = "12%"
-					scheduler.PdbMaxUnavailable = "5%"
 					return scheduler
 				},
 				CreateOperationReturn: &operation.Operation{ID: "some-id"},
@@ -972,7 +971,6 @@ func TestPatchSchedulerAndSwitchActiveVersionOperation(t *testing.T) {
 				GetSchedulerError: nil,
 				ChangedSchedulerFunction: func() *entities.Scheduler {
 					scheduler.MaxSurge = "17%"
-					scheduler.PdbMaxUnavailable = "5%"
 					return scheduler
 				},
 				CreateOperationReturn: nil,
@@ -1015,7 +1013,7 @@ func TestPatchSchedulerAndSwitchActiveVersionOperation(t *testing.T) {
 			mockCtrl := gomock.NewController(t)
 			mockOperationManager := mock.NewMockOperationManager(mockCtrl)
 			mockSchedulerStorage := mockports.NewMockSchedulerStorage(mockCtrl)
-			schedulerManager := NewSchedulerManager(mockSchedulerStorage, nil, mockOperationManager, nil, SchedulerManagerConfig{DefaultPdbMaxUnavailable: "5%"})
+			schedulerManager := NewSchedulerManager(mockSchedulerStorage, nil, mockOperationManager, nil)
 
 			mockSchedulerStorage.EXPECT().GetScheduler(gomock.Any(), scheduler.Name).Return(scheduler, testCase.ExpectedMock.GetSchedulerError)
 			mockOperationManager.EXPECT().
diff --git a/internal/core/validations/validations.go b/internal/core/validations/validations.go
index cbd9964aa..382d78d9a 100644
--- a/internal/core/validations/validations.go
+++ b/internal/core/validations/validations.go
@@ -74,31 +74,6 @@ func IsMaxSurgeValid(maxSurge string) bool {
 	return true
 }
 
-// IsPdbMaxUnavailableValid checks if PdbMaxUnavailable is valid. A valid PdbMaxUnavailable is either
-// a string that can be converted to a number greater than 0, or a percentage string whose
-// value is greater than 0 and less than 100. Empty strings are valid; we'll use the default value on
-// SchedulerManager
-func IsPdbMaxUnavailableValid(pdbMaxUnavailable string) bool {
-	if pdbMaxUnavailable == "" {
-		return true
-	}
-
-	if strings.HasSuffix(pdbMaxUnavailable, "%") {
-		percentageValue, err := strconv.Atoi(strings.TrimSuffix(pdbMaxUnavailable, "%"))
-		if err != nil || percentageValue <= 0 || percentageValue >= 100 {
-			return false
-		}
-		return true
-	}
-
-	numericValue, err := strconv.Atoi(pdbMaxUnavailable)
-	if err != nil || numericValue <= 0 {
-		return false
-	}
-
-	return true
-}
-
 // IsImagePullPolicySupported check if received policy is supported by maestro
 func IsImagePullPolicySupported(policy string) bool {
 	policies := []string{"Always", "Never", "IfNotPresent"}
diff --git a/internal/core/services/schedulers/scheduler_manager_config.go b/internal/core/worker/config/runtime_watcher_config.go
similarity index 88%
rename from internal/core/services/schedulers/scheduler_manager_config.go
rename to internal/core/worker/config/runtime_watcher_config.go
index 2a21011eb..a86c287d5 100644
--- a/internal/core/services/schedulers/scheduler_manager_config.go
+++ b/internal/core/worker/config/runtime_watcher_config.go
@@ -20,8 +20,11 @@
 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 // SOFTWARE.
 
-package schedulers
+package config
 
-type SchedulerManagerConfig struct {
-	DefaultPdbMaxUnavailable string `validate:"required,pdb_max_unavailable"`
+import "time"
+
+type RuntimeWatcherConfig struct {
+	DisruptionWorkerIntervalSeconds time.Duration
+	DisruptionSafetyPercentage      float64
 }
diff --git a/internal/core/worker/runtimewatcher/runtime_watcher_worker.go b/internal/core/worker/runtimewatcher/runtime_watcher_worker.go
index b49e250d4..fba846e10 100644
--- a/internal/core/worker/runtimewatcher/runtime_watcher_worker.go
+++ b/internal/core/worker/runtimewatcher/runtime_watcher_worker.go
@@ -26,8 +26,10 @@ import (
 	"context"
 	"fmt"
 	"sync"
+	"time"
 
 	"github.com/topfreegames/maestro/internal/core/logs"
+	"github.com/topfreegames/maestro/internal/core/worker/config"
 
 	"github.com/topfreegames/maestro/internal/core/entities"
 	"github.com/topfreegames/maestro/internal/core/entities/game_room"
@@ -52,12 +54,14 @@ const (
 type runtimeWatcherWorker struct {
 	scheduler   *entities.Scheduler
 	roomManager ports.RoomManager
+	roomStorage ports.RoomStorage
 	// TODO(gabrielcorado): should we access the port directly? do we need to
 	// provide the same `Watcher` interface but on the RoomManager?
 	runtime    ports.Runtime
 	logger     *zap.Logger
 	ctx        context.Context
 	cancelFunc context.CancelFunc
+	config     *config.RuntimeWatcherConfig
 
 	workerWaitGroup *sync.WaitGroup
 }
@@ -65,9 +69,11 @@ func NewRuntimeWatcherWorker(scheduler *entities.Scheduler, opts *worker.WorkerO
 	return &runtimeWatcherWorker{
 		scheduler:       scheduler,
 		roomManager:     opts.RoomManager,
+		roomStorage:     opts.RoomStorage,
 		runtime:         opts.Runtime,
 		logger:          zap.L().With(zap.String(logs.LogFieldServiceName, WorkerName), zap.String(logs.LogFieldSchedulerName, scheduler.Name)),
 		workerWaitGroup: &sync.WaitGroup{},
+		config:          opts.RuntimeWatcherConfig,
 	}
 }
@@ -101,6 +107,77 @@ func (w *runtimeWatcherWorker) spawnUpdateRoomWatchers(resultChan chan game_room
 	}
 }
 
+func (w *runtimeWatcherWorker) mitigateDisruptions() error {
+	totalRoomsAmount, err := w.roomStorage.GetRoomCount(w.ctx, w.scheduler.Name)
+	if err != nil {
+		w.logger.Error(
+			"failed to get total rooms amount for scheduler",
+			zap.String("scheduler", w.scheduler.Name),
+			zap.Error(err),
+		)
+		return nil
+	}
+	mitigationQuota := 0
+	if totalRoomsAmount >= MinRoomsToApplyDisruption {
+		mitigationQuota, err = w.roomStorage.GetRoomCountByStatus(w.ctx, w.scheduler.Name, game_room.GameStatusOccupied)
+		if err != nil {
+			w.logger.Error(
+				"failed to get occupied rooms for scheduler",
+				zap.String("scheduler", w.scheduler.Name),
+				zap.Error(err),
+			)
+			return nil
+		}
+	}
+	err = w.runtime.MitigateDisruption(w.ctx, w.scheduler, mitigationQuota, w.config.DisruptionSafetyPercentage)
+	if err != nil {
+		return err
+	}
+	w.logger.Debug(
+		"mitigated disruption for occupied rooms",
+		zap.String("scheduler", w.scheduler.Name),
+		zap.Int("mitigationQuota", mitigationQuota),
+	)
+
+	return nil
+}
+
+func (w *runtimeWatcherWorker) spawnDisruptionWatcher() {
+	w.workerWaitGroup.Add(1)
+
+	go func() {
+		defer w.workerWaitGroup.Done()
+		ticker := time.NewTicker(time.Second * w.config.DisruptionWorkerIntervalSeconds)
+		defer ticker.Stop()
+
+		for {
+			select {
+			case <-ticker.C:
+				err := w.mitigateDisruptions()
+				if err != nil {
+					w.logger.Error(
+						"unrecoverable error mitigating disruption",
+						zap.String("scheduler", w.scheduler.Name),
+						zap.Error(err),
+					)
+					return
+				}
+			case <-w.ctx.Done():
+				w.logger.Info("context closed, exiting disruption watcher")
+				return
+			}
+		}
+
+	}()
+}
+
resultChan chan game_room.InstanceEvent, +) { + w.spawnUpdateRoomWatchers(resultChan) + w.spawnDisruptionWatcher() +} + func (w *runtimeWatcherWorker) Start(ctx context.Context) error { w.logger.Info("starting runtime watcher", zap.String("scheduler", w.scheduler.Name)) watcher, err := w.runtime.WatchGameRoomInstances(ctx, w.scheduler) @@ -111,7 +188,7 @@ func (w *runtimeWatcherWorker) Start(ctx context.Context) error { w.ctx, w.cancelFunc = context.WithCancel(ctx) defer w.cancelFunc() - w.spawnUpdateRoomWatchers(watcher.ResultChan()) + w.spawnWatchers(watcher.ResultChan()) w.logger.Info("spawned all goroutines", zap.String("scheduler", w.scheduler.Name)) w.workerWaitGroup.Wait() diff --git a/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go b/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go index f92fa70d0..b7d0d3303 100644 --- a/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go +++ b/internal/core/worker/runtimewatcher/runtime_watcher_worker_test.go @@ -37,19 +37,26 @@ import ( "github.com/topfreegames/maestro/internal/core/entities" "github.com/topfreegames/maestro/internal/core/entities/game_room" porterrors "github.com/topfreegames/maestro/internal/core/ports/errors" + "github.com/topfreegames/maestro/internal/core/ports/mock" mockports "github.com/topfreegames/maestro/internal/core/ports/mock" "github.com/topfreegames/maestro/internal/core/worker" + "github.com/topfreegames/maestro/internal/core/worker/config" ) -func workerOptions(t *testing.T) (*gomock.Controller, *mockports.MockRuntime, *mockports.MockRoomManager, *worker.WorkerOptions) { +func workerOptions(t *testing.T) (*gomock.Controller, *mockports.MockRuntime, *mockports.MockRoomManager, *mockports.MockRoomStorage, *worker.WorkerOptions) { mockCtrl := gomock.NewController(t) runtime := mockports.NewMockRuntime(mockCtrl) roomManager := mockports.NewMockRoomManager(mockCtrl) + roomStorage := mock.NewMockRoomStorage(mockCtrl) - return mockCtrl, runtime, roomManager, &worker.WorkerOptions{ + return mockCtrl, runtime, roomManager, roomStorage, &worker.WorkerOptions{ Runtime: runtime, RoomManager: roomManager, + RoomStorage: roomStorage, + RuntimeWatcherConfig: &config.RuntimeWatcherConfig{ + DisruptionWorkerIntervalSeconds: 1, + }, } } @@ -61,7 +68,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { for _, event := range events { t.Run(fmt.Sprintf("when %s happens, updates instance", event.String()), func(t *testing.T) { - mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -102,7 +109,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run(fmt.Sprintf("when %s happens, and update instance fails, does nothing", event.String()), func(t *testing.T) { - mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -144,7 +151,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { } t.Run("fails to start watcher", func(t *testing.T) { - _, runtime, _, workerOptions := workerOptions(t) + _, runtime, _, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -167,7 +174,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { 
}) t.Run("clean room state on delete event", func(t *testing.T) { - mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -206,7 +213,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run("when clean room state fails, does nothing", func(t *testing.T) { - mockCtrl, runtime, roomManager, workerOptions := workerOptions(t) + mockCtrl, runtime, roomManager, _, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -245,7 +252,7 @@ func TestRuntimeWatcher_Start(t *testing.T) { }) t.Run("when resultChan is closed, worker stops without error", func(t *testing.T) { - mockCtrl, runtime, _, workerOptions := workerOptions(t) + mockCtrl, runtime, _, roomStorage, workerOptions := workerOptions(t) scheduler := &entities.Scheduler{Name: "test"} watcher := NewRuntimeWatcherWorker(scheduler, workerOptions) @@ -256,6 +263,11 @@ func TestRuntimeWatcher_Start(t *testing.T) { runtimeWatcher.EXPECT().ResultChan().Return(resultChan) runtimeWatcher.EXPECT().Stop() + runtime.EXPECT().MitigateDisruption(gomock.Any(), gomock.Any(), 0, 0.0).Return(nil).MinTimes(0) + roomStorage.EXPECT().GetRoomCount(gomock.Any(), gomock.Any()).Return(MinRoomsToApplyDisruption, nil).MinTimes(0) + // GetRoomCountByStatus is only called when GetRoomCount reports at least MinRoomsToApplyDisruption rooms + roomStorage.EXPECT().GetRoomCountByStatus(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, nil).MinTimes(0) + ctx, cancelFunc := context.WithCancel(context.Background()) go func() { diff --git a/internal/core/worker/worker.go b/internal/core/worker/worker.go index 1ed3ab837..2f25dd533 100644 --- a/internal/core/worker/worker.go +++ b/internal/core/worker/worker.go @@ -55,6 +55,7 @@ type WorkerOptions struct { RoomStorage ports.RoomStorage InstanceStorage ports.GameRoomInstanceStorage MetricsReporterConfig *config.MetricsReporterConfig + RuntimeWatcherConfig *config.RuntimeWatcherConfig } // Configuration holds all worker configuration parameters. diff --git a/internal/service/adapters.go b/internal/service/adapters.go index 85126edaf..29dcbb7de 100644 --- a/internal/service/adapters.go +++ b/internal/service/adapters.go @@ -102,8 +102,8 @@ const ( ) // NewSchedulerManager instantiates a new scheduler manager. 
-func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage, config schedulers.SchedulerManagerConfig) ports.SchedulerManager { - return schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage, config) +func NewSchedulerManager(schedulerStorage ports.SchedulerStorage, schedulerCache ports.SchedulerCache, operationManager ports.OperationManager, roomStorage ports.RoomStorage) ports.SchedulerManager { + return schedulers.NewSchedulerManager(schedulerStorage, schedulerCache, operationManager, roomStorage) } // NewOperationManager instantiates a new operation manager diff --git a/internal/service/config.go b/internal/service/config.go index cefb42711..7f7fa67e5 100644 --- a/internal/service/config.go +++ b/internal/service/config.go @@ -25,17 +25,12 @@ package service import ( "time" - "github.com/topfreegames/maestro/internal/core/entities/pdb" - "github.com/topfreegames/maestro/internal/core/logs" "github.com/topfreegames/maestro/internal/core/operations/rooms/add" "github.com/topfreegames/maestro/internal/core/operations/schedulers/newversion" "github.com/topfreegames/maestro/internal/core/services/events" operationmanager "github.com/topfreegames/maestro/internal/core/services/operations" roommanager "github.com/topfreegames/maestro/internal/core/services/rooms" - schedulerManager "github.com/topfreegames/maestro/internal/core/services/schedulers" "github.com/topfreegames/maestro/internal/core/services/workers" - "github.com/topfreegames/maestro/internal/validations" - "go.uber.org/zap" "github.com/topfreegames/maestro/internal/core/operations/healthcontroller" "github.com/topfreegames/maestro/internal/core/worker" @@ -53,7 +48,6 @@ const ( operationLeaseTTLMillisConfigPath = "services.operationManager.operationLeaseTTLMillis" schedulerCacheTTLMillisConfigPath = "services.eventsForwarder.schedulerCacheTTLMillis" operationsRoomsAddLimitConfigPath = "operations.rooms.add.limit" - schedulerDefaultPdbMaxUnavailablePath = "services.schedulerManager.defaultPdbMaxUnavailable" ) // NewCreateSchedulerVersionConfig instantiate a new CreateSchedulerVersionConfig to be used by the NewSchedulerVersion operation to customize its configuration. @@ -111,21 +105,6 @@ func NewRoomManagerConfig(c config.Config) (roommanager.RoomManagerConfig, error return roomManagerConfig, nil } -// NewSchedulerManagerConfig instantiate a new SchedulerManagerConfig to be used by the SchedulerManager to customize its configuration. -func NewSchedulerManagerConfig(c config.Config) (schedulerManager.SchedulerManagerConfig, error) { - schedulerConfig := schedulerManager.SchedulerManagerConfig{ - DefaultPdbMaxUnavailable: c.GetString(schedulerDefaultPdbMaxUnavailablePath), - } - if err := validations.Validate.Struct(schedulerConfig); err != nil { - zap.L().With(zap.String(logs.LogFieldComponent, "service"), zap.String(logs.LogFieldServiceName, "config")).Info( - "error parsing default pdb max unavailable, using default", - zap.String("Default const PDB: ", pdb.DefaultPdbMaxUnavailablePercentage), - ) - schedulerConfig.DefaultPdbMaxUnavailable = pdb.DefaultPdbMaxUnavailablePercentage - } - return schedulerConfig, nil -} - // NewWorkersConfig instantiate a new workers Config stucture to be used by the workers to customize them from the config package. 
func NewWorkersConfig(c config.Config) (worker.Configuration, error) { healthControllerExecutionInterval := c.GetDuration(healthControllerExecutionIntervalConfigPath) diff --git a/internal/validations/validations.go b/internal/validations/validations.go index 8e93783ca..df53035ba 100644 --- a/internal/validations/validations.go +++ b/internal/validations/validations.go @@ -81,12 +81,6 @@ func RegisterValidations() error { } addTranslation(Validate, "max_surge", "{0} must be a number greater than zero or a number greater than zero with suffix '%'") - err = Validate.RegisterValidation("pdb_max_unavailable", pdbMaxUnavailableValidate) - if err != nil { - return errors.New("could not register pdbMaxUnavailableValidate") - } - addTranslation(Validate, "pdb_max_unavailable", "{0} must be either an empty string (accept default value), a number greater than zero or a percentage greater than zero and less than 100 with suffix '%'") - err = Validate.RegisterValidation("kube_resource_name", kubeResourceNameValidate) if err != nil { return errors.New("could not register kubeResourceNameValidate") @@ -146,10 +140,6 @@ func maxSurgeValidate(fl validator.FieldLevel) bool { return validations.IsMaxSurgeValid(fl.Field().String()) } -func pdbMaxUnavailableValidate(fl validator.FieldLevel) bool { - return validations.IsPdbMaxUnavailableValid(fl.Field().String()) -} - func semanticValidate(fl validator.FieldLevel) bool { return validations.IsVersionValid(fl.Field().String()) } diff --git a/pkg/api/v1/messages.pb.go b/pkg/api/v1/messages.pb.go index 4caa0a891..4bbc6f264 100644 --- a/pkg/api/v1/messages.pb.go +++ b/pkg/api/v1/messages.pb.go @@ -875,12 +875,10 @@ type Scheduler struct { Autoscaling *Autoscaling `protobuf:"bytes,9,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,10,rep,name=forwarders,proto3" json:"forwarders,omitempty"` - // PDB MaxUnavailable Specification - PdbMaxUnavailable string `protobuf:"bytes,11,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // List with annotations - Annotations map[string]string `protobuf:"bytes,12,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,11,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // List with labels - Labels map[string]string `protobuf:"bytes,13,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,12,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Scheduler) Reset() { @@ -985,13 +983,6 @@ func (x *Scheduler) GetForwarders() []*Forwarder { return nil } -func (x *Scheduler) GetPdbMaxUnavailable() string { - if x != nil { - return x.PdbMaxUnavailable - } - return "" -} - func (x *Scheduler) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -2329,7 +2320,7 @@ var file_api_v1_messages_proto_rawDesc = []byte{ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x67, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x74, 0x6f, 0x6c, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0b, 0x0a, 
0x09, 0x5f, 0x61, 0x66, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x79, 0x22, - 0xc3, 0x05, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x93, 0x05, 0x0a, 0x09, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, @@ -2353,15 +2344,12 @@ var file_api_v1_messages_proto_rawDesc = []byte{ 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, - 0x13, 0x70, 0x64, 0x62, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, - 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x44, 0x0a, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x44, 0x0a, + 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0d, 0x20, + 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, diff --git a/pkg/api/v1/schedulers.pb.go b/pkg/api/v1/schedulers.pb.go index b65ab01f0..9f19c4c92 100644 --- a/pkg/api/v1/schedulers.pb.go +++ b/pkg/api/v1/schedulers.pb.go @@ -209,12 +209,10 @@ type CreateSchedulerRequest struct { Autoscaling *Autoscaling `protobuf:"bytes,7,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,8,rep,name=forwarders,proto3" json:"forwarders,omitempty"` - // PDB MaxUnavailable Specification - PdbMaxUnavailable string `protobuf:"bytes,9,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // Add annotations for scheduler - Annotations map[string]string `protobuf:"bytes,10,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Add labels for scheduler - Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" 
json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *CreateSchedulerRequest) Reset() { @@ -305,13 +303,6 @@ func (x *CreateSchedulerRequest) GetForwarders() []*Forwarder { return nil } -func (x *CreateSchedulerRequest) GetPdbMaxUnavailable() string { - if x != nil { - return x.PdbMaxUnavailable - } - return "" -} - func (x *CreateSchedulerRequest) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -455,12 +446,10 @@ type NewSchedulerVersionRequest struct { Autoscaling *Autoscaling `protobuf:"bytes,7,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,8,rep,name=forwarders,proto3" json:"forwarders,omitempty"` - // PDB MaxUnavailable Specification - PdbMaxUnavailable string `protobuf:"bytes,9,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // New annotations for scheduler - Annotations map[string]string `protobuf:"bytes,10,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // New labels for scheduler - Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *NewSchedulerVersionRequest) Reset() { @@ -551,13 +540,6 @@ func (x *NewSchedulerVersionRequest) GetForwarders() []*Forwarder { return nil } -func (x *NewSchedulerVersionRequest) GetPdbMaxUnavailable() string { - if x != nil { - return x.PdbMaxUnavailable - } - return "" -} - func (x *NewSchedulerVersionRequest) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -641,12 +623,10 @@ type PatchSchedulerRequest struct { Autoscaling *OptionalAutoscaling `protobuf:"bytes,6,opt,name=autoscaling,proto3,oneof" json:"autoscaling,omitempty"` // List of Scheduler forwarders Forwarders []*Forwarder `protobuf:"bytes,7,rep,name=forwarders,proto3" json:"forwarders,omitempty"` - // PDB MaxUnavailable Specification - PdbMaxUnavailable string `protobuf:"bytes,8,opt,name=pdb_max_unavailable,json=pdbMaxUnavailable,proto3" json:"pdb_max_unavailable,omitempty"` // Annotations declaration for the scheduler - Annotations map[string]string `protobuf:"bytes,9,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Annotations map[string]string `protobuf:"bytes,8,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Labels declaration for the scheduler - Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Labels 
map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *PatchSchedulerRequest) Reset() { @@ -730,13 +710,6 @@ func (x *PatchSchedulerRequest) GetForwarders() []*Forwarder { return nil } -func (x *PatchSchedulerRequest) GetPdbMaxUnavailable() string { - if x != nil { - return x.PdbMaxUnavailable - } - return "" -} - func (x *PatchSchedulerRequest) GetAnnotations() map[string]string { if x != nil { return x.Annotations @@ -1245,7 +1218,7 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x09, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xad, 0x05, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xfd, 0x04, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x61, 0x6d, 0x65, 0x18, 0x02, @@ -1266,16 +1239,13 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x64, 0x62, 0x5f, 0x6d, 0x61, - 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, - 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x51, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x70, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x51, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x42, 0x0a, 0x06, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3e, 0x0a, @@ -1299,7 +1269,7 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, - 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xa5, 0x05, 0x0a, 0x1a, 0x4e, + 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x22, 0xf5, 0x04, 0x0a, 0x1a, 0x4e, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, @@ -1320,16 +1290,13 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x6e, 0x67, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x52, 0x0a, 0x66, 0x6f, - 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x70, 0x64, 0x62, 0x5f, - 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, 0x61, 0x78, 0x55, 0x6e, 0x61, - 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, + 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x46, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, @@ -1346,7 +1313,7 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xdf, 0x05, 0x0a, 0x15, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, + 0x6f, 0x6e, 0x49, 0x64, 0x22, 0xaf, 0x05, 0x0a, 0x15, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, @@ -1367,16 +1334,13 @@ var file_api_v1_schedulers_proto_rawDesc = []byte{ 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 
0x72, 0x64, 0x65, 0x72, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, - 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x2e, 0x0a, - 0x13, 0x70, 0x64, 0x62, 0x5f, 0x6d, 0x61, 0x78, 0x5f, 0x75, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x70, 0x64, 0x62, 0x4d, - 0x61, 0x78, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x50, 0x0a, - 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09, 0x20, 0x03, + 0x72, 0x52, 0x0a, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x50, 0x0a, + 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x41, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x41, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x61, 0x74, 0x63, 0x68, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, diff --git a/proto/api/v1/messages.proto b/proto/api/v1/messages.proto index 07aab4be3..0f6e267ec 100644 --- a/proto/api/v1/messages.proto +++ b/proto/api/v1/messages.proto @@ -179,12 +179,10 @@ message Scheduler { optional Autoscaling autoscaling = 9; // List of Scheduler forwarders repeated Forwarder forwarders = 10; - // PDB MaxUnavailable Specification - string pdb_max_unavailable = 11; // List with annotations - map<string, string> annotations = 12; + map<string, string> annotations = 11; // List with labels - map<string, string> labels = 13; + map<string, string> labels = 12; } // Scheduler message used in the "ListScheduler version" definition. The "spec" is not implemented diff --git a/proto/api/v1/schedulers.proto b/proto/api/v1/schedulers.proto index d7dfcbc42..c855df3f7 100644 --- a/proto/api/v1/schedulers.proto +++ b/proto/api/v1/schedulers.proto @@ -123,12 +123,10 @@ message CreateSchedulerRequest { optional Autoscaling autoscaling = 7; // List of Scheduler forwarders repeated Forwarder forwarders = 8; - // PDB MaxUnavailable Specification - string pdb_max_unavailable = 9; // Add annotations for scheduler - map<string, string> annotations = 10; + map<string, string> annotations = 9; // Add labels for scheduler - map<string, string> labels = 11; + map<string, string> labels = 10; } // Get Scheduler operation request @@ -163,12 +161,10 @@ message NewSchedulerVersionRequest { optional Autoscaling autoscaling = 7; // List of Scheduler forwarders repeated Forwarder forwarders = 8; - // PDB MaxUnavailable Specification - string pdb_max_unavailable = 9; // New annotations for scheduler - map<string, string> annotations = 10; + map<string, string> annotations = 9; // New labels for scheduler - map<string, string> labels = 11; + map<string, string> labels = 10; } // Update schedule operation response payload. 
@@ -193,12 +189,10 @@ message PatchSchedulerRequest { optional OptionalAutoscaling autoscaling = 6; // List of Scheduler forwarders repeated Forwarder forwarders = 7; - // PDB MaxUnavailable Specification - string pdb_max_unavailable = 8; // Annotations declaration for the scheduler - map<string, string> annotations = 9; + map<string, string> annotations = 8; // Labels declaration for the scheduler - map<string, string> labels = 10; + map<string, string> labels = 9; } // PatchSchedulerResponse have the operation response id that represents the operation creted to this change. diff --git a/proto/apidocs.swagger.json b/proto/apidocs.swagger.json index 93e41eda2..a4c112706 100644 --- a/proto/apidocs.swagger.json +++ b/proto/apidocs.swagger.json @@ -503,10 +503,6 @@ }, "title": "List of Scheduler forwarders" }, - "pdbMaxUnavailable": { - "type": "string", - "title": "PDB MaxUnavailable Specification" - }, "annotations": { "type": "object", "additionalProperties": { @@ -591,10 +587,6 @@ }, "title": "List of Scheduler forwarders" }, - "pdbMaxUnavailable": { - "type": "string", - "title": "PDB MaxUnavailable Specification" - }, "annotations": { "type": "object", "additionalProperties": { @@ -1249,10 +1241,6 @@ }, "title": "List of Scheduler forwarders" }, - "pdbMaxUnavailable": { - "type": "string", - "title": "PDB MaxUnavailable Specification" - }, "annotations": { "type": "object", "additionalProperties": { @@ -1753,10 +1741,6 @@ }, "title": "List of Scheduler forwarders" }, - "pdbMaxUnavailable": { - "type": "string", - "title": "PDB MaxUnavailable Specification" - }, "annotations": { "type": "object", "additionalProperties": { diff --git a/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json b/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json index e97c6e418..6d643d37f 100644 --- a/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json +++ b/test/data/handlers/fixtures/response/schedulers_handler/create_scheduler.json @@ -77,7 +77,6 @@ } } ], - "pdbMaxUnavailable": "10%", "annotations": { "imageregistry": "https://docker.hub.com/" }, diff --git a/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json b/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json index 9b5eef382..191e557e0 100644 --- a/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json +++ b/test/data/handlers/fixtures/response/schedulers_handler/get_scheduler.json @@ -77,7 +77,6 @@ } } ], - "pdbMaxUnavailable": "10%", "annotations": {}, "labels": {} }
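A few behaviors this patch introduces are easier to see in isolation. The heart of the change is runtimeWatcherWorker.mitigateDisruptions: storage failures are treated as transient (logged, the tick is skipped, nil is returned), while an error from Runtime.MitigateDisruption is unrecoverable and stops the watcher goroutine. The quota it passes to MitigateDisruption, alongside DisruptionSafetyPercentage, follows a small rule reproduced below as a self-contained sketch with hypothetical names and a simplified stand-in for ports.RoomStorage (the real GetRoomCountByStatus takes a game_room.GameRoomStatus rather than a bool, and MinRoomsToApplyDisruption's value is not shown in this diff):

```go
package main

import (
	"context"
	"fmt"
)

// Placeholder threshold; the patch references MinRoomsToApplyDisruption but
// its value does not appear in this diff.
const minRoomsToApplyDisruption = 2

// roomCounts is a simplified stand-in for the two ports.RoomStorage calls
// the worker makes on every tick.
type roomCounts interface {
	GetRoomCount(ctx context.Context, scheduler string) (int, error)
	GetOccupiedRoomCount(ctx context.Context, scheduler string) (int, error)
}

// mitigationQuota mirrors the rule in the patch: schedulers below the
// minimum room count keep a quota of zero; otherwise the quota is the number
// of occupied rooms, i.e. the rooms a voluntary disruption must spare.
func mitigationQuota(ctx context.Context, s roomCounts, scheduler string) (int, error) {
	total, err := s.GetRoomCount(ctx, scheduler)
	if err != nil {
		return 0, err
	}
	if total < minRoomsToApplyDisruption {
		return 0, nil
	}
	return s.GetOccupiedRoomCount(ctx, scheduler)
}

type fakeCounts struct{ total, occupied int }

func (f fakeCounts) GetRoomCount(context.Context, string) (int, error)         { return f.total, nil }
func (f fakeCounts) GetOccupiedRoomCount(context.Context, string) (int, error) { return f.occupied, nil }

func main() {
	q, _ := mitigationQuota(context.Background(), fakeCounts{total: 10, occupied: 7}, "match")
	fmt.Println(q) // 7: enough rooms, protect every occupied one
	q, _ = mitigationQuota(context.Background(), fakeCounts{total: 1, occupied: 1}, "match")
	fmt.Println(q) // 0: below the threshold, no mitigation applied
}
```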
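Despite its time.Duration type, DisruptionWorkerIntervalSeconds is consumed as a unit-less count of seconds: spawnDisruptionWatcher builds its ticker with time.NewTicker(time.Second * w.config.DisruptionWorkerIntervalSeconds). Assuming the usual viper-style coercion behind config.GetDuration (a bare number becomes that many nanoseconds), the double scaling only works when runtimeWatcher.disruptionWorker.intervalSeconds is written without a unit:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A bare 30 in the config file arrives as time.Duration(30), i.e. 30ns;
	// the worker's multiplication rescales it to the intended tick period.
	fromBareNumber := time.Duration(30)
	fmt.Println(time.Second * fromBareNumber) // 30s

	// A value written with a unit ("30s") arrives as 30e9 ns; scaling it by
	// time.Second again overflows int64 (here into a negative Duration),
	// and time.NewTicker panics on a non-positive period.
	fromUnitString := 30 * time.Second
	fmt.Println(time.Second * fromUnitString) // negative: overflowed
}
```

Naming the field as a plain interval and dropping the time.Second factor would let the config accept real duration strings such as "30s"; as written, a unit in the config value turns into a runtime panic.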
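On the wire format, the patch removes pdb_max_unavailable and renumbers annotations and labels into the freed slots instead of reserving the retired numbers. Because the removed string field and the new map entries share the length-delimited wire type, a payload from an old client is not skipped as an unknown field; a new reader tries to decode the string as a map entry message. A sketch of the collision for Scheduler field 11, using google.golang.org/protobuf/encoding/protowire (assumed available, since the regenerated stubs already depend on google.golang.org/protobuf):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// An old client still serializes Scheduler.pdb_max_unavailable ("10%")
	// as length-delimited field 11.
	payload := protowire.AppendTag(nil, 11, protowire.BytesType)
	payload = protowire.AppendString(payload, "10%")

	// After this patch, field 11 is the annotations map, which is also
	// length-delimited, so the tag still looks valid to a new reader...
	num, typ, n := protowire.ConsumeTag(payload)
	fmt.Println(num, typ, n) // 11 2 1

	// ...but the bytes "10%" are then parsed as a Scheduler.AnnotationsEntry
	// message: 0x31 ('1') reads as field 6 with wire type fixed64, too few
	// bytes follow, and unmarshalling the whole Scheduler fails.
	entry, _ := protowire.ConsumeBytes(payload[n:])
	_, _, bad := protowire.ConsumeField(entry)
	fmt.Println(bad < 0) // true: malformed as a message
}
```

Since all stubs here are regenerated in lockstep, same-version deployments are unaffected; but mixed-version clients that still set pdbMaxUnavailable will produce payloads the new schema cannot parse, which is what `reserved 11;` (and its equivalents in the request messages) would rule out.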