From d3fafbcccaa21d20722c25714bfa4155fb476760 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 15:01:30 -0400 Subject: [PATCH 01/85] Add ApplyAll function --- controllers/apiserver.go | 8 +++----- controllers/common.go | 10 ++++------ controllers/database.go | 9 ++++----- controllers/dspipeline_controller.go | 13 ++++++++++++- controllers/mlmd.go | 9 ++++----- controllers/mlpipeline_ui.go | 8 +++----- controllers/persistence_agent.go | 8 +++----- controllers/scheduled_workflow.go | 8 +++----- controllers/storage.go | 11 +++++------ controllers/util/util.go | 15 +++++++++++++++ 10 files changed, 56 insertions(+), 43 deletions(-) diff --git a/controllers/apiserver.go b/controllers/apiserver.go index 1971b0e67..14b1b2c6e 100644 --- a/controllers/apiserver.go +++ b/controllers/apiserver.go @@ -59,11 +59,9 @@ func (r *DSPAReconciler) ReconcileAPIServer(ctx context.Context, dsp *dspav1alph log.Info("Applying APIServer Resources") - for _, template := range apiServerTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, apiServerTemplates) + if err != nil { + return err } if dsp.Spec.APIServer.EnableRoute { diff --git a/controllers/common.go b/controllers/common.go index f3982e236..30249a819 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -30,14 +30,12 @@ func (r *DSPAReconciler) ReconcileCommon(dsp *dspav1alpha1.DataSciencePipelinesA log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) log.Info("Applying Common Resources") - for _, template := range commonTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, commonTemplates) + if err != nil { + return err } - err := r.ApplyWithoutOwner(params, commonCusterRolebindingTemplate) + err = r.ApplyWithoutOwner(params, commonCusterRolebindingTemplate) if err != nil { return err } diff --git a/controllers/database.go 
b/controllers/database.go index 33a083a53..205cec4ce 100644 --- a/controllers/database.go +++ b/controllers/database.go @@ -20,6 +20,7 @@ import ( "database/sql" b64 "encoding/base64" "fmt" + _ "github.com/go-sql-driver/mysql" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" @@ -111,11 +112,9 @@ func (r *DSPAReconciler) ReconcileDatabase(ctx context.Context, dsp *dspav1alpha } } else if deployMariaDB || deployDefaultDB { log.Info("Applying mariaDB resources.") - for _, template := range dbTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, dbTemplates) + if err != nil { + return err } // If no database was not specified, deploy mariaDB by default. // Update the CR with the state of mariaDB to accurately portray diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 5f87a8545..6328ddb70 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -19,9 +19,10 @@ package controllers import ( "context" "fmt" - "sigs.k8s.io/controller-runtime/pkg/controller" "time" + "sigs.k8s.io/controller-runtime/pkg/controller" + "github.com/go-logr/logr" mf "github.com/manifestival/manifestival" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" @@ -53,6 +54,16 @@ type DSPAReconciler struct { TemplatesPath string } +func (r *DSPAReconciler) ApplyAll(owner mf.Owner, params *DSPAParams, templates []string, fns ...mf.Transformer) error { + for _, template := range templates { + err := r.Apply(owner, params, template) + if err != nil { + return err + } + } + return nil +} + func (r *DSPAReconciler) Apply(owner mf.Owner, params *DSPAParams, template string, fns ...mf.Transformer) error { tmplManifest, err := config.Manifest(r.Client, r.TemplatesPath+template, params) if err != nil { diff --git 
a/controllers/mlmd.go b/controllers/mlmd.go index 1823f0131..0b63c4baf 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -40,12 +40,11 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp if params.UsingMLMD(dsp) { log.Info("Applying ML-Metadata (MLMD) Resources") - for _, template := range mlmdTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, mlmdTemplates) + if err != nil { + return err } + log.Info("Finished applying MLMD Resources") } return nil diff --git a/controllers/mlpipeline_ui.go b/controllers/mlpipeline_ui.go index 7e6c6f425..c32fc3d28 100644 --- a/controllers/mlpipeline_ui.go +++ b/controllers/mlpipeline_ui.go @@ -42,11 +42,9 @@ func (r *DSPAReconciler) ReconcileUI(dsp *dspav1alpha1.DataSciencePipelinesAppli } log.Info("Applying MlPipelineUI Resources") - for _, template := range mlPipelineUITemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, mlPipelineUITemplates) + if err != nil { + return err } log.Info("Finished applying MlPipelineUI Resources") diff --git a/controllers/persistence_agent.go b/controllers/persistence_agent.go index ca6462cb3..31fb90873 100644 --- a/controllers/persistence_agent.go +++ b/controllers/persistence_agent.go @@ -39,11 +39,9 @@ func (r *DSPAReconciler) ReconcilePersistenceAgent(dsp *dspav1alpha1.DataScience log.Info("Applying PersistenceAgent Resources") - for _, template := range persistenceAgentTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, persistenceAgentTemplates) + if err != nil { + return err } log.Info("Finished applying PersistenceAgent Resources") diff --git a/controllers/scheduled_workflow.go b/controllers/scheduled_workflow.go index 300fd71b9..8f0f5e0ff 100644 --- a/controllers/scheduled_workflow.go +++ b/controllers/scheduled_workflow.go @@ -41,11 +41,9 @@ func 
(r *DSPAReconciler) ReconcileScheduledWorkflow(dsp *dspav1alpha1.DataScienc log.Info("Applying ScheduledWorkflow Resources") - for _, template := range scheduledWorkflowTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, scheduledWorkflowTemplates) + if err != nil { + return err } log.Info("Finished applying ScheduledWorkflow Resources") diff --git a/controllers/storage.go b/controllers/storage.go index 2d7454c9d..727012948 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -21,12 +21,13 @@ import ( "encoding/base64" "errors" "fmt" + "net/http" + "github.com/go-logr/logr" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" - "net/http" ) const storageSecret = "minio/secret.yaml.tmpl" @@ -161,11 +162,9 @@ func (r *DSPAReconciler) ReconcileStorage(ctx context.Context, dsp *dspav1alpha1 } } else if deployMinio { log.Info("Applying object storage resources.") - for _, template := range storageTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, storageTemplates) + if err != nil { + return err } // If no storage was not specified, deploy minio by default. // Update the CR with the state of minio to accurately portray diff --git a/controllers/util/util.go b/controllers/util/util.go index 4e86338fc..ca3118f51 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -17,6 +17,8 @@ limitations under the License. 
package util import ( + "os" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -44,3 +46,16 @@ func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.Depl func BoolPointer(b bool) *bool { return &b } + +func GetTemplatesInDir(templateDirectory string) ([]string, error) { + entries, err := os.ReadDir(templateDirectory) + if err != nil { + return nil, err + } + + var templates []string + for _, e := range entries { + templates = append(templates, e.Name()) + } + return templates, nil +} From 7d60bf9634c2484bc7e4fe5362026e141814d90d Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 15:44:03 -0400 Subject: [PATCH 02/85] Add ApplyDir Reconciler function, Dynamically retrieve templates --- controllers/apiserver.go | 17 ++--------------- controllers/dspipeline_controller.go | 8 ++++++++ controllers/mlmd.go | 15 ++------------- controllers/mlpipeline_ui.go | 13 ++----------- controllers/persistence_agent.go | 9 ++------- controllers/scheduled_workflow.go | 11 ++--------- controllers/util/util.go | 9 +++++---- 7 files changed, 23 insertions(+), 59 deletions(-) diff --git a/controllers/apiserver.go b/controllers/apiserver.go index 14b1b2c6e..816b637cb 100644 --- a/controllers/apiserver.go +++ b/controllers/apiserver.go @@ -24,19 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var apiServerTemplates = []string{ - "apiserver/artifact_script.yaml.tmpl", - "apiserver/role_ds-pipeline.yaml.tmpl", - "apiserver/role_pipeline-runner.yaml.tmpl", - "apiserver/role_ds-pipeline-user-access.yaml.tmpl", - "apiserver/rolebinding_ds-pipeline.yaml.tmpl", - "apiserver/rolebinding_pipeline-runner.yaml.tmpl", - "apiserver/sa_ds-pipeline.yaml.tmpl", - "apiserver/sa_pipeline-runner.yaml.tmpl", - "apiserver/service.yaml.tmpl", - "apiserver/deployment.yaml.tmpl", - "apiserver/monitor.yaml.tmpl", -} +var apiServerTemplatesDir = "apiserver" // serverRoute is a resource deployed conditionally // as such it is handled separately 
@@ -58,8 +46,7 @@ func (r *DSPAReconciler) ReconcileAPIServer(ctx context.Context, dsp *dspav1alph } log.Info("Applying APIServer Resources") - - err := r.ApplyAll(dsp, params, apiServerTemplates) + err := r.ApplyDir(dsp, params, apiServerTemplatesDir) if err != nil { return err } diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 6328ddb70..185c3b59c 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -54,6 +54,14 @@ type DSPAReconciler struct { TemplatesPath string } +func (r *DSPAReconciler) ApplyDir(owner mf.Owner, params *DSPAParams, directory string, fns ...mf.Transformer) error { + templates, err := util.GetTemplatesInDir(r.TemplatesPath, directory) + if err != nil { + return err + } + return r.ApplyAll(owner, params, templates) +} + func (r *DSPAReconciler) ApplyAll(owner mf.Owner, params *DSPAParams, templates []string, fns ...mf.Transformer) error { for _, template := range templates { err := r.Apply(owner, params, template) diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 0b63c4baf..78db0983c 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -19,18 +19,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var mlmdTemplates = []string{ - "ml-metadata/metadata-envoy.configmap.yaml.tmpl", - "ml-metadata/metadata-envoy.deployment.yaml.tmpl", - "ml-metadata/metadata-envoy.service.yaml.tmpl", - "ml-metadata/metadata-grpc.deployment.yaml.tmpl", - "ml-metadata/metadata-grpc.service.yaml.tmpl", - "ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl", - "ml-metadata/metadata-writer.deployment.yaml.tmpl", - "ml-metadata/metadata-writer.role.yaml.tmpl", - "ml-metadata/metadata-writer.rolebinding.yaml.tmpl", - "ml-metadata/metadata-writer.serviceaccount.yaml.tmpl", -} +var mlmdTemplatesDir = "ml-metadata" func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApplication, params 
*DSPAParams) error { @@ -40,7 +29,7 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp if params.UsingMLMD(dsp) { log.Info("Applying ML-Metadata (MLMD) Resources") - err := r.ApplyAll(dsp, params, mlmdTemplates) + err := r.ApplyDir(dsp, params, mlmdTemplatesDir) if err != nil { return err } diff --git a/controllers/mlpipeline_ui.go b/controllers/mlpipeline_ui.go index c32fc3d28..16a87a9c2 100644 --- a/controllers/mlpipeline_ui.go +++ b/controllers/mlpipeline_ui.go @@ -20,16 +20,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var mlPipelineUITemplates = []string{ - "mlpipelines-ui/configmap.yaml.tmpl", - "mlpipelines-ui/deployment.yaml.tmpl", - "mlpipelines-ui/role.yaml.tmpl", - "mlpipelines-ui/rolebinding.yaml.tmpl", - "mlpipelines-ui/route.yaml.tmpl", - "mlpipelines-ui/sa-ds-pipeline-ui.yaml.tmpl", - "mlpipelines-ui/sa_ds-pipelines-viewer.yaml.tmpl", - "mlpipelines-ui/service.yaml.tmpl", -} +var mlPipelineUITemplatesDir = "mlpipelines-ui" func (r *DSPAReconciler) ReconcileUI(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { @@ -42,7 +33,7 @@ func (r *DSPAReconciler) ReconcileUI(dsp *dspav1alpha1.DataSciencePipelinesAppli } log.Info("Applying MlPipelineUI Resources") - err := r.ApplyAll(dsp, params, mlPipelineUITemplates) + err := r.ApplyDir(dsp, params, mlPipelineUITemplatesDir) if err != nil { return err } diff --git a/controllers/persistence_agent.go b/controllers/persistence_agent.go index 31fb90873..94f81b66f 100644 --- a/controllers/persistence_agent.go +++ b/controllers/persistence_agent.go @@ -20,12 +20,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var persistenceAgentTemplates = []string{ - "persistence-agent/deployment.yaml.tmpl", - "persistence-agent/sa.yaml.tmpl", - "persistence-agent/role.yaml.tmpl", - "persistence-agent/rolebinding.yaml.tmpl", -} +var persistenceAgentTemplatesDir 
= "persistence-agent" func (r *DSPAReconciler) ReconcilePersistenceAgent(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { @@ -39,7 +34,7 @@ func (r *DSPAReconciler) ReconcilePersistenceAgent(dsp *dspav1alpha1.DataScience log.Info("Applying PersistenceAgent Resources") - err := r.ApplyAll(dsp, params, persistenceAgentTemplates) + err := r.ApplyDir(dsp, params, persistenceAgentTemplatesDir) if err != nil { return err } diff --git a/controllers/scheduled_workflow.go b/controllers/scheduled_workflow.go index 8f0f5e0ff..68e62c1b5 100644 --- a/controllers/scheduled_workflow.go +++ b/controllers/scheduled_workflow.go @@ -20,14 +20,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var scheduledWorkflowTemplates = []string{ - "scheduled-workflow/deployment.yaml.tmpl", - "scheduled-workflow/role.yaml.tmpl", - "scheduled-workflow/rolebinding.yaml.tmpl", - "scheduled-workflow/sa.yaml.tmpl", - "scheduled-workflow/role.yaml.tmpl", - "scheduled-workflow/rolebinding.yaml.tmpl", -} +var scheduledWorkflowTemplatesDir = "scheduled-workflow" func (r *DSPAReconciler) ReconcileScheduledWorkflow(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { @@ -41,7 +34,7 @@ func (r *DSPAReconciler) ReconcileScheduledWorkflow(dsp *dspav1alpha1.DataScienc log.Info("Applying ScheduledWorkflow Resources") - err := r.ApplyAll(dsp, params, scheduledWorkflowTemplates) + err := r.ApplyDir(dsp, params, scheduledWorkflowTemplatesDir) if err != nil { return err } diff --git a/controllers/util/util.go b/controllers/util/util.go index ca3118f51..ed8388db0 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -18,6 +18,7 @@ package util import ( "os" + "path/filepath" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,15 +48,15 @@ func BoolPointer(b bool) *bool { return &b } -func GetTemplatesInDir(templateDirectory string) ([]string, error) { - entries, 
err := os.ReadDir(templateDirectory) +func GetTemplatesInDir(templatesDirectory, componentSubdirectory string) ([]string, error) { + files, err := os.ReadDir(templatesDirectory + componentSubdirectory) if err != nil { return nil, err } var templates []string - for _, e := range entries { - templates = append(templates, e.Name()) + for _, f := range files { + templates = append(templates, filepath.Join(componentSubdirectory, f.Name())) } return templates, nil } From 17559ce09ab59d7307dcd35453a64618bf1491e0 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 15:56:33 -0400 Subject: [PATCH 03/85] Handle subdirectories in GetTemplatesInDir util --- controllers/util/util.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/controllers/util/util.go b/controllers/util/util.go index ed8388db0..efbaa49ea 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -56,7 +56,9 @@ func GetTemplatesInDir(templatesDirectory, componentSubdirectory string) ([]stri var templates []string for _, f := range files { - templates = append(templates, filepath.Join(componentSubdirectory, f.Name())) + if !f.IsDir() { + templates = append(templates, filepath.Join(componentSubdirectory, f.Name())) + } } return templates, nil } From e0b59b44cca05c86f1489e2b85cb9beb5a6845b3 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:21:39 -0400 Subject: [PATCH 04/85] Restructure Common Templates directory --- .../mlmd-envoy-dashboard-access-policy.yaml.tmpl | 0 config/internal/common/{ => default}/policy.yaml.tmpl | 0 .../common/{ => no-owner}/clusterrolebinding.yaml.tmpl | 0 controllers/common.go | 9 +++------ 4 files changed, 3 insertions(+), 6 deletions(-) rename config/internal/common/{ => default}/mlmd-envoy-dashboard-access-policy.yaml.tmpl (100%) rename config/internal/common/{ => default}/policy.yaml.tmpl (100%) rename config/internal/common/{ => no-owner}/clusterrolebinding.yaml.tmpl (100%) diff --git 
a/config/internal/common/mlmd-envoy-dashboard-access-policy.yaml.tmpl b/config/internal/common/default/mlmd-envoy-dashboard-access-policy.yaml.tmpl similarity index 100% rename from config/internal/common/mlmd-envoy-dashboard-access-policy.yaml.tmpl rename to config/internal/common/default/mlmd-envoy-dashboard-access-policy.yaml.tmpl diff --git a/config/internal/common/policy.yaml.tmpl b/config/internal/common/default/policy.yaml.tmpl similarity index 100% rename from config/internal/common/policy.yaml.tmpl rename to config/internal/common/default/policy.yaml.tmpl diff --git a/config/internal/common/clusterrolebinding.yaml.tmpl b/config/internal/common/no-owner/clusterrolebinding.yaml.tmpl similarity index 100% rename from config/internal/common/clusterrolebinding.yaml.tmpl rename to config/internal/common/no-owner/clusterrolebinding.yaml.tmpl diff --git a/controllers/common.go b/controllers/common.go index 30249a819..b68787b6f 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -19,18 +19,15 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var commonTemplates = []string{ - "common/policy.yaml.tmpl", - "common/mlmd-envoy-dashboard-access-policy.yaml.tmpl", -} +var commonTemplatesDir = "common/default" -const commonCusterRolebindingTemplate = "common/clusterrolebinding.yaml.tmpl" +const commonCusterRolebindingTemplate = "common/no-owner/clusterrolebinding.yaml.tmpl" func (r *DSPAReconciler) ReconcileCommon(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) log.Info("Applying Common Resources") - err := r.ApplyAll(dsp, params, commonTemplates) + err := r.ApplyDir(dsp, params, commonTemplatesDir) if err != nil { return err } From ebfde58296cc2009f7ae762650d908cee14b8cf6 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:25:01 -0400 Subject: [PATCH 05/85] Restructure 
Database templates directory --- .../mariadb/{ => default}/deployment.yaml.tmpl | 0 .../mariadb/{ => default}/mariadb-sa.yaml.tmpl | 0 .../internal/mariadb/{ => default}/pvc.yaml.tmpl | 0 .../mariadb/{ => default}/service.yaml.tmpl | 0 .../{ => generated-secret}/secret.yaml.tmpl | 0 controllers/database.go | 14 ++++---------- 6 files changed, 4 insertions(+), 10 deletions(-) rename config/internal/mariadb/{ => default}/deployment.yaml.tmpl (100%) rename config/internal/mariadb/{ => default}/mariadb-sa.yaml.tmpl (100%) rename config/internal/mariadb/{ => default}/pvc.yaml.tmpl (100%) rename config/internal/mariadb/{ => default}/service.yaml.tmpl (100%) rename config/internal/mariadb/{ => generated-secret}/secret.yaml.tmpl (100%) diff --git a/config/internal/mariadb/deployment.yaml.tmpl b/config/internal/mariadb/default/deployment.yaml.tmpl similarity index 100% rename from config/internal/mariadb/deployment.yaml.tmpl rename to config/internal/mariadb/default/deployment.yaml.tmpl diff --git a/config/internal/mariadb/mariadb-sa.yaml.tmpl b/config/internal/mariadb/default/mariadb-sa.yaml.tmpl similarity index 100% rename from config/internal/mariadb/mariadb-sa.yaml.tmpl rename to config/internal/mariadb/default/mariadb-sa.yaml.tmpl diff --git a/config/internal/mariadb/pvc.yaml.tmpl b/config/internal/mariadb/default/pvc.yaml.tmpl similarity index 100% rename from config/internal/mariadb/pvc.yaml.tmpl rename to config/internal/mariadb/default/pvc.yaml.tmpl diff --git a/config/internal/mariadb/service.yaml.tmpl b/config/internal/mariadb/default/service.yaml.tmpl similarity index 100% rename from config/internal/mariadb/service.yaml.tmpl rename to config/internal/mariadb/default/service.yaml.tmpl diff --git a/config/internal/mariadb/secret.yaml.tmpl b/config/internal/mariadb/generated-secret/secret.yaml.tmpl similarity index 100% rename from config/internal/mariadb/secret.yaml.tmpl rename to config/internal/mariadb/generated-secret/secret.yaml.tmpl diff --git 
a/controllers/database.go b/controllers/database.go index 205cec4ce..91e55aa95 100644 --- a/controllers/database.go +++ b/controllers/database.go @@ -26,15 +26,9 @@ import ( "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" ) -const dbSecret = "mariadb/secret.yaml.tmpl" - -var dbTemplates = []string{ - "mariadb/deployment.yaml.tmpl", - "mariadb/pvc.yaml.tmpl", - "mariadb/service.yaml.tmpl", - "mariadb/mariadb-sa.yaml.tmpl", - dbSecret, -} +const dbSecret = "mariadb/generated-secret/secret.yaml.tmpl" + +var dbTemplatesDir = "mariadb/default" // extract to var for mocking in testing var ConnectAndQueryDatabase = func(host, port, username, password, dbname string) bool { @@ -112,7 +106,7 @@ func (r *DSPAReconciler) ReconcileDatabase(ctx context.Context, dsp *dspav1alpha } } else if deployMariaDB || deployDefaultDB { log.Info("Applying mariaDB resources.") - err := r.ApplyAll(dsp, params, dbTemplates) + err := r.ApplyDir(dsp, params, dbTemplatesDir) if err != nil { return err } From b1dba38342089d56dcab51eff5ef631ffcc5f345 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:27:52 -0400 Subject: [PATCH 06/85] Restructure Object Storage templates directory --- .../minio/{ => default}/deployment.yaml.tmpl | 0 .../minio/{ => default}/minio-sa.yaml.tmpl | 0 config/internal/minio/{ => default}/pvc.yaml.tmpl | 0 .../internal/minio/{ => default}/service.yaml.tmpl | 0 .../minio/{ => generated-secret}/secret.yaml.tmpl | 0 controllers/storage.go | 14 ++++---------- 6 files changed, 4 insertions(+), 10 deletions(-) rename config/internal/minio/{ => default}/deployment.yaml.tmpl (100%) rename config/internal/minio/{ => default}/minio-sa.yaml.tmpl (100%) rename config/internal/minio/{ => default}/pvc.yaml.tmpl (100%) rename config/internal/minio/{ => default}/service.yaml.tmpl (100%) rename config/internal/minio/{ => generated-secret}/secret.yaml.tmpl (100%) diff --git a/config/internal/minio/deployment.yaml.tmpl 
b/config/internal/minio/default/deployment.yaml.tmpl similarity index 100% rename from config/internal/minio/deployment.yaml.tmpl rename to config/internal/minio/default/deployment.yaml.tmpl diff --git a/config/internal/minio/minio-sa.yaml.tmpl b/config/internal/minio/default/minio-sa.yaml.tmpl similarity index 100% rename from config/internal/minio/minio-sa.yaml.tmpl rename to config/internal/minio/default/minio-sa.yaml.tmpl diff --git a/config/internal/minio/pvc.yaml.tmpl b/config/internal/minio/default/pvc.yaml.tmpl similarity index 100% rename from config/internal/minio/pvc.yaml.tmpl rename to config/internal/minio/default/pvc.yaml.tmpl diff --git a/config/internal/minio/service.yaml.tmpl b/config/internal/minio/default/service.yaml.tmpl similarity index 100% rename from config/internal/minio/service.yaml.tmpl rename to config/internal/minio/default/service.yaml.tmpl diff --git a/config/internal/minio/secret.yaml.tmpl b/config/internal/minio/generated-secret/secret.yaml.tmpl similarity index 100% rename from config/internal/minio/secret.yaml.tmpl rename to config/internal/minio/generated-secret/secret.yaml.tmpl diff --git a/controllers/storage.go b/controllers/storage.go index 727012948..c3e133ba4 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -30,15 +30,9 @@ import ( "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" ) -const storageSecret = "minio/secret.yaml.tmpl" - -var storageTemplates = []string{ - "minio/deployment.yaml.tmpl", - "minio/pvc.yaml.tmpl", - "minio/service.yaml.tmpl", - "minio/minio-sa.yaml.tmpl", - storageSecret, -} +const storageSecret = "minio/generated-secret/secret.yaml.tmpl" + +var storageTemplatesDir = "minio/default" func joinHostPort(host, port string) (string, error) { if host == "" { @@ -162,7 +156,7 @@ func (r *DSPAReconciler) ReconcileStorage(ctx context.Context, dsp *dspav1alpha1 } } else if deployMinio { log.Info("Applying object storage resources.") - err := r.ApplyAll(dsp, 
params, storageTemplates) + err := r.ApplyDir(dsp, params, storageTemplatesDir) if err != nil { return err } From 0f17eeb4dbc744b1608b61db8ed3d8dbfc4336a0 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:38:50 -0400 Subject: [PATCH 07/85] Restructure components manifest directories with conditions - Some components have conditions (only deploy if XYZ option set), so seperate these specific manifests from rest of the 'default ones for that component --- .../apiserver/{ => default}/artifact_script.yaml.tmpl | 0 .../internal/apiserver/{ => default}/deployment.yaml.tmpl | 2 ++ config/internal/apiserver/{ => default}/monitor.yaml.tmpl | 0 .../{ => default}/role_ds-pipeline-user-access.yaml.tmpl | 0 .../apiserver/{ => default}/role_ds-pipeline.yaml.tmpl | 0 .../{ => default}/role_pipeline-runner.yaml.tmpl | 0 .../{ => default}/rolebinding_ds-pipeline.yaml.tmpl | 0 .../{ => default}/rolebinding_pipeline-runner.yaml.tmpl | 0 .../apiserver/{ => default}/sa_ds-pipeline.yaml.tmpl | 0 .../apiserver/{ => default}/sa_pipeline-runner.yaml.tmpl | 0 config/internal/apiserver/{ => default}/service.yaml.tmpl | 0 config/internal/apiserver/{ => route}/route.yaml.tmpl | 0 .../{ => sample-pipeline}/sample-config.yaml.tmpl | 0 .../{ => sample-pipeline}/sample-pipeline.yaml.tmpl | 0 config/internal/scheduled-workflow/deployment.yaml.tmpl | 6 ++++++ controllers/apiserver.go | 8 ++++---- 16 files changed, 12 insertions(+), 4 deletions(-) rename config/internal/apiserver/{ => default}/artifact_script.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/deployment.yaml.tmpl (99%) rename config/internal/apiserver/{ => default}/monitor.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/role_ds-pipeline-user-access.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/role_ds-pipeline.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/role_pipeline-runner.yaml.tmpl (100%) rename config/internal/apiserver/{ => 
default}/rolebinding_ds-pipeline.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/rolebinding_pipeline-runner.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/sa_ds-pipeline.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/sa_pipeline-runner.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/service.yaml.tmpl (100%) rename config/internal/apiserver/{ => route}/route.yaml.tmpl (100%) rename config/internal/apiserver/{ => sample-pipeline}/sample-config.yaml.tmpl (100%) rename config/internal/apiserver/{ => sample-pipeline}/sample-pipeline.yaml.tmpl (100%) diff --git a/config/internal/apiserver/artifact_script.yaml.tmpl b/config/internal/apiserver/default/artifact_script.yaml.tmpl similarity index 100% rename from config/internal/apiserver/artifact_script.yaml.tmpl rename to config/internal/apiserver/default/artifact_script.yaml.tmpl diff --git a/config/internal/apiserver/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl similarity index 99% rename from config/internal/apiserver/deployment.yaml.tmpl rename to config/internal/apiserver/default/deployment.yaml.tmpl index 2e42d702d..7c08f2ddc 100644 --- a/config/internal/apiserver/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -50,6 +50,8 @@ spec: value: "{{.APIServer.ArtifactImage}}" - name: ARCHIVE_LOGS value: "{{.APIServer.ArchiveLogs}}" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "{{.APIServer.TrackArtifacts}}" - name: STRIP_EOF diff --git a/config/internal/apiserver/monitor.yaml.tmpl b/config/internal/apiserver/default/monitor.yaml.tmpl similarity index 100% rename from config/internal/apiserver/monitor.yaml.tmpl rename to config/internal/apiserver/default/monitor.yaml.tmpl diff --git a/config/internal/apiserver/role_ds-pipeline-user-access.yaml.tmpl b/config/internal/apiserver/default/role_ds-pipeline-user-access.yaml.tmpl similarity index 100% rename from 
config/internal/apiserver/role_ds-pipeline-user-access.yaml.tmpl rename to config/internal/apiserver/default/role_ds-pipeline-user-access.yaml.tmpl diff --git a/config/internal/apiserver/role_ds-pipeline.yaml.tmpl b/config/internal/apiserver/default/role_ds-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/role_ds-pipeline.yaml.tmpl rename to config/internal/apiserver/default/role_ds-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/role_pipeline-runner.yaml.tmpl b/config/internal/apiserver/default/role_pipeline-runner.yaml.tmpl similarity index 100% rename from config/internal/apiserver/role_pipeline-runner.yaml.tmpl rename to config/internal/apiserver/default/role_pipeline-runner.yaml.tmpl diff --git a/config/internal/apiserver/rolebinding_ds-pipeline.yaml.tmpl b/config/internal/apiserver/default/rolebinding_ds-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/rolebinding_ds-pipeline.yaml.tmpl rename to config/internal/apiserver/default/rolebinding_ds-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/rolebinding_pipeline-runner.yaml.tmpl b/config/internal/apiserver/default/rolebinding_pipeline-runner.yaml.tmpl similarity index 100% rename from config/internal/apiserver/rolebinding_pipeline-runner.yaml.tmpl rename to config/internal/apiserver/default/rolebinding_pipeline-runner.yaml.tmpl diff --git a/config/internal/apiserver/sa_ds-pipeline.yaml.tmpl b/config/internal/apiserver/default/sa_ds-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sa_ds-pipeline.yaml.tmpl rename to config/internal/apiserver/default/sa_ds-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/sa_pipeline-runner.yaml.tmpl b/config/internal/apiserver/default/sa_pipeline-runner.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sa_pipeline-runner.yaml.tmpl rename to config/internal/apiserver/default/sa_pipeline-runner.yaml.tmpl diff --git 
a/config/internal/apiserver/service.yaml.tmpl b/config/internal/apiserver/default/service.yaml.tmpl similarity index 100% rename from config/internal/apiserver/service.yaml.tmpl rename to config/internal/apiserver/default/service.yaml.tmpl diff --git a/config/internal/apiserver/route.yaml.tmpl b/config/internal/apiserver/route/route.yaml.tmpl similarity index 100% rename from config/internal/apiserver/route.yaml.tmpl rename to config/internal/apiserver/route/route.yaml.tmpl diff --git a/config/internal/apiserver/sample-config.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-config.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sample-config.yaml.tmpl rename to config/internal/apiserver/sample-pipeline/sample-config.yaml.tmpl diff --git a/config/internal/apiserver/sample-pipeline.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sample-pipeline.yaml.tmpl rename to config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl diff --git a/config/internal/scheduled-workflow/deployment.yaml.tmpl b/config/internal/scheduled-workflow/deployment.yaml.tmpl index c5a5da5b5..2415d25e0 100644 --- a/config/internal/scheduled-workflow/deployment.yaml.tmpl +++ b/config/internal/scheduled-workflow/deployment.yaml.tmpl @@ -24,8 +24,14 @@ spec: spec: containers: - env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: CRON_SCHEDULE_TIMEZONE value: "{{.ScheduledWorkflow.CronScheduleTimezone}}" + - name: EXECUTIONTYPE + value: PipelineRun image: "{{.ScheduledWorkflow.Image}}" imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/apiserver.go b/controllers/apiserver.go index 816b637cb..c73e16b68 100644 --- a/controllers/apiserver.go +++ b/controllers/apiserver.go @@ -24,17 +24,17 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var apiServerTemplatesDir = "apiserver" +var 
apiServerTemplatesDir = "apiserver/default" // serverRoute is a resource deployed conditionally // as such it is handled separately -const serverRoute = "apiserver/route.yaml.tmpl" +const serverRoute = "apiserver/route/route.yaml.tmpl" // Sample Pipeline and Config are resources deployed conditionally // as such it is handled separately var samplePipelineTemplates = map[string]string{ - "sample-pipeline": "apiserver/sample-pipeline.yaml.tmpl", - "sample-config": "apiserver/sample-config.yaml.tmpl", + "sample-pipeline": "apiserver/sample-pipeline/sample-pipeline.yaml.tmpl", + "sample-config": "apiserver/sample-pipeline/sample-config.yaml.tmpl", } func (r *DSPAReconciler) ReconcileAPIServer(ctx context.Context, dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { From c9452a65339a27aeb9d34dc59185f6e1eb95d4c0 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Fri, 15 Sep 2023 18:47:32 -0400 Subject: [PATCH 08/85] Add VisualizationServer and CRDViewer component manifests --- api/v1alpha1/dspipeline_types.go | 20 +++++++ api/v1alpha1/zz_generated.deepcopy.go | 40 +++++++++++++ config/base/kustomization.yaml | 14 +++++ config/base/params.env | 2 + config/configmaps/files/config.yaml | 2 + ...b.io_datasciencepipelinesapplications.yaml | 20 +++++++ .../internal/crdviewer/deployment.yaml.tmpl | 35 +++++++++++ config/internal/crdviewer/role.yaml.tmpl | 32 ++++++++++ .../internal/crdviewer/rolebinding.yaml.tmpl | 13 ++++ .../crdviewer/serviceaccount.yaml.tmpl | 5 ++ .../visualizationserver/deployment.yaml.tmpl | 60 +++++++++++++++++++ .../visualizationserver/service.yaml.tmpl | 19 ++++++ .../serviceaccount.yaml.tmpl | 5 ++ config/manager/manager.yaml | 4 ++ kfdef/kfdef.yaml | 4 ++ 15 files changed, 275 insertions(+) create mode 100644 config/internal/crdviewer/deployment.yaml.tmpl create mode 100644 config/internal/crdviewer/role.yaml.tmpl create mode 100644 config/internal/crdviewer/rolebinding.yaml.tmpl create mode 100644 
config/internal/crdviewer/serviceaccount.yaml.tmpl create mode 100644 config/internal/visualizationserver/deployment.yaml.tmpl create mode 100644 config/internal/visualizationserver/service.yaml.tmpl create mode 100644 config/internal/visualizationserver/serviceaccount.yaml.tmpl diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 0d4f398ea..f97629d53 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -38,6 +38,12 @@ type DSPASpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *MLMD `json:"mlmd"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:={deploy: false} + *CRDViewer `json:"crdviewer"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:={deploy: false} + *VisualizationServer `json:"visualizationServer"` } type APIServer struct { @@ -206,6 +212,20 @@ type Writer struct { Image string `json:"image"` } +type CRDViewer struct { + // +kubebuilder:default:=true + // +kubebuilder:validation:Optional + Deploy bool `json:"deploy"` + Image string `json:"image,omitempty"` +} + +type VisualizationServer struct { + // +kubebuilder:default:=true + // +kubebuilder:validation:Optional + Deploy bool `json:"deploy"` + Image string `json:"image,omitempty"` +} + // ResourceRequirements structures compute resource requirements. // Replaces ResourceRequirements from corev1 which also includes optional storage field. // We handle storage field separately, and should not include it as a subfield for Resources. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index babfd004f..528cd5eb9 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -66,6 +66,21 @@ func (in *ArtifactScriptConfigMap) DeepCopy() *ArtifactScriptConfigMap { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CRDViewer) DeepCopyInto(out *CRDViewer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDViewer. +func (in *CRDViewer) DeepCopy() *CRDViewer { + if in == nil { + return nil + } + out := new(CRDViewer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = *in @@ -104,6 +119,16 @@ func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = new(MLMD) (*in).DeepCopyInto(*out) } + if in.CRDViewer != nil { + in, out := &in.CRDViewer, &out.CRDViewer + *out = new(CRDViewer) + **out = **in + } + if in.VisualizationServer != nil { + in, out := &in.VisualizationServer, &out.VisualizationServer + *out = new(VisualizationServer) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSPASpec. @@ -546,6 +571,21 @@ func (in *SecretKeyValue) DeepCopy() *SecretKeyValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VisualizationServer) DeepCopyInto(out *VisualizationServer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationServer. +func (in *VisualizationServer) DeepCopy() *VisualizationServer { + if in == nil { + return nil + } + out := new(VisualizationServer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Writer) DeepCopyInto(out *Writer) { *out = *in diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index 5ca9aafac..5e22aa45d 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -92,6 +92,20 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGES_MLMDWRITER + - name: IMAGES_CRDVIEWER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_CRDVIEWER + - name: IMAGES_VISUALIZATIONSERVER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_VISUALIZATIONSERVER - name: IMAGES_DSPO objref: kind: ConfigMap diff --git a/config/base/params.env b/config/base/params.env index 499233cb3..3f02c0849 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -10,3 +10,5 @@ IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.8 IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 +IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 +IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 477512b54..41319834a 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -10,3 +10,5 @@ Images: MlmdEnvoy: $(IMAGES_MLMDENVOY) MlmdGRPC: $(IMAGES_MLMDGRPC) MlmdWriter: $(IMAGES_MLMDWRITER) + CRDViewer: $(IMAGES_CRDVIEWER) + VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) \ No newline at end of file diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 
98eddeddb..a9fa22b28 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -133,6 +133,16 @@ spec: default: true type: boolean type: object + crdviewer: + default: + deploy: false + properties: + deploy: + default: true + type: boolean + image: + type: string + type: object database: default: mariaDB: @@ -650,6 +660,16 @@ spec: type: object type: object type: object + visualizationServer: + default: + deploy: false + properties: + deploy: + default: true + type: boolean + image: + type: string + type: object required: - objectStorage type: object diff --git a/config/internal/crdviewer/deployment.yaml.tmpl b/config/internal/crdviewer/deployment.yaml.tmpl new file mode 100644 index 000000000..1f568a2b1 --- /dev/null +++ b/config/internal/crdviewer/deployment.yaml.tmpl @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines + name: ds-pipeline-viewer-crd-{{.Name}} + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: ds-pipeline-viewer-crd-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-viewer-crd-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + spec: + containers: + - env: + - name: MAX_NUM_VIEWERS + value: "50" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 + imagePullPolicy: Always + name: ds-pipeline-viewer-crd + serviceAccountName: ds-pipeline-viewer-crd-service-account-{{.Name}} diff --git a/config/internal/crdviewer/role.yaml.tmpl b/config/internal/crdviewer/role.yaml.tmpl new file mode 100644 index 000000000..ef943e9fe --- /dev/null +++ 
b/config/internal/crdviewer/role.yaml.tmpl @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ds-pipeline-viewer-controller-role-{{.Name}} + namespace: {{.Namespace}} +rules: +- apiGroups: + - '*' + resources: + - deployments + - services + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - kubeflow.org + resources: + - viewers + - viewers/finalizers + verbs: + - create + - get + - list + - watch + - update + - patch + - delete diff --git a/config/internal/crdviewer/rolebinding.yaml.tmpl b/config/internal/crdviewer/rolebinding.yaml.tmpl new file mode 100644 index 000000000..f927411a4 --- /dev/null +++ b/config/internal/crdviewer/rolebinding.yaml.tmpl @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ds-pipeline-viewer-crd-binding-{{.Name}} + namespace: {{.Namespace}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ds-pipeline-viewer-controller-role-{{.Name}} +subjects: +- kind: ServiceAccount + name: ds-pipeline-viewer-crd-service-account-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/internal/crdviewer/serviceaccount.yaml.tmpl b/config/internal/crdviewer/serviceaccount.yaml.tmpl new file mode 100644 index 000000000..2b21e1453 --- /dev/null +++ b/config/internal/crdviewer/serviceaccount.yaml.tmpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-pipeline-viewer-crd-service-account-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/internal/visualizationserver/deployment.yaml.tmpl b/config/internal/visualizationserver/deployment.yaml.tmpl new file mode 100644 index 000000000..26abdeedc --- /dev/null +++ b/config/internal/visualizationserver/deployment.yaml.tmpl @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: 
ds-pipeline-visualizationserver-{{.Name}} + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + spec: + containers: + - image: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 + imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/ + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + name: ds-pipeline-visualizationserver + ports: + - containerPort: 8888 + name: http + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/ + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 30m + memory: 500Mi + serviceAccountName: ds-pipeline-visualizationserver-{{.Name}} diff --git a/config/internal/visualizationserver/service.yaml.tmpl b/config/internal/visualizationserver/service.yaml.tmpl new file mode 100644 index 000000000..f2d76833f --- /dev/null +++ b/config/internal/visualizationserver/service.yaml.tmpl @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: ds-pipeline-visualizationserver-{{.Name}} + namespace: {{.Namespace}} + annotations: + service.alpha.openshift.io/serving-cert-secret-name: ds-pipelines-proxy-tls-{{.Name}} + labels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines +spec: + ports: + - name: http + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines diff --git a/config/internal/visualizationserver/serviceaccount.yaml.tmpl b/config/internal/visualizationserver/serviceaccount.yaml.tmpl new file mode 100644 index 000000000..e1c415786 --- /dev/null 
+++ b/config/internal/visualizationserver/serviceaccount.yaml.tmpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-pipeline-visualizationserver-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 1f1bc9a76..f4833e587 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -56,6 +56,10 @@ spec: value: $(IMAGES_MLMDGRPC) - name: IMAGES_MLMDWRITER value: $(IMAGES_MLMDWRITER) + - name: IMAGES_CRDVIEWER + value: $(IMAGES_CRDVIEWER) + - name: IMAGES_VISUALIZATIONSERVER + value: $(IMAGES_VISUALIZATIONSERVER) securityContext: allowPrivilegeEscalation: false capabilities: diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 7bb962d86..4f1c2efe2 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -30,6 +30,10 @@ spec: value: quay.io/opendatahub/ds-pipelines-metadata-grpc:1.0.0 - name: IMAGES_MLMDWRITER value: quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 + - name: IMAGES_CRDVIEWER + value: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 + - name: IMAGES_VISUALIZATIONSERVER + value: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 repoRef: name: manifests path: config From 51fbc8eb40e63ff4660e7ec558ec25ac9cc910ac Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 19 Sep 2023 15:53:20 -0400 Subject: [PATCH 09/85] Add Reconcile Handlers for CRDViewer and VisualizationServer --- controllers/crdviewer.go | 44 +++++++++ controllers/crdviewer_test.go | 118 +++++++++++++++++++++++ controllers/dspipeline_controller.go | 11 +++ controllers/dspipeline_params.go | 2 + controllers/visualization_server.go | 44 +++++++++ controllers/visualization_server_test.go | 118 +++++++++++++++++++++++ 6 files changed, 337 insertions(+) create mode 100644 controllers/crdviewer.go create mode 100644 controllers/crdviewer_test.go create mode 100644 controllers/visualization_server.go create mode 100644 controllers/visualization_server_test.go diff --git 
a/controllers/crdviewer.go b/controllers/crdviewer.go new file mode 100644 index 000000000..e173de877 --- /dev/null +++ b/controllers/crdviewer.go @@ -0,0 +1,44 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" +) + +var crdViewerTemplatesDir = "crdviewer" + +func (r *DSPAReconciler) ReconcileCRDViewer(dsp *dspav1alpha1.DataSciencePipelinesApplication, + params *DSPAParams) error { + + log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) + + if !dsp.Spec.CRDViewer.Deploy { + log.Info("Skipping Application of CRD Viewer Resources") + return nil + } + + log.Info("Applying CRDViewer Resources") + + err := r.ApplyDir(dsp, params, crdViewerTemplatesDir) + if err != nil { + return err + } + + log.Info("Finished applying CRD Viewer Resources") + return nil +} diff --git a/controllers/crdviewer_test.go b/controllers/crdviewer_test.go new file mode 100644 index 000000000..4a23804a8 --- /dev/null +++ b/controllers/crdviewer_test.go @@ -0,0 +1,118 @@ +//go:build test_all || test_unit + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" +) + +func TestDeployCRDViewer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedCRDViewerName := "ds-pipeline-viewer-crd-testdspa" + + // Construct DSPASpec with deployed CRD Viewer + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + CRDViewer: &dspav1alpha1.CRDViewer{ + Deploy: true, + }, + Database: &dspav1alpha1.Database{ + DisableHealthCheck: false, + MariaDB: &dspav1alpha1.MariaDB{ + Deploy: true, + }, + }, + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + Minio: &dspav1alpha1.Minio{ + Deploy: false, + Image: "someimage", + }, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Namespace = testNamespace + dspa.Name = testDSPAName + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) + assert.Nil(t, err) + + // Ensure CRD Viewer Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileCRDViewer(dspa, params) + assert.Nil(t, err) + + // Ensure CRD Viewer Deployment now exists + deployment = &appsv1.Deployment{} + 
created, err = reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.True(t, created) + assert.Nil(t, err) + +} + +func TestDontDeployCRDViewer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedCRDViewerName := "ds-pipeline-viewer-crd-testdspa" + + // Construct DSPASpec with non-deployed CRD Viewer + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + CRDViewer: &dspav1alpha1.CRDViewer{ + Deploy: false, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + + // Ensure CRD Viewer Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileCRDViewer(dspa, params) + assert.Nil(t, err) + + // Ensure CRD Viewer Deployment still doesn't exist + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) +} diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 185c3b59c..925c621fa 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -276,6 +276,17 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
if err != nil { return ctrl.Result{}, err } + + err = r.ReconcileCRDViewer(dspa, params) + if err != nil { + return ctrl.Result{}, err + } + + err = r.ReconcileVisualizationServer(dspa, params) + if err != nil { + return ctrl.Result{}, err + } + } log.Info("Updating CR status") diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index b4ef158d7..d19582253 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -48,6 +48,8 @@ type DSPAParams struct { MariaDB *dspa.MariaDB Minio *dspa.Minio MLMD *dspa.MLMD + CRDViewer *dspa.CRDViewer + VisualizationServer *dspa.VisualizationServer DBConnection ObjectStorageConnection } diff --git a/controllers/visualization_server.go b/controllers/visualization_server.go new file mode 100644 index 000000000..817341fbb --- /dev/null +++ b/controllers/visualization_server.go @@ -0,0 +1,44 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" +) + +var visualizationServerTemplatesDir = "visualizationserver" + +func (r *DSPAReconciler) ReconcileVisualizationServer(dsp *dspav1alpha1.DataSciencePipelinesApplication, + params *DSPAParams) error { + + log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) + + if !dsp.Spec.VisualizationServer.Deploy { + log.Info("Skipping Application of Visualization Server Resources") + return nil + } + + log.Info("Applying Visualization Server Resources") + + err := r.ApplyDir(dsp, params, visualizationServerTemplatesDir) + if err != nil { + return err + } + + log.Info("Finished applying Visualization Server Resources") + return nil +} diff --git a/controllers/visualization_server_test.go b/controllers/visualization_server_test.go new file mode 100644 index 000000000..7a06f6ca2 --- /dev/null +++ b/controllers/visualization_server_test.go @@ -0,0 +1,118 @@ +//go:build test_all || test_unit + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "testing" + + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" +) + +func TestDeployVisualizationServer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedVisualizationServerName := "ds-pipeline-visualizationserver-testdspa" + + // Construct DSPASpec with deployed Visualization Server + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + VisualizationServer: &dspav1alpha1.VisualizationServer{ + Deploy: true, + }, + Database: &dspav1alpha1.Database{ + DisableHealthCheck: false, + MariaDB: &dspav1alpha1.MariaDB{ + Deploy: true, + }, + }, + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + Minio: &dspav1alpha1.Minio{ + Deploy: false, + Image: "someimage", + }, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Namespace = testNamespace + dspa.Name = testDSPAName + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) + assert.Nil(t, err) + + // Ensure Visualization Server Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileVisualizationServer(dspa, params) + assert.Nil(t, err) + + // Ensure Visualization Server Deployment now exists + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.True(t, created) + assert.Nil(t, err) + +} + +func TestDontDeployVisualizationServer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + 
expectedVisualizationServerName := "ds-pipeline-visualization-server-testdspa" + + // Construct DSPASpec with non-deployed Visualization Server + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + VisualizationServer: &dspav1alpha1.VisualizationServer{ + Deploy: false, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + + // Ensure Visualization Server Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileVisualizationServer(dspa, params) + assert.Nil(t, err) + + // Ensure Visualization Server Deployment still doesn't exist + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) +} From 8d3c8bd5c3116a488dfb490a1b391bf02b203677 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 19 Sep 2023 16:05:10 -0400 Subject: [PATCH 10/85] Fix Functional Tests (handle new envvars) --- config/internal/persistence-agent/deployment.yaml.tmpl | 10 ++++++++++ .../internal/scheduled-workflow/deployment.yaml.tmpl | 4 +--- .../case_0/expected/created/apiserver_deployment.yaml | 2 ++ .../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ .../case_2/expected/created/apiserver_deployment.yaml | 2 ++ .../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ .../case_3/expected/created/apiserver_deployment.yaml | 2 ++ .../case_4/expected/created/apiserver_deployment.yaml | 2 ++ 
.../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ .../case_5/expected/created/apiserver_deployment.yaml | 2 ++ .../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ 15 files changed, 77 insertions(+), 3 deletions(-) diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index 1c160ec59..cef718369 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "{{.Namespace}}" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: "{{.PersistenceAgent.Image}}" imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/config/internal/scheduled-workflow/deployment.yaml.tmpl b/config/internal/scheduled-workflow/deployment.yaml.tmpl index 2415d25e0..2b76d429e 100644 --- a/config/internal/scheduled-workflow/deployment.yaml.tmpl +++ b/config/internal/scheduled-workflow/deployment.yaml.tmpl @@ -25,9 +25,7 @@ spec: containers: - env: - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + value: "{{.Namespace}}" - name: CRON_SCHEDULE_TIMEZONE value: "{{.ScheduledWorkflow.CronScheduleTimezone}}" - name: EXECUTIONTYPE diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index fa277a796..ecf172926 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ 
-50,6 +50,8 @@ spec: value: "artifact-manager:test0" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml index bf0171dc3..ecce799ab 100644 --- a/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: persistenceagent:test0 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml index d069ca8fb..e5aee424a 100644 --- a/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "UTC" + - name: EXECUTIONTYPE + value: PipelineRun image: scheduledworkflow:test0 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 5c1263828..1489a4e0d 100644 --- 
a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "artifact-manager:test2" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml index 8342eace3..db064397e 100644 --- a/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: persistenceagent:test2 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml index f912bc2f7..78b8b382b 100644 --- a/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "EST" + - name: EXECUTIONTYPE + value: PipelineRun image: scheduledworkflow:test2 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml 
b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index 0b617788d..6371d1460 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: artifact-manager:test3 - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index 94524294c..043a86a37 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "this-artifact-manager-image-from-cr-should-be-used:test4" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml index da750bb99..c4118d680 100644 --- a/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: this-persistenceagent-image-from-cr-should-be-used:test4 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git 
a/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml index 0d7e88db6..80e2084ad 100644 --- a/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "EST" + - name: EXECUTIONTYPE + value: PipelineRun image: this-scheduledworkflow-image-from-cr-should-be-used:test4 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 92f6ac5b9..672ecd431 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "artifact-manager:test5" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml index 3255d1281..2a22a22ae 100644 --- a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + 
value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: persistenceagent:test5 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml index d03c4daf8..ab88f8de9 100644 --- a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "UTC" + - name: EXECUTIONTYPE + value: PipelineRun image: scheduledworkflow:test5 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow From e67b42cc9e1a9b27eef50b5029bc3e49b06466fb Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 26 Sep 2023 03:41:01 -0400 Subject: [PATCH 11/85] Fix self-deployed DB/Storage missing secrets --- controllers/database.go | 4 ++++ controllers/storage.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/controllers/database.go b/controllers/database.go index 91e55aa95..deee77b80 100644 --- a/controllers/database.go +++ b/controllers/database.go @@ -110,6 +110,10 @@ func (r *DSPAReconciler) ReconcileDatabase(ctx context.Context, dsp *dspav1alpha if err != nil { return err } + err = r.Apply(dsp, params, dbSecret) + if err != nil { + return err + } // If no database was not specified, deploy mariaDB by default. // Update the CR with the state of mariaDB to accurately portray // desired state. 
diff --git a/controllers/storage.go b/controllers/storage.go index c3e133ba4..a57697f53 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -160,6 +160,10 @@ func (r *DSPAReconciler) ReconcileStorage(ctx context.Context, dsp *dspav1alpha1 if err != nil { return err } + err = r.Apply(dsp, params, storageSecret) + if err != nil { + return err + } // If no storage was not specified, deploy minio by default. // Update the CR with the state of minio to accurately portray // desired state. From bc692e9171af2a73accbe5b9659cdead66e1263a Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 26 Sep 2023 03:41:45 -0400 Subject: [PATCH 12/85] WIP: Add DSPv2 Internal Manifests --- config/v2/cache/clusterrole.yaml | 35 +++++++++ config/v2/cache/clusterrolebinding.yaml | 12 +++ config/v2/cache/kustomization.yaml | 4 + config/v2/cache/serviceaccount.yaml | 11 +++ config/v2/driver/clusterrole.yaml | 63 +++++++++++++++ config/v2/driver/clusterrolebinding.yaml | 17 ++++ config/v2/driver/deployment.yaml | 57 ++++++++++++++ config/v2/driver/kustomization.yaml | 8 ++ config/v2/driver/role.yaml | 77 ++++++++++++++++++ config/v2/driver/rolebinding.yaml | 17 ++++ config/v2/driver/service.yaml | 24 ++++++ config/v2/driver/serviceaccount.yaml | 10 +++ .../clusterrole.leaderelection.yaml | 20 +++++ .../controller/clusterrole.clusteraccess.yaml | 66 ++++++++++++++++ .../controller/clusterrole.tenantaccess.yaml | 21 +++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../clusterrolebinding.tenantaccess.yaml | 16 ++++ .../v2/exithandler/controller/deployment.yaml | 60 ++++++++++++++ .../exithandler/controller/kustomization.yaml | 10 +++ config/v2/exithandler/controller/role.yaml | 37 +++++++++ .../exithandler/controller/rolebinding.yaml | 16 ++++ .../controller/serviceaccount.yaml | 10 +++ config/v2/exithandler/crd.yaml | 29 +++++++ config/v2/exithandler/kustomization.yaml | 5 ++ 
.../webhook/clusterrole.clusteraccess.yaml | 78 +++++++++++++++++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ config/v2/exithandler/webhook/deployment.yaml | 71 +++++++++++++++++ .../v2/exithandler/webhook/kustomization.yaml | 11 +++ .../webhook/mutatingwebhookconfig.yaml | 19 +++++ config/v2/exithandler/webhook/role.yaml | 53 +++++++++++++ .../v2/exithandler/webhook/rolebinding.yaml | 16 ++++ config/v2/exithandler/webhook/secret.yaml | 9 +++ config/v2/exithandler/webhook/service.yaml | 30 +++++++ .../exithandler/webhook/serviceaccount.yaml | 10 +++ .../webhook/validatingwebhookconfig.yaml | 19 +++++ .../kfptask/clusterrole.leaderelection.yaml | 20 +++++ .../controller/clusterrole.clusteraccess.yaml | 66 ++++++++++++++++ .../controller/clusterrole.tenantaccess.yaml | 21 +++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../clusterrolebinding.tenantaccess.yaml | 16 ++++ config/v2/kfptask/controller/deployment.yaml | 60 ++++++++++++++ .../v2/kfptask/controller/kustomization.yaml | 10 +++ config/v2/kfptask/controller/role.yaml | 38 +++++++++ config/v2/kfptask/controller/rolebinding.yaml | 17 ++++ .../v2/kfptask/controller/serviceaccount.yaml | 10 +++ config/v2/kfptask/crd.yaml | 29 +++++++ config/v2/kfptask/kustomization.yaml | 5 ++ .../webhook/clusterrole.clusteraccess.yaml | 78 +++++++++++++++++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ config/v2/kfptask/webhook/deployment.yaml | 71 +++++++++++++++++ config/v2/kfptask/webhook/kustomization.yaml | 12 +++ .../webhook/mutatingwebhookconfig.yaml | 19 +++++ config/v2/kfptask/webhook/role.yaml | 53 +++++++++++++ config/v2/kfptask/webhook/rolebinding.yaml | 16 ++++ config/v2/kfptask/webhook/secret.yaml | 9 +++ config/v2/kfptask/webhook/service.yaml | 30 +++++++ config/v2/kfptask/webhook/serviceaccount.yaml | 10 +++ .../webhook/validatingwebhookconfig.yaml | 19 +++++ 
config/v2/kustomization.yaml | 21 +++++ config/v2/params.env | 14 ++++ config/v2/params.yaml | 7 ++ .../clusterrole.leaderelection.yaml | 20 +++++ .../controller/clusterrole.clusteraccess.yaml | 66 ++++++++++++++++ .../controller/clusterrole.tenantaccess.yaml | 21 +++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../clusterrolebinding.tenantaccess.yaml | 16 ++++ .../pipelineloop/controller/deployment.yaml | 60 ++++++++++++++ .../controller/kustomization.yaml | 11 +++ config/v2/pipelineloop/controller/role.yaml | 36 +++++++++ .../pipelineloop/controller/rolebinding.yaml | 16 ++++ .../controller/serviceaccount.yaml | 10 +++ config/v2/pipelineloop/crd.yaml | 29 +++++++ config/v2/pipelineloop/kustomization.yaml | 5 ++ .../webhook/clusterrole.clusteraccess.yaml | 78 +++++++++++++++++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../v2/pipelineloop/webhook/deployment.yaml | 71 +++++++++++++++++ .../pipelineloop/webhook/kustomization.yaml | 12 +++ .../webhook/mutatingwebhookconfig.yaml | 19 +++++ config/v2/pipelineloop/webhook/role.yaml | 52 +++++++++++++ .../v2/pipelineloop/webhook/rolebinding.yaml | 16 ++++ config/v2/pipelineloop/webhook/service.yaml | 30 +++++++ .../pipelineloop/webhook/serviceaccount.yaml | 10 +++ .../webhook/validatingwebhookconfig.yaml | 19 +++++ config/v2/tektoncrds/crd.yaml | 28 +++++++ config/v2/tektoncrds/kustomization.yaml | 4 + config/v2/tektoncrds/scc.anyuid.yaml | 61 +++++++++++++++ config/v2/tektoncrds/scc.privileged.yaml | 62 +++++++++++++++ 92 files changed, 2520 insertions(+) create mode 100644 config/v2/cache/clusterrole.yaml create mode 100644 config/v2/cache/clusterrolebinding.yaml create mode 100644 config/v2/cache/kustomization.yaml create mode 100644 config/v2/cache/serviceaccount.yaml create mode 100644 config/v2/driver/clusterrole.yaml create mode 100644 config/v2/driver/clusterrolebinding.yaml create mode 
100644 config/v2/driver/deployment.yaml create mode 100644 config/v2/driver/kustomization.yaml create mode 100644 config/v2/driver/role.yaml create mode 100644 config/v2/driver/rolebinding.yaml create mode 100644 config/v2/driver/service.yaml create mode 100644 config/v2/driver/serviceaccount.yaml create mode 100644 config/v2/exithandler/clusterrole.leaderelection.yaml create mode 100644 config/v2/exithandler/controller/clusterrole.clusteraccess.yaml create mode 100644 config/v2/exithandler/controller/clusterrole.tenantaccess.yaml create mode 100644 config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml create mode 100644 config/v2/exithandler/controller/deployment.yaml create mode 100644 config/v2/exithandler/controller/kustomization.yaml create mode 100644 config/v2/exithandler/controller/role.yaml create mode 100644 config/v2/exithandler/controller/rolebinding.yaml create mode 100644 config/v2/exithandler/controller/serviceaccount.yaml create mode 100644 config/v2/exithandler/crd.yaml create mode 100644 config/v2/exithandler/kustomization.yaml create mode 100644 config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml create mode 100644 config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/exithandler/webhook/deployment.yaml create mode 100644 config/v2/exithandler/webhook/kustomization.yaml create mode 100644 config/v2/exithandler/webhook/mutatingwebhookconfig.yaml create mode 100644 config/v2/exithandler/webhook/role.yaml create mode 100644 config/v2/exithandler/webhook/rolebinding.yaml create mode 100644 config/v2/exithandler/webhook/secret.yaml create mode 100644 config/v2/exithandler/webhook/service.yaml create mode 100644 config/v2/exithandler/webhook/serviceaccount.yaml create mode 100644 
config/v2/exithandler/webhook/validatingwebhookconfig.yaml create mode 100644 config/v2/kfptask/clusterrole.leaderelection.yaml create mode 100644 config/v2/kfptask/controller/clusterrole.clusteraccess.yaml create mode 100644 config/v2/kfptask/controller/clusterrole.tenantaccess.yaml create mode 100644 config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml create mode 100644 config/v2/kfptask/controller/deployment.yaml create mode 100644 config/v2/kfptask/controller/kustomization.yaml create mode 100644 config/v2/kfptask/controller/role.yaml create mode 100644 config/v2/kfptask/controller/rolebinding.yaml create mode 100644 config/v2/kfptask/controller/serviceaccount.yaml create mode 100644 config/v2/kfptask/crd.yaml create mode 100644 config/v2/kfptask/kustomization.yaml create mode 100644 config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml create mode 100644 config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/kfptask/webhook/deployment.yaml create mode 100644 config/v2/kfptask/webhook/kustomization.yaml create mode 100644 config/v2/kfptask/webhook/mutatingwebhookconfig.yaml create mode 100644 config/v2/kfptask/webhook/role.yaml create mode 100644 config/v2/kfptask/webhook/rolebinding.yaml create mode 100644 config/v2/kfptask/webhook/secret.yaml create mode 100644 config/v2/kfptask/webhook/service.yaml create mode 100644 config/v2/kfptask/webhook/serviceaccount.yaml create mode 100644 config/v2/kfptask/webhook/validatingwebhookconfig.yaml create mode 100644 config/v2/kustomization.yaml create mode 100644 config/v2/params.env create mode 100644 config/v2/params.yaml create mode 100644 config/v2/pipelineloop/clusterrole.leaderelection.yaml create mode 100644 
config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml create mode 100644 config/v2/pipelineloop/controller/deployment.yaml create mode 100644 config/v2/pipelineloop/controller/kustomization.yaml create mode 100644 config/v2/pipelineloop/controller/role.yaml create mode 100644 config/v2/pipelineloop/controller/rolebinding.yaml create mode 100644 config/v2/pipelineloop/controller/serviceaccount.yaml create mode 100644 config/v2/pipelineloop/crd.yaml create mode 100644 config/v2/pipelineloop/kustomization.yaml create mode 100644 config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/pipelineloop/webhook/deployment.yaml create mode 100644 config/v2/pipelineloop/webhook/kustomization.yaml create mode 100644 config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml create mode 100644 config/v2/pipelineloop/webhook/role.yaml create mode 100644 config/v2/pipelineloop/webhook/rolebinding.yaml create mode 100644 config/v2/pipelineloop/webhook/service.yaml create mode 100644 config/v2/pipelineloop/webhook/serviceaccount.yaml create mode 100644 config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml create mode 100644 config/v2/tektoncrds/crd.yaml create mode 100644 config/v2/tektoncrds/kustomization.yaml create mode 100644 config/v2/tektoncrds/scc.anyuid.yaml create mode 100644 config/v2/tektoncrds/scc.privileged.yaml diff --git a/config/v2/cache/clusterrole.yaml b/config/v2/cache/clusterrole.yaml new 
file mode 100644 index 000000000..5178f4f4b --- /dev/null +++ b/config/v2/cache/clusterrole.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: kubeflow-pipelines-cache-deployer-clusterrole + name: kubeflow-pipelines-cache-deployer-clusterrole +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + - certificatesigningrequests/approval + verbs: + - create + - delete + - get + - update +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch +- apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/* + resources: + - signers + verbs: + - approve diff --git a/config/v2/cache/clusterrolebinding.yaml b/config/v2/cache/clusterrolebinding.yaml new file mode 100644 index 000000000..e9cf41f45 --- /dev/null +++ b/config/v2/cache/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubeflow-pipelines-cache-deployer-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeflow-pipelines-cache-deployer-clusterrole +subjects: +- kind: ServiceAccount + name: kubeflow-pipelines-cache-deployer-sa + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/cache/kustomization.yaml b/config/v2/cache/kustomization.yaml new file mode 100644 index 000000000..51229db72 --- /dev/null +++ b/config/v2/cache/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- clusterrole.yaml +- clusterrolebinding.yaml +- serviceaccount.yaml diff --git a/config/v2/cache/serviceaccount.yaml b/config/v2/cache/serviceaccount.yaml new file mode 100644 index 000000000..ffa5d061b --- /dev/null +++ b/config/v2/cache/serviceaccount.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + 
app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kubeflow-pipelines-cache-deployer-sa + \ No newline at end of file diff --git a/config/v2/driver/clusterrole.yaml b/config/v2/driver/clusterrole.yaml new file mode 100644 index 000000000..45a51fbf3 --- /dev/null +++ b/config/v2/driver/clusterrole.yaml @@ -0,0 +1,63 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + name: kfp-driver-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - runs/finalizers + - customruns/finalizers + - runs/status + - customruns/status + - pipelineruns + - task + - taskruns + - conditions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list diff --git a/config/v2/driver/clusterrolebinding.yaml b/config/v2/driver/clusterrolebinding.yaml new file mode 100644 index 000000000..05db8c567 --- /dev/null +++ b/config/v2/driver/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + name: kfp-driver-cluster-access-clusterrolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-driver-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-driver + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/deployment.yaml b/config/v2/driver/deployment.yaml new file mode 100644 index 000000000..ce150c265 --- /dev/null +++ b/config/v2/driver/deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: ckfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + app.kubernetes.io/version: devel + name: kfp-driver +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: kfp-driver + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + app.kubernetes.io/version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-driver:2.0.0 + imagePullPolicy: Always + name: kfp-driver + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfp-driver diff --git a/config/v2/driver/kustomization.yaml b/config/v2/driver/kustomization.yaml new file mode 100644 index 
000000000..4968c8918 --- /dev/null +++ b/config/v2/driver/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- clusterrole.yaml +- clusterrolebinding.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- service.yaml +- serviceaccount.yaml \ No newline at end of file diff --git a/config/v2/driver/role.yaml b/config/v2/driver/role.yaml new file mode 100644 index 000000000..b4c9f9130 --- /dev/null +++ b/config/v2/driver/role.yaml @@ -0,0 +1,77 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + namespace: datasciencepipelinesapplications-controller + name: kfp-driver-role +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - "" + resources: + - persistentvolumes + - persistentvolumeclaims + verbs: + - '*' +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete + - get +- apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + - services + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + resources: + - deployments + - replicasets + verbs: + - '*' +- apiGroups: + - kubeflow.org + resources: + - '*' + verbs: + - '*' +- apiGroups: + - batch + resources: + - jobs + verbs: + - '*' +- apiGroups: + - machinelearning.seldon.io + resources: + - seldondeployments + verbs: + - '*' +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/driver/rolebinding.yaml b/config/v2/driver/rolebinding.yaml new file mode 100644 index 000000000..9819d3b97 --- /dev/null +++ b/config/v2/driver/rolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + 
app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + name: kfp-driver-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfp-driver-role +subjects: +- kind: ServiceAccount + name: kfp-driver + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/service.yaml b/config/v2/driver/service.yaml new file mode 100644 index 000000000..2d4e2bbd5 --- /dev/null +++ b/config/v2/driver/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kfp-driver + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-driver +spec: + ports: + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline diff --git a/config/v2/driver/serviceaccount.yaml b/config/v2/driver/serviceaccount.yaml new file mode 100644 index 000000000..76988053a --- /dev/null +++ b/config/v2/driver/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + namespace: datasciencepipelinesapplications-controller + name: kfp-driver diff --git a/config/v2/exithandler/clusterrole.leaderelection.yaml b/config/v2/exithandler/clusterrole.leaderelection.yaml new file mode 100644 index 000000000..2d68dd2df --- /dev/null +++ b/config/v2/exithandler/clusterrole.leaderelection.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-leader-election +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..a681c899a --- /dev/null +++ b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml @@ -0,0 +1,66 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - taskruns + - pipelineruns + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - tekton.dev + resources: + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - custom.tekton.dev + resources: + - exithandlers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml new file mode 100644 index 000000000..3e5643fbe --- /dev/null +++ b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: 
controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-tenant-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..1f3b550ff --- /dev/null +++ b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-controller-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..efeac890b --- /dev/null +++ b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git 
a/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml new file mode 100644 index 000000000..05af4d4bd --- /dev/null +++ b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-tenant-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-controller-tenant-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/deployment.yaml b/config/v2/exithandler/controller/deployment.yaml new file mode 100644 index 000000000..af843e38d --- /dev/null +++ b/config/v2/exithandler/controller/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-exithandler-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: kfp-exithandler-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + 
version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-exithandler-controller:2.0.0 + name: kfp-exithandler-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfp-exithandler-controller diff --git a/config/v2/exithandler/controller/kustomization.yaml b/config/v2/exithandler/controller/kustomization.yaml new file mode 100644 index 000000000..865426037 --- /dev/null +++ b/config/v2/exithandler/controller/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrole.tenantaccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- clusterrolebinding.tenantaccess.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- serviceaccount.yaml \ No newline at end of file diff --git a/config/v2/exithandler/controller/role.yaml b/config/v2/exithandler/controller/role.yaml new file mode 100644 index 000000000..67d9cc2cd --- /dev/null +++ b/config/v2/exithandler/controller/role.yaml @@ -0,0 +1,37 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-leader-election + - config-logging + - config-observability + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get 
+- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/exithandler/controller/rolebinding.yaml b/config/v2/exithandler/controller/rolebinding.yaml new file mode 100644 index 000000000..47958e2ce --- /dev/null +++ b/config/v2/exithandler/controller/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfp-exithandler-controller-role +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/serviceaccount.yaml b/config/v2/exithandler/controller/serviceaccount.yaml new file mode 100644 index 000000000..bd82939e1 --- /dev/null +++ b/config/v2/exithandler/controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfp-exithandler-controller diff --git a/config/v2/exithandler/crd.yaml b/config/v2/exithandler/crd.yaml new file mode 100644 index 000000000..da184975b --- /dev/null +++ b/config/v2/exithandler/crd.yaml @@ -0,0 +1,29 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + version: devel + name: exithandlers.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines 
+ - openshift-pipelines + kind: ExitHandler + plural: exithandlers + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/exithandler/kustomization.yaml b/config/v2/exithandler/kustomization.yaml new file mode 100644 index 000000000..2fac3a648 --- /dev/null +++ b/config/v2/exithandler/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- crd.yaml +- clusterrole.leaderelection.yaml +- ./controller +- ./webhook \ No newline at end of file diff --git a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..0e810e3e6 --- /dev/null +++ b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml @@ -0,0 +1,78 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-cluster-access-clusterrole +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - customresourcedefinitions/status + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - webhook.exithandler.custom.tekton.dev + resources: + - mutatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + 
- validation.webhook.exithandler.custom.tekton.dev + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..ae1d4ca15 --- /dev/null +++ b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-webhook-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/deployment.yaml b/config/v2/exithandler/webhook/deployment.yaml new file mode 100644 index 000000000..5ed24e94b --- /dev/null +++ b/config/v2/exithandler/webhook/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-exithandler-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: 
tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: kfp-exithandler-webhook + - name: WEBHOOK_SECRET_NAME + value: kfp-exithandler-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-exithandler-webhook:2.0.0 + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + - containerPort: 8443 + name: https-webhook + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfp-exithandler-webhook diff --git a/config/v2/exithandler/webhook/kustomization.yaml b/config/v2/exithandler/webhook/kustomization.yaml new file mode 100644 index 000000000..1b432c759 --- /dev/null +++ b/config/v2/exithandler/webhook/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrolebinding.clusteraccess.yaml +- deployment.yaml +- mutatingwebhookconfig.yaml +- role.yaml +- rolebinding.yaml +- secret.yaml +- service.yaml +- serviceaccount.yaml +- validatingwebhookconfig.yaml \ No newline at end of file diff --git a/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml new file mode 100644 index 000000000..7d8679d1a --- /dev/null +++ b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml @@ -0,0 +1,19 @@ 
+apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: webhook.exithandler.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: webhook.exithandler.custom.tekton.dev + sideEffects: None diff --git a/config/v2/exithandler/webhook/role.yaml b/config/v2/exithandler/webhook/role.yaml new file mode 100644 index 000000000..f7ef29288 --- /dev/null +++ b/config/v2/exithandler/webhook/role.yaml @@ -0,0 +1,53 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-logging + - config-observability + - config-leader-election + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - kfp-exithandler-webhook-certs + resources: + - secrets + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/exithandler/webhook/rolebinding.yaml b/config/v2/exithandler/webhook/rolebinding.yaml new file mode 100644 index 000000000..757701663 --- /dev/null +++ b/config/v2/exithandler/webhook/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + 
app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfp-exithandler-webhook-role +subjects: +- kind: ServiceAccount + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/secret.yaml b/config/v2/exithandler/webhook/secret.yaml new file mode 100644 index 000000000..ae60d20fa --- /dev/null +++ b/config/v2/exithandler/webhook/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: kfp-exithandler-webhook-certs diff --git a/config/v2/exithandler/webhook/service.yaml b/config/v2/exithandler/webhook/service.yaml new file mode 100644 index 000000000..437bcefa7 --- /dev/null +++ b/config/v2/exithandler/webhook/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton diff --git a/config/v2/exithandler/webhook/serviceaccount.yaml b/config/v2/exithandler/webhook/serviceaccount.yaml new file mode 100644 index 000000000..a5048a80a --- /dev/null +++ 
b/config/v2/exithandler/webhook/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfp-exithandler-webhook diff --git a/config/v2/exithandler/webhook/validatingwebhookconfig.yaml b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml new file mode 100644 index 000000000..c34a0b903 --- /dev/null +++ b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: validation.webhook.exithandler.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: validation.webhook.exithandler.custom.tekton.dev + sideEffects: None diff --git a/config/v2/kfptask/clusterrole.leaderelection.yaml b/config/v2/kfptask/clusterrole.leaderelection.yaml new file mode 100644 index 000000000..664e9c627 --- /dev/null +++ b/config/v2/kfptask/clusterrole.leaderelection.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-leader-election-clusterrole +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml new file mode 100644
index 000000000..0580fafa5 --- /dev/null +++ b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml @@ -0,0 +1,66 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - taskruns + - pipelineruns + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - tekton.dev + resources: + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - custom.tekton.dev + resources: + - kfptasks + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml new file mode 100644 index 000000000..11576abd2 --- /dev/null +++ b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-tenant-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..88108183b --- /dev/null +++ 
b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-controller-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..fa63b846e --- /dev/null +++ b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml new file mode 100644 index 000000000..4123d161e --- /dev/null +++ b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-tenant-access-clusterrolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-controller-tenant-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/deployment.yaml b/config/v2/kfptask/controller/deployment.yaml new file mode 100644 index 000000000..e3c3eee33 --- /dev/null +++ b/config/v2/kfptask/controller/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfptask-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: kfptask-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-kfptask-controller:2.0.0 + name: kfptask-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: 
RuntimeDefault + serviceAccountName: kfptask-controller diff --git a/config/v2/kfptask/controller/kustomization.yaml b/config/v2/kfptask/controller/kustomization.yaml new file mode 100644 index 000000000..54449bfd8 --- /dev/null +++ b/config/v2/kfptask/controller/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrole.tenantaccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- clusterrolebinding.tenantaccess.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- serviceaccount.yaml diff --git a/config/v2/kfptask/controller/role.yaml b/config/v2/kfptask/controller/role.yaml new file mode 100644 index 000000000..e7d6964e3 --- /dev/null +++ b/config/v2/kfptask/controller/role.yaml @@ -0,0 +1,38 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-leader-election + - config-logging + - config-observability + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/kfptask/controller/rolebinding.yaml b/config/v2/kfptask/controller/rolebinding.yaml new file mode 100644 index 000000000..1fc2d1047 --- /dev/null +++ b/config/v2/kfptask/controller/rolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + 
name: kfptask-controller-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfptask-controller-role +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/serviceaccount.yaml b/config/v2/kfptask/controller/serviceaccount.yaml new file mode 100644 index 000000000..642dbef0b --- /dev/null +++ b/config/v2/kfptask/controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfptask-controller diff --git a/config/v2/kfptask/crd.yaml b/config/v2/kfptask/crd.yaml new file mode 100644 index 000000000..a3ec9de60 --- /dev/null +++ b/config/v2/kfptask/crd.yaml @@ -0,0 +1,29 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + version: devel + name: kfptasks.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines + - openshift-pipelines + kind: KfpTask + plural: kfptasks + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/kfptask/kustomization.yaml b/config/v2/kfptask/kustomization.yaml new file mode 100644 index 000000000..2fac3a648 --- /dev/null +++ b/config/v2/kfptask/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- crd.yaml +- clusterrole.leaderelection.yaml +- ./controller +- ./webhook \ No newline at end of file diff --git a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml 
b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..c4e2177b6 --- /dev/null +++ b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml @@ -0,0 +1,78 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-cluster-access-clusterrole +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - customresourcedefinitions/status + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - webhook.kfptask.custom.tekton.dev + resources: + - mutatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - validation.webhook.kfptask.custom.tekton.dev + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..bfd617465 --- /dev/null +++ b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + 
app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-webhook-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..56bfcd42f --- /dev/null +++ b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/deployment.yaml b/config/v2/kfptask/webhook/deployment.yaml new file mode 100644 index 000000000..48bd44617 --- /dev/null +++ b/config/v2/kfptask/webhook/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfptask-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 
"false" + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: kfptask-webhook + - name: WEBHOOK_SECRET_NAME + value: kfptask-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-kfptask-webhook:2.0.0 + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + - containerPort: 8443 + name: https-webhook + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfptask-webhook diff --git a/config/v2/kfptask/webhook/kustomization.yaml b/config/v2/kfptask/webhook/kustomization.yaml new file mode 100644 index 000000000..6692ef450 --- /dev/null +++ b/config/v2/kfptask/webhook/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- deployment.yaml +- mutatingwebhookconfig.yaml +- role.yaml +- rolebinding.yaml +- secret.yaml +- service.yaml +- serviceaccount.yaml +- validatingwebhookconfig.yaml diff --git a/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml new file mode 100644 index 000000000..8b494fcba --- /dev/null +++ b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: 
admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: webhook.kfptask.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: webhook.kfptask.custom.tekton.dev + sideEffects: None diff --git a/config/v2/kfptask/webhook/role.yaml b/config/v2/kfptask/webhook/role.yaml new file mode 100644 index 000000000..c81cfc18e --- /dev/null +++ b/config/v2/kfptask/webhook/role.yaml @@ -0,0 +1,53 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-logging + - config-observability + - config-leader-election + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - kfptask-webhook-certs + resources: + - secrets + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/kfptask/webhook/rolebinding.yaml b/config/v2/kfptask/webhook/rolebinding.yaml new file mode 100644 index 000000000..e3d798921 --- /dev/null +++ b/config/v2/kfptask/webhook/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + 
name: kfptask-webhook-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfptask-webhook-role +subjects: +- kind: ServiceAccount + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/secret.yaml b/config/v2/kfptask/webhook/secret.yaml new file mode 100644 index 000000000..6387033ce --- /dev/null +++ b/config/v2/kfptask/webhook/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: kfptask-webhook-certs diff --git a/config/v2/kfptask/webhook/service.yaml b/config/v2/kfptask/webhook/service.yaml new file mode 100644 index 000000000..7f1b02e34 --- /dev/null +++ b/config/v2/kfptask/webhook/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton diff --git a/config/v2/kfptask/webhook/serviceaccount.yaml b/config/v2/kfptask/webhook/serviceaccount.yaml new file mode 100644 index 000000000..27ffcfd45 --- /dev/null +++ b/config/v2/kfptask/webhook/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: 
data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfptask-webhook diff --git a/config/v2/kfptask/webhook/validatingwebhookconfig.yaml b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml new file mode 100644 index 000000000..a50ee1e25 --- /dev/null +++ b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: validation.webhook.kfptask.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: validation.webhook.kfptask.custom.tekton.dev + sideEffects: None diff --git a/config/v2/kustomization.yaml b/config/v2/kustomization.yaml new file mode 100644 index 000000000..227f61350 --- /dev/null +++ b/config/v2/kustomization.yaml @@ -0,0 +1,21 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: foo-pipelines +namePrefix: data-science-pipelines-operator- + +# namePrefix: data-science-pipelines-operator- +# configMapGenerator: +# - envs: +# - params.env +# name: dspo-parameters + +resources: +- ./cache +- ./driver +- ./exithandler +- ./kfptask +- ./pipelineloop +- ./tektoncrds + +# configurations: +# - params.yaml diff --git a/config/v2/params.env b/config/v2/params.env new file mode 100644 index 000000000..3f02c0849 --- /dev/null +++ b/config/v2/params.env @@ -0,0 +1,14 @@ +IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:latest +IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:latest 
+IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:latest +IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:latest +IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:latest +IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:latest +IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:latest +IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:latest +IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.8 +IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 +IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 +IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 +IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 +IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 diff --git a/config/v2/params.yaml b/config/v2/params.yaml new file mode 100644 index 000000000..28beccc8c --- /dev/null +++ b/config/v2/params.yaml @@ -0,0 +1,7 @@ +varReference: +- path: data + kind: ConfigMap +- path: spec/template/spec/containers/env/value + kind: Deployment +- path: spec/template/spec/containers/image + kind: Deployment diff --git a/config/v2/pipelineloop/clusterrole.leaderelection.yaml b/config/v2/pipelineloop/clusterrole.leaderelection.yaml new file mode 100644 index 000000000..f5ba9f9e9 --- /dev/null +++ b/config/v2/pipelineloop/clusterrole.leaderelection.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-leader-election +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml 
b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..d53b3bd25 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml @@ -0,0 +1,66 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - taskruns + - pipelineruns + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - tekton.dev + resources: + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - custom.tekton.dev + resources: + - pipelineloops + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml new file mode 100644 index 000000000..f4c3d6c61 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-tenant-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git 
a/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..b8ff40533 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-controller-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..b4dced872 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml new file mode 100644 index 000000000..0aa6b29b6 --- /dev/null +++ 
b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-tenant-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-controller-tenant-access-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/deployment.yaml b/config/v2/pipelineloop/controller/deployment.yaml new file mode 100644 index 000000000..933bbe0b6 --- /dev/null +++ b/config/v2/pipelineloop/controller/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: tekton-pipelineloop-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: tekton-pipeline-loops + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: tekton-pipelineloop-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: 
CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-pipelineloop-controller:2.0.0 + name: tekton-pipelineloop-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: tekton-pipelineloop-controller diff --git a/config/v2/pipelineloop/controller/kustomization.yaml b/config/v2/pipelineloop/controller/kustomization.yaml new file mode 100644 index 000000000..0824469d5 --- /dev/null +++ b/config/v2/pipelineloop/controller/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrole.tenantaccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- clusterrolebinding.tenantaccess.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- serviceaccount.yaml + diff --git a/config/v2/pipelineloop/controller/role.yaml b/config/v2/pipelineloop/controller/role.yaml new file mode 100644 index 000000000..cb3c3e9a5 --- /dev/null +++ b/config/v2/pipelineloop/controller/role.yaml @@ -0,0 +1,36 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-leader-election + - config-logging + - config-observability + - object-store-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git 
a/config/v2/pipelineloop/controller/rolebinding.yaml b/config/v2/pipelineloop/controller/rolebinding.yaml new file mode 100644 index 000000000..4e26f6d24 --- /dev/null +++ b/config/v2/pipelineloop/controller/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-pipelineloop-controller-role +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/serviceaccount.yaml b/config/v2/pipelineloop/controller/serviceaccount.yaml new file mode 100644 index 000000000..5b1bafc59 --- /dev/null +++ b/config/v2/pipelineloop/controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/name: data-science-pipelines-operator + namespace: datasciencepipelinesapplications-controller + name: tekton-pipelineloop-controller diff --git a/config/v2/pipelineloop/crd.yaml b/config/v2/pipelineloop/crd.yaml new file mode 100644 index 000000000..860c6a7b7 --- /dev/null +++ b/config/v2/pipelineloop/crd.yaml @@ -0,0 +1,29 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + version: devel + name: pipelineloops.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines + - openshift-pipelines + kind: PipelineLoop + plural: pipelineloops + scope: Namespaced + 
versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/pipelineloop/kustomization.yaml b/config/v2/pipelineloop/kustomization.yaml new file mode 100644 index 000000000..2fac3a648 --- /dev/null +++ b/config/v2/pipelineloop/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- crd.yaml +- clusterrole.leaderelection.yaml +- ./controller +- ./webhook \ No newline at end of file diff --git a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..f3b5f165d --- /dev/null +++ b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml @@ -0,0 +1,78 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-cluster-access-clusterrole +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - customresourcedefinitions/status + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - webhook.pipelineloop.custom.tekton.dev + resources: + - mutatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - validation.webhook.pipelineloop.custom.tekton.dev + resources: + 
- validatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..9bab6fc0f --- /dev/null +++ b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-webhook-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..72ccc7792 --- /dev/null +++ b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/deployment.yaml 
b/config/v2/pipelineloop/webhook/deployment.yaml new file mode 100644 index 000000000..cc61bbcca --- /dev/null +++ b/config/v2/pipelineloop/webhook/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: tekton-pipelineloop-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: tekton-pipelineloop-webhook + - name: WEBHOOK_SECRET_NAME + value: tekton-pipelineloop-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-pipelineloop-webhook:2.0.0 + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + - containerPort: 8443 + name: https-webhook + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: 
RuntimeDefault + serviceAccountName: tekton-pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/kustomization.yaml b/config/v2/pipelineloop/webhook/kustomization.yaml new file mode 100644 index 000000000..519a9540f --- /dev/null +++ b/config/v2/pipelineloop/webhook/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- deployment.yaml +- mutatingwebhookconfig.yaml +- role.yaml +- rolebinding.yaml +- service.yaml +- serviceaccount.yaml +- validatingwebhookconfig.yaml + diff --git a/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml new file mode 100644 index 000000000..bb8faf3d3 --- /dev/null +++ b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + name: webhook.pipelineloop.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: webhook.pipelineloop.custom.tekton.dev + sideEffects: None diff --git a/config/v2/pipelineloop/webhook/role.yaml b/config/v2/pipelineloop/webhook/role.yaml new file mode 100644 index 000000000..7c0cad973 --- /dev/null +++ b/config/v2/pipelineloop/webhook/role.yaml @@ -0,0 +1,52 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch 
+- apiGroups: + - "" + resourceNames: + - config-logging + - config-observability + - config-leader-election + - object-store-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - tekton-pipelineloop-webhook-certs + resources: + - secrets + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/pipelineloop/webhook/rolebinding.yaml b/config/v2/pipelineloop/webhook/rolebinding.yaml new file mode 100644 index 000000000..f47f37f90 --- /dev/null +++ b/config/v2/pipelineloop/webhook/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-pipelineloop-webhook-role +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/service.yaml b/config/v2/pipelineloop/webhook/service.yaml new file mode 100644 index 000000000..b8f09a7b1 --- /dev/null +++ b/config/v2/pipelineloop/webhook/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: 
http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops diff --git a/config/v2/pipelineloop/webhook/serviceaccount.yaml b/config/v2/pipelineloop/webhook/serviceaccount.yaml new file mode 100644 index 000000000..2bd0bdddf --- /dev/null +++ b/config/v2/pipelineloop/webhook/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/name: data-science-pipelines-operator + namespace: datasciencepipelinesapplications-controller + name: tekton-pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml new file mode 100644 index 000000000..063ee7056 --- /dev/null +++ b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + name: validation.webhook.pipelineloop.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: validation.webhook.pipelineloop.custom.tekton.dev + sideEffects: None diff --git a/config/v2/tektoncrds/crd.yaml b/config/v2/tektoncrds/crd.yaml new file mode 100644 index 000000000..155c675a3 --- /dev/null +++ b/config/v2/tektoncrds/crd.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: 
CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + version: devel + name: breaktasks.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines + kind: BreakTask + plural: breaktasks + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/tektoncrds/kustomization.yaml b/config/v2/tektoncrds/kustomization.yaml new file mode 100644 index 000000000..2a8fa4333 --- /dev/null +++ b/config/v2/tektoncrds/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- crd.yaml +- scc.anyuid.yaml +- scc.privileged.yaml diff --git a/config/v2/tektoncrds/scc.anyuid.yaml b/config/v2/tektoncrds/scc.anyuid.yaml new file mode 100644 index 000000000..a25c7e939 --- /dev/null +++ b/config/v2/tektoncrds/scc.anyuid.yaml @@ -0,0 +1,61 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: null +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: kubeflow-anyuid provides all features of the restricted + SCC but allows users to run with any UID and any GID. 
+ name: kubeflow-anyuid-kfp-tekton +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:kubeflow:metadatadb +- system:serviceaccount:kubeflow:minio +- system:serviceaccount:kubeflow:default +- system:serviceaccount:kubeflow:pipeline-runner +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache-deployer-sa +- system:serviceaccount:kubeflow:metadata-grpc-server +- system:serviceaccount:kubeflow:kubeflow-pipelines-metadata-writer +- system:serviceaccount:kubeflow:ml-pipeline +- system:serviceaccount:kubeflow:ml-pipeline-persistenceagent +- system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow +- system:serviceaccount:kubeflow:ml-pipeline-ui +- system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account +- system:serviceaccount:kubeflow:ml-pipeline-visualizationserver +- system:serviceaccount:kubeflow:mysql +- system:serviceaccount:kubeflow:kfp-csi-s3 +- system:serviceaccount:kubeflow:kfp-csi-attacher +- system:serviceaccount:kubeflow:kfp-csi-provisioner +- system:serviceaccount:openshift-pipelines:kfp-driver +- system:serviceaccount:openshift-pipelines:kfp-exithandler-controller +- system:serviceaccount:openshift-pipelines:kfp-exithandler-webhook +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-controller +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-webhook +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret diff --git a/config/v2/tektoncrds/scc.privileged.yaml b/config/v2/tektoncrds/scc.privileged.yaml new file mode 100644 index 000000000..eafc24ea2 --- /dev/null +++ b/config/v2/tektoncrds/scc.privileged.yaml @@ -0,0 +1,62 @@ +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false 
+allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: null +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: kubeflow-privileged provides all features of the restricted + SCC but allows users to run with any UID and any GID. + name: kubeflow-privileged-kfp-tekton +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:kubeflow:metadatadb +- system:serviceaccount:kubeflow:minio +- system:serviceaccount:kubeflow:default +- system:serviceaccount:kubeflow:pipeline-runner +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache-deployer-sa +- system:serviceaccount:kubeflow:metadata-grpc-server +- system:serviceaccount:kubeflow:kubeflow-pipelines-metadata-writer +- system:serviceaccount:kubeflow:ml-pipeline +- system:serviceaccount:kubeflow:ml-pipeline-persistenceagent +- system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow +- system:serviceaccount:kubeflow:ml-pipeline-ui +- system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account +- system:serviceaccount:kubeflow:ml-pipeline-visualizationserver +- system:serviceaccount:kubeflow:mysql +- system:serviceaccount:kubeflow:kfp-csi-s3 +- system:serviceaccount:kubeflow:kfp-csi-attacher +- system:serviceaccount:kubeflow:kfp-csi-provisioner +- system:serviceaccount:openshift-pipelines:kfp-driver +- system:serviceaccount:openshift-pipelines:kfp-exithandler-controller +- system:serviceaccount:openshift-pipelines:kfp-exithandler-webhook +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-controller +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-webhook +volumes: +- 
configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +- hostPath From be4814a13a3f1977001a577c14c33ff2a3dba450 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 26 Sep 2023 03:42:18 -0400 Subject: [PATCH 13/85] WIP: Implement v2deploy make command --- Makefile | 8 ++++++++ config/overlays/make-v2deploy/kustomization.yaml | 5 +++++ 2 files changed, 13 insertions(+) create mode 100644 config/overlays/make-v2deploy/kustomization.yaml diff --git a/Makefile b/Makefile index 7ab88e250..bb57ab03c 100644 --- a/Makefile +++ b/Makefile @@ -52,6 +52,8 @@ IMG ?= quay.io/opendatahub/data-science-pipelines-operator:main ENVTEST_K8S_VERSION = 1.25.0 # Namespace to deploy the operator OPERATOR_NS ?= odh-applications +# Namespace to deploy v2 infrastructure +V2INFRA_NS ?= openshift-pipelines # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -159,6 +161,12 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi cd config/overlays/make-deploy && $(KUSTOMIZE) edit set namespace ${OPERATOR_NS} $(KUSTOMIZE) build config/overlays/make-deploy | kubectl delete --ignore-not-found=$(ignore-not-found) -f - +.PHONY: v2deploy +v2deploy: manifests kustomize + cd config/overlays/make-v2deploy \ + && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} + $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl apply -f - + ##@ Build Dependencies ## Location to install dependencies to diff --git a/config/overlays/make-v2deploy/kustomization.yaml b/config/overlays/make-v2deploy/kustomization.yaml new file mode 100644 index 000000000..6d2e0a3ca --- /dev/null +++ b/config/overlays/make-v2deploy/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: openshift-pipelines +resources: +- ../../v2 From 94adfa6005eac41d6f12c3b10113df0358452f4e Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 27 Sep 2023 18:28:14 -0400 
Subject: [PATCH 14/85] Implement DSPVersion item in DSPA CRD --- api/v1alpha1/dspipeline_types.go | 3 + config/base/kustomization.yaml | 63 +++++++++++++++++++ config/base/params.env | 9 +++ config/configmaps/files/config.yaml | 12 +++- ...b.io_datasciencepipelinesapplications.yaml | 3 + config/manager/manager.yaml | 18 ++++++ controllers/config/defaults.go | 16 ++++- controllers/dspipeline_params.go | 56 +++++++++++++---- kfdef/kfdef.yaml | 19 ++++++ 9 files changed, 185 insertions(+), 14 deletions(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index f97629d53..fbb266877 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -44,6 +44,9 @@ type DSPASpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *VisualizationServer `json:"visualizationServer"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:="v1" + DSPVersion string `json:"dspVersion,omitempty"` } type APIServer struct { diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index 5e22aa45d..bfb0fbb0e 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -113,5 +113,68 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGES_DSPO + - name: IMAGESV2_APISERVER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_APISERVER + - name: IMAGESV2_ARTIFACT + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARTIFACT + - name: IMAGESV2_PERSISTENTAGENT + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_PERSISTENTAGENT + - name: IMAGESV2_SCHEDULEDWORKFLOW + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_SCHEDULEDWORKFLOW + - name: IMAGESV2_CACHE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: 
+ fieldpath: data.IMAGESV2_CACHE + - name: IMAGESV2_MOVERESULTSIMAGE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MOVERESULTSIMAGE + - name: IMAGESV2_MLMDENVOY + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MLMDENVOY + - name: IMAGESV2_MLMDGRPC + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MLMDGRPC + - name: IMAGESV2_MLMDWRITER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MLMDWRITER configurations: - params.yaml diff --git a/config/base/params.env b/config/base/params.env index 3f02c0849..ec8443819 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -12,3 +12,12 @@ IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 +IMAGESV2_APISERVER=quay.io/rmartine/apiserver:v2 +IMAGESV2_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main +IMAGESV2_PERSISTENTAGENT=quay.io/rmartine/persistenceagent-dev:6b8723529 +IMAGESV2_SCHEDULEDWORKFLOW=quay.io/rmartine/swf-dev:6b8723529 +IMAGESV2_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 +IMAGESV2_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 +IMAGESV2_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 +IMAGESV2_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 +IMAGESV2_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 41319834a..f246d7dc6 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -11,4 +11,14 @@ 
Images: MlmdGRPC: $(IMAGES_MLMDGRPC) MlmdWriter: $(IMAGES_MLMDWRITER) CRDViewer: $(IMAGES_CRDVIEWER) - VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) \ No newline at end of file + VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) +ImagesV2: + ApiServer: $(IMAGESV2_APISERVER) + Artifact: $(IMAGESV2_ARTIFACT) + Cache: $(IMAGESV2_CACHE) + MoveResultsImage: $(IMAGESV2_MOVERESULTSIMAGE) + PersistentAgent: $(IMAGESV2_PERSISTENTAGENT) + ScheduledWorkflow: $(IMAGESV2_SCHEDULEDWORKFLOW) + MlmdEnvoy: $(IMAGESV2_MLMDENVOY) + MlmdGRPC: $(IMAGESV2_MLMDGRPC) + MlmdWriter: $(IMAGESV2_MLMDWRITER) \ No newline at end of file diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index a9fa22b28..0326280fa 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -254,6 +254,9 @@ spec: type: string type: object type: object + dspVersion: + default: v1 + type: string mlmd: default: deploy: false diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index f4833e587..bce166797 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -60,6 +60,24 @@ spec: value: $(IMAGES_CRDVIEWER) - name: IMAGES_VISUALIZATIONSERVER value: $(IMAGES_VISUALIZATIONSERVER) + - name: IMAGESV2_APISERVER + value: $(IMAGESV2_APISERVER) + - name: IMAGESV2_ARTIFACT + value: $(IMAGESV2_ARTIFACT) + - name: IMAGESV2_PERSISTENTAGENT + value: $(IMAGESV2_PERSISTENTAGENT) + - name: IMAGESV2_SCHEDULEDWORKFLOW + value: $(IMAGESV2_SCHEDULEDWORKFLOW) + - name: IMAGESV2_CACHE + value: $(IMAGESV2_CACHE) + - name: IMAGESV2_MOVERESULTSIMAGE + value: $(IMAGESV2_MOVERESULTSIMAGE) + - name: IMAGESV2_MLMDENVOY + value: $(IMAGESV2_MLMDENVOY) + - name: 
IMAGESV2_MLMDGRPC + value: $(IMAGESV2_MLMDGRPC) + - name: IMAGESV2_MLMDWRITER + value: $(IMAGESV2_MLMDWRITER) securityContext: allowPrivilegeEscalation: false capabilities: diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index 4abfdb23a..d909c160b 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -17,10 +17,11 @@ limitations under the License. package config import ( + "time" + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/spf13/viper" "k8s.io/apimachinery/pkg/api/resource" - "time" ) const ( @@ -68,6 +69,19 @@ const ( MlmdWriterImagePath = "Images.MlmdWriter" ) +// DSPV2 Image Paths +const ( + APIServerImagePathV2 = "ImagesV2.ApiServer" + APIServerArtifactImagePathV2 = "ImagesV2.Artifact" + APIServerCacheImagePathV2 = "ImagesV2.Cache" + APIServerMoveResultsImagePathV2 = "ImagesV2.MoveResultsImage" + PersistenceAgentImagePathV2 = "ImagesV2.PersistentAgent" + ScheduledWorkflowImagePathV2 = "ImagesV2.ScheduledWorkflow" + MlmdEnvoyImagePathV2 = "ImagesV2.MlmdEnvoy" + MlmdGRPCImagePathV2 = "ImagesV2.MlmdGRPC" + MlmdWriterImagePathV2 = "ImagesV2.MlmdWriter" +) + // DSPA Status Condition Types const ( DatabaseAvailable = "DatabaseAvailable" diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index d19582253..e6be6229a 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -75,6 +75,10 @@ type ObjectStorageConnection struct { SecretAccessKey string } +func (p *DSPAParams) UsingV2Pipelines(dsp *dspa.DataSciencePipelinesApplication) bool { + return dsp.Spec.DSPVersion == "v2" +} + // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. 
func (p *DSPAParams) UsingExternalDB(dsp *dspa.DataSciencePipelinesApplication) bool { if dsp.Spec.Database != nil && dsp.Spec.Database.ExternalDB != nil { @@ -379,25 +383,33 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { if p.MLMD != nil { + MlmdEnvoyImagePath := config.MlmdEnvoyImagePath + MlmdGRPCImagePath := config.MlmdGRPCImagePath + MlmdWriterImagePath := config.MlmdWriterImagePath + if p.UsingV2Pipelines(dsp) { + MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2 + MlmdGRPCImagePath = config.MlmdGRPCImagePathV2 + MlmdWriterImagePath = config.MlmdWriterImagePathV2 + } if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ - Image: config.GetStringConfigWithDefault(config.MlmdEnvoyImagePath, config.DefaultImageValue), + Image: config.GetStringConfigWithDefault(MlmdEnvoyImagePath, config.DefaultImageValue), } } if p.MLMD.GRPC == nil { p.MLMD.GRPC = &dspa.GRPC{ - Image: config.GetStringConfigWithDefault(config.MlmdGRPCImagePath, config.DefaultImageValue), + Image: config.GetStringConfigWithDefault(MlmdGRPCImagePath, config.DefaultImageValue), } } if p.MLMD.Writer == nil { p.MLMD.Writer = &dspa.Writer{ - Image: config.GetStringConfigWithDefault(config.MlmdWriterImagePath, config.DefaultImageValue), + Image: config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue), } } - mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(config.MlmdEnvoyImagePath, config.DefaultImageValue) - mlmdGRPCImageFromConfig := config.GetStringConfigWithDefault(config.MlmdGRPCImagePath, config.DefaultImageValue) - mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(config.MlmdWriterImagePath, config.DefaultImageValue) + mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(MlmdEnvoyImagePath, config.DefaultImageValue) + mlmdGRPCImageFromConfig := 
config.GetStringConfigWithDefault(MlmdGRPCImagePath, config.DefaultImageValue) + mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue) setStringDefault(mlmdEnvoyImageFromConfig, &p.MLMD.Envoy.Image) setStringDefault(mlmdGRPCImageFromConfig, &p.MLMD.GRPC.Image) @@ -440,12 +452,24 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip // TODO: If p. is nil we should create defaults + pipelinesV2Images := p.UsingV2Pipelines(dsp) + if p.APIServer != nil { + APIServerImagePath := config.APIServerImagePath + APIServerArtifactImagePath := config.APIServerArtifactImagePath + APIServerCacheImagePath := config.APIServerCacheImagePath + APIServerMoveResultsImagePath := config.APIServerMoveResultsImagePath + if pipelinesV2Images { + APIServerImagePath = config.APIServerImagePathV2 + APIServerArtifactImagePath = config.APIServerArtifactImagePathV2 + APIServerCacheImagePath = config.APIServerCacheImagePathV2 + APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2 + } - serverImageFromConfig := config.GetStringConfigWithDefault(config.APIServerImagePath, config.DefaultImageValue) - artifactImageFromConfig := config.GetStringConfigWithDefault(config.APIServerArtifactImagePath, config.DefaultImageValue) - cacheImageFromConfig := config.GetStringConfigWithDefault(config.APIServerCacheImagePath, config.DefaultImageValue) - moveResultsImageFromConfig := config.GetStringConfigWithDefault(config.APIServerMoveResultsImagePath, config.DefaultImageValue) + serverImageFromConfig := config.GetStringConfigWithDefault(APIServerImagePath, config.DefaultImageValue) + artifactImageFromConfig := config.GetStringConfigWithDefault(APIServerArtifactImagePath, config.DefaultImageValue) + cacheImageFromConfig := config.GetStringConfigWithDefault(APIServerCacheImagePath, config.DefaultImageValue) + moveResultsImageFromConfig := config.GetStringConfigWithDefault(APIServerMoveResultsImagePath, 
config.DefaultImageValue) setStringDefault(serverImageFromConfig, &p.APIServer.Image) setStringDefault(artifactImageFromConfig, &p.APIServer.ArtifactImage) @@ -462,12 +486,20 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip } } if p.PersistenceAgent != nil { - persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(config.PersistenceAgentImagePath, config.DefaultImageValue) + PersistenceAgentImagePath := config.PersistenceAgentImagePath + if pipelinesV2Images { + PersistenceAgentImagePath = config.PersistenceAgentImagePathV2 + } + persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(PersistenceAgentImagePath, config.DefaultImageValue) setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image) setResourcesDefault(config.PersistenceAgentResourceRequirements, &p.PersistenceAgent.Resources) } if p.ScheduledWorkflow != nil { - scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(config.ScheduledWorkflowImagePath, config.DefaultImageValue) + ScheduledWorkflowImagePath := config.ScheduledWorkflowImagePath + if pipelinesV2Images { + ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2 + } + scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(ScheduledWorkflowImagePath, config.DefaultImageValue) setStringDefault(scheduledWorkflowImageFromConfig, &p.ScheduledWorkflow.Image) setResourcesDefault(config.ScheduledWorkflowResourceRequirements, &p.ScheduledWorkflow.Resources) } diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 4f1c2efe2..2778cdfc7 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -34,6 +34,25 @@ spec: value: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 - name: IMAGES_VISUALIZATIONSERVER value: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 + - name: IMAGESV2_APISERVER + value: quay.io/rmartine/apiserver:v2 + - name: IMAGESV2_ARTIFACT + value: quay.io/opendatahub/ds-pipelines-artifact-manager:main + - name: 
IMAGESV2_PERSISTENTAGENT + value: quay.io/rmartine/persistenceagent-dev:6b8723529 + - name: IMAGESV2_SCHEDULEDWORKFLOW + value: quay.io/rmartine/swf-dev:6b8723529 + - name: IMAGESV2_CACHE + value: registry.access.redhat.com/ubi8/ubi-minimal:8.7 + - name: IMAGESV2_MOVERESULTSIMAGE + value: registry.access.redhat.com/ubi8/ubi-micro:8.7 + - name: IMAGESV2_MLMDENVOY + value: gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 + - name: IMAGESV2_MLMDGRPC + value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 + - name: IMAGESV2_MLMDWRITER + value: gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 + repoRef: name: manifests path: config From a23666c2a84bf75e02e22c492e84c93a56497891 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 27 Sep 2023 18:29:45 -0400 Subject: [PATCH 15/85] Remove unneeded v2 kustomization params --- config/v2/kustomization.yaml | 13 +++---------- config/v2/params.env | 14 -------------- config/v2/params.yaml | 7 ------- 3 files changed, 3 insertions(+), 31 deletions(-) delete mode 100644 config/v2/params.env delete mode 100644 config/v2/params.yaml diff --git a/config/v2/kustomization.yaml b/config/v2/kustomization.yaml index 227f61350..ffc6327ef 100644 --- a/config/v2/kustomization.yaml +++ b/config/v2/kustomization.yaml @@ -1,13 +1,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: foo-pipelines +namespace: openshift-pipelines namePrefix: data-science-pipelines-operator- -# namePrefix: data-science-pipelines-operator- -# configMapGenerator: -# - envs: -# - params.env -# name: dspo-parameters +namePrefix: data-science-pipelines-operator- resources: - ./cache @@ -15,7 +11,4 @@ resources: - ./exithandler - ./kfptask - ./pipelineloop -- ./tektoncrds - -# configurations: -# - params.yaml +- ./tektoncrds \ No newline at end of file diff --git a/config/v2/params.env b/config/v2/params.env deleted file mode 100644 index 3f02c0849..000000000 --- a/config/v2/params.env +++ /dev/null @@ -1,14 +0,0 @@ 
-IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:latest -IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:latest -IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:latest -IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:latest -IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:latest -IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:latest -IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:latest -IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:latest -IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.8 -IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 -IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 -IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 -IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 -IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 diff --git a/config/v2/params.yaml b/config/v2/params.yaml deleted file mode 100644 index 28beccc8c..000000000 --- a/config/v2/params.yaml +++ /dev/null @@ -1,7 +0,0 @@ -varReference: -- path: data - kind: ConfigMap -- path: spec/template/spec/containers/env/value - kind: Deployment -- path: spec/template/spec/containers/image - kind: Deployment From 611b415b609ec6c0271dfc9a2c9e3e6edf1cdef5 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 15:01:30 -0400 Subject: [PATCH 16/85] Add ApplyAll function --- controllers/apiserver.go | 8 +++----- controllers/common.go | 10 ++++------ controllers/database.go | 9 ++++----- controllers/dspipeline_controller.go | 13 ++++++++++++- controllers/mlmd.go | 9 ++++----- controllers/mlpipeline_ui.go | 8 +++----- controllers/persistence_agent.go | 8 +++----- controllers/scheduled_workflow.go | 8 +++----- controllers/storage.go | 11 +++++------ 
controllers/util/util.go | 15 +++++++++++++++ 10 files changed, 56 insertions(+), 43 deletions(-) diff --git a/controllers/apiserver.go b/controllers/apiserver.go index 1971b0e67..14b1b2c6e 100644 --- a/controllers/apiserver.go +++ b/controllers/apiserver.go @@ -59,11 +59,9 @@ func (r *DSPAReconciler) ReconcileAPIServer(ctx context.Context, dsp *dspav1alph log.Info("Applying APIServer Resources") - for _, template := range apiServerTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, apiServerTemplates) + if err != nil { + return err } if dsp.Spec.APIServer.EnableRoute { diff --git a/controllers/common.go b/controllers/common.go index f3982e236..30249a819 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -30,14 +30,12 @@ func (r *DSPAReconciler) ReconcileCommon(dsp *dspav1alpha1.DataSciencePipelinesA log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) log.Info("Applying Common Resources") - for _, template := range commonTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, commonTemplates) + if err != nil { + return err } - err := r.ApplyWithoutOwner(params, commonCusterRolebindingTemplate) + err = r.ApplyWithoutOwner(params, commonCusterRolebindingTemplate) if err != nil { return err } diff --git a/controllers/database.go b/controllers/database.go index 33a083a53..205cec4ce 100644 --- a/controllers/database.go +++ b/controllers/database.go @@ -20,6 +20,7 @@ import ( "database/sql" b64 "encoding/base64" "fmt" + _ "github.com/go-sql-driver/mysql" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" @@ -111,11 +112,9 @@ func (r *DSPAReconciler) ReconcileDatabase(ctx context.Context, dsp *dspav1alpha } } else if deployMariaDB || deployDefaultDB { log.Info("Applying mariaDB 
resources.") - for _, template := range dbTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, dbTemplates) + if err != nil { + return err } // If no database was not specified, deploy mariaDB by default. // Update the CR with the state of mariaDB to accurately portray diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 5f87a8545..6328ddb70 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -19,9 +19,10 @@ package controllers import ( "context" "fmt" - "sigs.k8s.io/controller-runtime/pkg/controller" "time" + "sigs.k8s.io/controller-runtime/pkg/controller" + "github.com/go-logr/logr" mf "github.com/manifestival/manifestival" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" @@ -53,6 +54,16 @@ type DSPAReconciler struct { TemplatesPath string } +func (r *DSPAReconciler) ApplyAll(owner mf.Owner, params *DSPAParams, templates []string, fns ...mf.Transformer) error { + for _, template := range templates { + err := r.Apply(owner, params, template) + if err != nil { + return err + } + } + return nil +} + func (r *DSPAReconciler) Apply(owner mf.Owner, params *DSPAParams, template string, fns ...mf.Transformer) error { tmplManifest, err := config.Manifest(r.Client, r.TemplatesPath+template, params) if err != nil { diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 1823f0131..0b63c4baf 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -40,12 +40,11 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp if params.UsingMLMD(dsp) { log.Info("Applying ML-Metadata (MLMD) Resources") - for _, template := range mlmdTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, mlmdTemplates) + if err != nil { + return err } + log.Info("Finished applying MLMD Resources") } return nil diff 
--git a/controllers/mlpipeline_ui.go b/controllers/mlpipeline_ui.go index 7e6c6f425..c32fc3d28 100644 --- a/controllers/mlpipeline_ui.go +++ b/controllers/mlpipeline_ui.go @@ -42,11 +42,9 @@ func (r *DSPAReconciler) ReconcileUI(dsp *dspav1alpha1.DataSciencePipelinesAppli } log.Info("Applying MlPipelineUI Resources") - for _, template := range mlPipelineUITemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, mlPipelineUITemplates) + if err != nil { + return err } log.Info("Finished applying MlPipelineUI Resources") diff --git a/controllers/persistence_agent.go b/controllers/persistence_agent.go index ca6462cb3..31fb90873 100644 --- a/controllers/persistence_agent.go +++ b/controllers/persistence_agent.go @@ -39,11 +39,9 @@ func (r *DSPAReconciler) ReconcilePersistenceAgent(dsp *dspav1alpha1.DataScience log.Info("Applying PersistenceAgent Resources") - for _, template := range persistenceAgentTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, persistenceAgentTemplates) + if err != nil { + return err } log.Info("Finished applying PersistenceAgent Resources") diff --git a/controllers/scheduled_workflow.go b/controllers/scheduled_workflow.go index 300fd71b9..8f0f5e0ff 100644 --- a/controllers/scheduled_workflow.go +++ b/controllers/scheduled_workflow.go @@ -41,11 +41,9 @@ func (r *DSPAReconciler) ReconcileScheduledWorkflow(dsp *dspav1alpha1.DataScienc log.Info("Applying ScheduledWorkflow Resources") - for _, template := range scheduledWorkflowTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, scheduledWorkflowTemplates) + if err != nil { + return err } log.Info("Finished applying ScheduledWorkflow Resources") diff --git a/controllers/storage.go b/controllers/storage.go index 2d7454c9d..727012948 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -21,12 
+21,13 @@ import ( "encoding/base64" "errors" "fmt" + "net/http" + "github.com/go-logr/logr" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" - "net/http" ) const storageSecret = "minio/secret.yaml.tmpl" @@ -161,11 +162,9 @@ func (r *DSPAReconciler) ReconcileStorage(ctx context.Context, dsp *dspav1alpha1 } } else if deployMinio { log.Info("Applying object storage resources.") - for _, template := range storageTemplates { - err := r.Apply(dsp, params, template) - if err != nil { - return err - } + err := r.ApplyAll(dsp, params, storageTemplates) + if err != nil { + return err } // If no storage was not specified, deploy minio by default. // Update the CR with the state of minio to accurately portray diff --git a/controllers/util/util.go b/controllers/util/util.go index 4e86338fc..ca3118f51 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -17,6 +17,8 @@ limitations under the License. 
package util import ( + "os" + appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -44,3 +46,16 @@ func GetDeploymentCondition(status appsv1.DeploymentStatus, condType appsv1.Depl func BoolPointer(b bool) *bool { return &b } + +func GetTemplatesInDir(templateDirectory string) ([]string, error) { + entries, err := os.ReadDir(templateDirectory) + if err != nil { + return nil, err + } + + var templates []string + for _, e := range entries { + templates = append(templates, e.Name()) + } + return templates, nil +} From 88004d173ffe8f65030ddf658549623258571d0b Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 15:44:03 -0400 Subject: [PATCH 17/85] Add ApplyDir Reconciler function, Dynamically retrieve templates --- controllers/apiserver.go | 17 ++--------------- controllers/dspipeline_controller.go | 8 ++++++++ controllers/mlmd.go | 15 ++------------- controllers/mlpipeline_ui.go | 13 ++----------- controllers/persistence_agent.go | 9 ++------- controllers/scheduled_workflow.go | 11 ++--------- controllers/util/util.go | 9 +++++---- 7 files changed, 23 insertions(+), 59 deletions(-) diff --git a/controllers/apiserver.go b/controllers/apiserver.go index 14b1b2c6e..816b637cb 100644 --- a/controllers/apiserver.go +++ b/controllers/apiserver.go @@ -24,19 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var apiServerTemplates = []string{ - "apiserver/artifact_script.yaml.tmpl", - "apiserver/role_ds-pipeline.yaml.tmpl", - "apiserver/role_pipeline-runner.yaml.tmpl", - "apiserver/role_ds-pipeline-user-access.yaml.tmpl", - "apiserver/rolebinding_ds-pipeline.yaml.tmpl", - "apiserver/rolebinding_pipeline-runner.yaml.tmpl", - "apiserver/sa_ds-pipeline.yaml.tmpl", - "apiserver/sa_pipeline-runner.yaml.tmpl", - "apiserver/service.yaml.tmpl", - "apiserver/deployment.yaml.tmpl", - "apiserver/monitor.yaml.tmpl", -} +var apiServerTemplatesDir = "apiserver" // serverRoute is a resource deployed conditionally // as such it is handled separately 
@@ -58,8 +46,7 @@ func (r *DSPAReconciler) ReconcileAPIServer(ctx context.Context, dsp *dspav1alph } log.Info("Applying APIServer Resources") - - err := r.ApplyAll(dsp, params, apiServerTemplates) + err := r.ApplyDir(dsp, params, apiServerTemplatesDir) if err != nil { return err } diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 6328ddb70..185c3b59c 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -54,6 +54,14 @@ type DSPAReconciler struct { TemplatesPath string } +func (r *DSPAReconciler) ApplyDir(owner mf.Owner, params *DSPAParams, directory string, fns ...mf.Transformer) error { + templates, err := util.GetTemplatesInDir(r.TemplatesPath, directory) + if err != nil { + return err + } + return r.ApplyAll(owner, params, templates) +} + func (r *DSPAReconciler) ApplyAll(owner mf.Owner, params *DSPAParams, templates []string, fns ...mf.Transformer) error { for _, template := range templates { err := r.Apply(owner, params, template) diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 0b63c4baf..78db0983c 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -19,18 +19,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var mlmdTemplates = []string{ - "ml-metadata/metadata-envoy.configmap.yaml.tmpl", - "ml-metadata/metadata-envoy.deployment.yaml.tmpl", - "ml-metadata/metadata-envoy.service.yaml.tmpl", - "ml-metadata/metadata-grpc.deployment.yaml.tmpl", - "ml-metadata/metadata-grpc.service.yaml.tmpl", - "ml-metadata/metadata-grpc.serviceaccount.yaml.tmpl", - "ml-metadata/metadata-writer.deployment.yaml.tmpl", - "ml-metadata/metadata-writer.role.yaml.tmpl", - "ml-metadata/metadata-writer.rolebinding.yaml.tmpl", - "ml-metadata/metadata-writer.serviceaccount.yaml.tmpl", -} +var mlmdTemplatesDir = "ml-metadata" func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApplication, params 
*DSPAParams) error { @@ -40,7 +29,7 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp if params.UsingMLMD(dsp) { log.Info("Applying ML-Metadata (MLMD) Resources") - err := r.ApplyAll(dsp, params, mlmdTemplates) + err := r.ApplyDir(dsp, params, mlmdTemplatesDir) if err != nil { return err } diff --git a/controllers/mlpipeline_ui.go b/controllers/mlpipeline_ui.go index c32fc3d28..16a87a9c2 100644 --- a/controllers/mlpipeline_ui.go +++ b/controllers/mlpipeline_ui.go @@ -20,16 +20,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var mlPipelineUITemplates = []string{ - "mlpipelines-ui/configmap.yaml.tmpl", - "mlpipelines-ui/deployment.yaml.tmpl", - "mlpipelines-ui/role.yaml.tmpl", - "mlpipelines-ui/rolebinding.yaml.tmpl", - "mlpipelines-ui/route.yaml.tmpl", - "mlpipelines-ui/sa-ds-pipeline-ui.yaml.tmpl", - "mlpipelines-ui/sa_ds-pipelines-viewer.yaml.tmpl", - "mlpipelines-ui/service.yaml.tmpl", -} +var mlPipelineUITemplatesDir = "mlpipelines-ui" func (r *DSPAReconciler) ReconcileUI(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { @@ -42,7 +33,7 @@ func (r *DSPAReconciler) ReconcileUI(dsp *dspav1alpha1.DataSciencePipelinesAppli } log.Info("Applying MlPipelineUI Resources") - err := r.ApplyAll(dsp, params, mlPipelineUITemplates) + err := r.ApplyDir(dsp, params, mlPipelineUITemplatesDir) if err != nil { return err } diff --git a/controllers/persistence_agent.go b/controllers/persistence_agent.go index 31fb90873..94f81b66f 100644 --- a/controllers/persistence_agent.go +++ b/controllers/persistence_agent.go @@ -20,12 +20,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var persistenceAgentTemplates = []string{ - "persistence-agent/deployment.yaml.tmpl", - "persistence-agent/sa.yaml.tmpl", - "persistence-agent/role.yaml.tmpl", - "persistence-agent/rolebinding.yaml.tmpl", -} +var persistenceAgentTemplatesDir 
= "persistence-agent" func (r *DSPAReconciler) ReconcilePersistenceAgent(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { @@ -39,7 +34,7 @@ func (r *DSPAReconciler) ReconcilePersistenceAgent(dsp *dspav1alpha1.DataScience log.Info("Applying PersistenceAgent Resources") - err := r.ApplyAll(dsp, params, persistenceAgentTemplates) + err := r.ApplyDir(dsp, params, persistenceAgentTemplatesDir) if err != nil { return err } diff --git a/controllers/scheduled_workflow.go b/controllers/scheduled_workflow.go index 8f0f5e0ff..68e62c1b5 100644 --- a/controllers/scheduled_workflow.go +++ b/controllers/scheduled_workflow.go @@ -20,14 +20,7 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var scheduledWorkflowTemplates = []string{ - "scheduled-workflow/deployment.yaml.tmpl", - "scheduled-workflow/role.yaml.tmpl", - "scheduled-workflow/rolebinding.yaml.tmpl", - "scheduled-workflow/sa.yaml.tmpl", - "scheduled-workflow/role.yaml.tmpl", - "scheduled-workflow/rolebinding.yaml.tmpl", -} +var scheduledWorkflowTemplatesDir = "scheduled-workflow" func (r *DSPAReconciler) ReconcileScheduledWorkflow(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { @@ -41,7 +34,7 @@ func (r *DSPAReconciler) ReconcileScheduledWorkflow(dsp *dspav1alpha1.DataScienc log.Info("Applying ScheduledWorkflow Resources") - err := r.ApplyAll(dsp, params, scheduledWorkflowTemplates) + err := r.ApplyDir(dsp, params, scheduledWorkflowTemplatesDir) if err != nil { return err } diff --git a/controllers/util/util.go b/controllers/util/util.go index ca3118f51..ed8388db0 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -18,6 +18,7 @@ package util import ( "os" + "path/filepath" appsv1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,15 +48,15 @@ func BoolPointer(b bool) *bool { return &b } -func GetTemplatesInDir(templateDirectory string) ([]string, error) { - entries, 
err := os.ReadDir(templateDirectory) +func GetTemplatesInDir(templatesDirectory, componentSubdirectory string) ([]string, error) { + files, err := os.ReadDir(templatesDirectory + componentSubdirectory) if err != nil { return nil, err } var templates []string - for _, e := range entries { - templates = append(templates, e.Name()) + for _, f := range files { + templates = append(templates, filepath.Join(componentSubdirectory, f.Name())) } return templates, nil } From 7b4c1cbf4368eac2ddd8f6e114d379f6458380b8 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 15:56:33 -0400 Subject: [PATCH 18/85] Handle subdirectories in GetTemplatesInDir util --- controllers/util/util.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/controllers/util/util.go b/controllers/util/util.go index ed8388db0..efbaa49ea 100644 --- a/controllers/util/util.go +++ b/controllers/util/util.go @@ -56,7 +56,9 @@ func GetTemplatesInDir(templatesDirectory, componentSubdirectory string) ([]stri var templates []string for _, f := range files { - templates = append(templates, filepath.Join(componentSubdirectory, f.Name())) + if !f.IsDir() { + templates = append(templates, filepath.Join(componentSubdirectory, f.Name())) + } } return templates, nil } From 2856b85ab4e2dc2ccd8a2f9d70137796875546f8 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:21:39 -0400 Subject: [PATCH 19/85] Restructure Common Templates directory --- .../mlmd-envoy-dashboard-access-policy.yaml.tmpl | 0 config/internal/common/{ => default}/policy.yaml.tmpl | 0 .../common/{ => no-owner}/clusterrolebinding.yaml.tmpl | 0 controllers/common.go | 9 +++------ 4 files changed, 3 insertions(+), 6 deletions(-) rename config/internal/common/{ => default}/mlmd-envoy-dashboard-access-policy.yaml.tmpl (100%) rename config/internal/common/{ => default}/policy.yaml.tmpl (100%) rename config/internal/common/{ => no-owner}/clusterrolebinding.yaml.tmpl (100%) diff --git 
a/config/internal/common/mlmd-envoy-dashboard-access-policy.yaml.tmpl b/config/internal/common/default/mlmd-envoy-dashboard-access-policy.yaml.tmpl similarity index 100% rename from config/internal/common/mlmd-envoy-dashboard-access-policy.yaml.tmpl rename to config/internal/common/default/mlmd-envoy-dashboard-access-policy.yaml.tmpl diff --git a/config/internal/common/policy.yaml.tmpl b/config/internal/common/default/policy.yaml.tmpl similarity index 100% rename from config/internal/common/policy.yaml.tmpl rename to config/internal/common/default/policy.yaml.tmpl diff --git a/config/internal/common/clusterrolebinding.yaml.tmpl b/config/internal/common/no-owner/clusterrolebinding.yaml.tmpl similarity index 100% rename from config/internal/common/clusterrolebinding.yaml.tmpl rename to config/internal/common/no-owner/clusterrolebinding.yaml.tmpl diff --git a/controllers/common.go b/controllers/common.go index 30249a819..b68787b6f 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -19,18 +19,15 @@ import ( dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" ) -var commonTemplates = []string{ - "common/policy.yaml.tmpl", - "common/mlmd-envoy-dashboard-access-policy.yaml.tmpl", -} +var commonTemplatesDir = "common/default" -const commonCusterRolebindingTemplate = "common/clusterrolebinding.yaml.tmpl" +const commonCusterRolebindingTemplate = "common/no-owner/clusterrolebinding.yaml.tmpl" func (r *DSPAReconciler) ReconcileCommon(dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) log.Info("Applying Common Resources") - err := r.ApplyAll(dsp, params, commonTemplates) + err := r.ApplyDir(dsp, params, commonTemplatesDir) if err != nil { return err } From eb86f6de6d2f27dedf25397b0f837ffcd3f40011 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:25:01 -0400 Subject: [PATCH 20/85] Restructure 
Database templates directory --- .../mariadb/{ => default}/deployment.yaml.tmpl | 0 .../mariadb/{ => default}/mariadb-sa.yaml.tmpl | 0 .../internal/mariadb/{ => default}/pvc.yaml.tmpl | 0 .../mariadb/{ => default}/service.yaml.tmpl | 0 .../{ => generated-secret}/secret.yaml.tmpl | 0 controllers/database.go | 14 ++++---------- 6 files changed, 4 insertions(+), 10 deletions(-) rename config/internal/mariadb/{ => default}/deployment.yaml.tmpl (100%) rename config/internal/mariadb/{ => default}/mariadb-sa.yaml.tmpl (100%) rename config/internal/mariadb/{ => default}/pvc.yaml.tmpl (100%) rename config/internal/mariadb/{ => default}/service.yaml.tmpl (100%) rename config/internal/mariadb/{ => generated-secret}/secret.yaml.tmpl (100%) diff --git a/config/internal/mariadb/deployment.yaml.tmpl b/config/internal/mariadb/default/deployment.yaml.tmpl similarity index 100% rename from config/internal/mariadb/deployment.yaml.tmpl rename to config/internal/mariadb/default/deployment.yaml.tmpl diff --git a/config/internal/mariadb/mariadb-sa.yaml.tmpl b/config/internal/mariadb/default/mariadb-sa.yaml.tmpl similarity index 100% rename from config/internal/mariadb/mariadb-sa.yaml.tmpl rename to config/internal/mariadb/default/mariadb-sa.yaml.tmpl diff --git a/config/internal/mariadb/pvc.yaml.tmpl b/config/internal/mariadb/default/pvc.yaml.tmpl similarity index 100% rename from config/internal/mariadb/pvc.yaml.tmpl rename to config/internal/mariadb/default/pvc.yaml.tmpl diff --git a/config/internal/mariadb/service.yaml.tmpl b/config/internal/mariadb/default/service.yaml.tmpl similarity index 100% rename from config/internal/mariadb/service.yaml.tmpl rename to config/internal/mariadb/default/service.yaml.tmpl diff --git a/config/internal/mariadb/secret.yaml.tmpl b/config/internal/mariadb/generated-secret/secret.yaml.tmpl similarity index 100% rename from config/internal/mariadb/secret.yaml.tmpl rename to config/internal/mariadb/generated-secret/secret.yaml.tmpl diff --git 
a/controllers/database.go b/controllers/database.go index 205cec4ce..91e55aa95 100644 --- a/controllers/database.go +++ b/controllers/database.go @@ -26,15 +26,9 @@ import ( "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" ) -const dbSecret = "mariadb/secret.yaml.tmpl" - -var dbTemplates = []string{ - "mariadb/deployment.yaml.tmpl", - "mariadb/pvc.yaml.tmpl", - "mariadb/service.yaml.tmpl", - "mariadb/mariadb-sa.yaml.tmpl", - dbSecret, -} +const dbSecret = "mariadb/generated-secret/secret.yaml.tmpl" + +var dbTemplatesDir = "mariadb/default" // extract to var for mocking in testing var ConnectAndQueryDatabase = func(host, port, username, password, dbname string) bool { @@ -112,7 +106,7 @@ func (r *DSPAReconciler) ReconcileDatabase(ctx context.Context, dsp *dspav1alpha } } else if deployMariaDB || deployDefaultDB { log.Info("Applying mariaDB resources.") - err := r.ApplyAll(dsp, params, dbTemplates) + err := r.ApplyDir(dsp, params, dbTemplatesDir) if err != nil { return err } From 45ddc2ca7328c61597001eaa48c9660c62b92c4d Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:27:52 -0400 Subject: [PATCH 21/85] Restructure Object Storage templates directory --- .../minio/{ => default}/deployment.yaml.tmpl | 0 .../minio/{ => default}/minio-sa.yaml.tmpl | 0 config/internal/minio/{ => default}/pvc.yaml.tmpl | 0 .../internal/minio/{ => default}/service.yaml.tmpl | 0 .../minio/{ => generated-secret}/secret.yaml.tmpl | 0 controllers/storage.go | 14 ++++---------- 6 files changed, 4 insertions(+), 10 deletions(-) rename config/internal/minio/{ => default}/deployment.yaml.tmpl (100%) rename config/internal/minio/{ => default}/minio-sa.yaml.tmpl (100%) rename config/internal/minio/{ => default}/pvc.yaml.tmpl (100%) rename config/internal/minio/{ => default}/service.yaml.tmpl (100%) rename config/internal/minio/{ => generated-secret}/secret.yaml.tmpl (100%) diff --git a/config/internal/minio/deployment.yaml.tmpl 
b/config/internal/minio/default/deployment.yaml.tmpl similarity index 100% rename from config/internal/minio/deployment.yaml.tmpl rename to config/internal/minio/default/deployment.yaml.tmpl diff --git a/config/internal/minio/minio-sa.yaml.tmpl b/config/internal/minio/default/minio-sa.yaml.tmpl similarity index 100% rename from config/internal/minio/minio-sa.yaml.tmpl rename to config/internal/minio/default/minio-sa.yaml.tmpl diff --git a/config/internal/minio/pvc.yaml.tmpl b/config/internal/minio/default/pvc.yaml.tmpl similarity index 100% rename from config/internal/minio/pvc.yaml.tmpl rename to config/internal/minio/default/pvc.yaml.tmpl diff --git a/config/internal/minio/service.yaml.tmpl b/config/internal/minio/default/service.yaml.tmpl similarity index 100% rename from config/internal/minio/service.yaml.tmpl rename to config/internal/minio/default/service.yaml.tmpl diff --git a/config/internal/minio/secret.yaml.tmpl b/config/internal/minio/generated-secret/secret.yaml.tmpl similarity index 100% rename from config/internal/minio/secret.yaml.tmpl rename to config/internal/minio/generated-secret/secret.yaml.tmpl diff --git a/controllers/storage.go b/controllers/storage.go index 727012948..c3e133ba4 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -30,15 +30,9 @@ import ( "github.com/opendatahub-io/data-science-pipelines-operator/controllers/config" ) -const storageSecret = "minio/secret.yaml.tmpl" - -var storageTemplates = []string{ - "minio/deployment.yaml.tmpl", - "minio/pvc.yaml.tmpl", - "minio/service.yaml.tmpl", - "minio/minio-sa.yaml.tmpl", - storageSecret, -} +const storageSecret = "minio/generated-secret/secret.yaml.tmpl" + +var storageTemplatesDir = "minio/default" func joinHostPort(host, port string) (string, error) { if host == "" { @@ -162,7 +156,7 @@ func (r *DSPAReconciler) ReconcileStorage(ctx context.Context, dsp *dspav1alpha1 } } else if deployMinio { log.Info("Applying object storage resources.") - err := r.ApplyAll(dsp, 
params, storageTemplates) + err := r.ApplyDir(dsp, params, storageTemplatesDir) if err != nil { return err } From 3f5e09f635e3e00400585685bad70d1a187f8260 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 14 Sep 2023 16:38:50 -0400 Subject: [PATCH 22/85] Restructure components manifest directories with conditions - Some components have conditions (only deploy if XYZ option set), so separate these specific manifests from rest of the 'default' ones for that component --- .../apiserver/{ => default}/artifact_script.yaml.tmpl | 0 .../internal/apiserver/{ => default}/deployment.yaml.tmpl | 2 ++ config/internal/apiserver/{ => default}/monitor.yaml.tmpl | 0 .../{ => default}/role_ds-pipeline-user-access.yaml.tmpl | 0 .../apiserver/{ => default}/role_ds-pipeline.yaml.tmpl | 0 .../{ => default}/role_pipeline-runner.yaml.tmpl | 0 .../{ => default}/rolebinding_ds-pipeline.yaml.tmpl | 0 .../{ => default}/rolebinding_pipeline-runner.yaml.tmpl | 0 .../apiserver/{ => default}/sa_ds-pipeline.yaml.tmpl | 0 .../apiserver/{ => default}/sa_pipeline-runner.yaml.tmpl | 0 config/internal/apiserver/{ => default}/service.yaml.tmpl | 0 config/internal/apiserver/{ => route}/route.yaml.tmpl | 0 .../{ => sample-pipeline}/sample-config.yaml.tmpl | 0 .../{ => sample-pipeline}/sample-pipeline.yaml.tmpl | 0 config/internal/scheduled-workflow/deployment.yaml.tmpl | 6 ++++++ controllers/apiserver.go | 8 ++++---- 16 files changed, 12 insertions(+), 4 deletions(-) rename config/internal/apiserver/{ => default}/artifact_script.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/deployment.yaml.tmpl (99%) rename config/internal/apiserver/{ => default}/monitor.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/role_ds-pipeline-user-access.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/role_ds-pipeline.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/role_pipeline-runner.yaml.tmpl (100%) rename config/internal/apiserver/{ => 
default}/rolebinding_ds-pipeline.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/rolebinding_pipeline-runner.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/sa_ds-pipeline.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/sa_pipeline-runner.yaml.tmpl (100%) rename config/internal/apiserver/{ => default}/service.yaml.tmpl (100%) rename config/internal/apiserver/{ => route}/route.yaml.tmpl (100%) rename config/internal/apiserver/{ => sample-pipeline}/sample-config.yaml.tmpl (100%) rename config/internal/apiserver/{ => sample-pipeline}/sample-pipeline.yaml.tmpl (100%) diff --git a/config/internal/apiserver/artifact_script.yaml.tmpl b/config/internal/apiserver/default/artifact_script.yaml.tmpl similarity index 100% rename from config/internal/apiserver/artifact_script.yaml.tmpl rename to config/internal/apiserver/default/artifact_script.yaml.tmpl diff --git a/config/internal/apiserver/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl similarity index 99% rename from config/internal/apiserver/deployment.yaml.tmpl rename to config/internal/apiserver/default/deployment.yaml.tmpl index 2e42d702d..7c08f2ddc 100644 --- a/config/internal/apiserver/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -50,6 +50,8 @@ spec: value: "{{.APIServer.ArtifactImage}}" - name: ARCHIVE_LOGS value: "{{.APIServer.ArchiveLogs}}" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "{{.APIServer.TrackArtifacts}}" - name: STRIP_EOF diff --git a/config/internal/apiserver/monitor.yaml.tmpl b/config/internal/apiserver/default/monitor.yaml.tmpl similarity index 100% rename from config/internal/apiserver/monitor.yaml.tmpl rename to config/internal/apiserver/default/monitor.yaml.tmpl diff --git a/config/internal/apiserver/role_ds-pipeline-user-access.yaml.tmpl b/config/internal/apiserver/default/role_ds-pipeline-user-access.yaml.tmpl similarity index 100% rename from 
config/internal/apiserver/role_ds-pipeline-user-access.yaml.tmpl rename to config/internal/apiserver/default/role_ds-pipeline-user-access.yaml.tmpl diff --git a/config/internal/apiserver/role_ds-pipeline.yaml.tmpl b/config/internal/apiserver/default/role_ds-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/role_ds-pipeline.yaml.tmpl rename to config/internal/apiserver/default/role_ds-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/role_pipeline-runner.yaml.tmpl b/config/internal/apiserver/default/role_pipeline-runner.yaml.tmpl similarity index 100% rename from config/internal/apiserver/role_pipeline-runner.yaml.tmpl rename to config/internal/apiserver/default/role_pipeline-runner.yaml.tmpl diff --git a/config/internal/apiserver/rolebinding_ds-pipeline.yaml.tmpl b/config/internal/apiserver/default/rolebinding_ds-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/rolebinding_ds-pipeline.yaml.tmpl rename to config/internal/apiserver/default/rolebinding_ds-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/rolebinding_pipeline-runner.yaml.tmpl b/config/internal/apiserver/default/rolebinding_pipeline-runner.yaml.tmpl similarity index 100% rename from config/internal/apiserver/rolebinding_pipeline-runner.yaml.tmpl rename to config/internal/apiserver/default/rolebinding_pipeline-runner.yaml.tmpl diff --git a/config/internal/apiserver/sa_ds-pipeline.yaml.tmpl b/config/internal/apiserver/default/sa_ds-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sa_ds-pipeline.yaml.tmpl rename to config/internal/apiserver/default/sa_ds-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/sa_pipeline-runner.yaml.tmpl b/config/internal/apiserver/default/sa_pipeline-runner.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sa_pipeline-runner.yaml.tmpl rename to config/internal/apiserver/default/sa_pipeline-runner.yaml.tmpl diff --git 
a/config/internal/apiserver/service.yaml.tmpl b/config/internal/apiserver/default/service.yaml.tmpl similarity index 100% rename from config/internal/apiserver/service.yaml.tmpl rename to config/internal/apiserver/default/service.yaml.tmpl diff --git a/config/internal/apiserver/route.yaml.tmpl b/config/internal/apiserver/route/route.yaml.tmpl similarity index 100% rename from config/internal/apiserver/route.yaml.tmpl rename to config/internal/apiserver/route/route.yaml.tmpl diff --git a/config/internal/apiserver/sample-config.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-config.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sample-config.yaml.tmpl rename to config/internal/apiserver/sample-pipeline/sample-config.yaml.tmpl diff --git a/config/internal/apiserver/sample-pipeline.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl similarity index 100% rename from config/internal/apiserver/sample-pipeline.yaml.tmpl rename to config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl diff --git a/config/internal/scheduled-workflow/deployment.yaml.tmpl b/config/internal/scheduled-workflow/deployment.yaml.tmpl index c5a5da5b5..2415d25e0 100644 --- a/config/internal/scheduled-workflow/deployment.yaml.tmpl +++ b/config/internal/scheduled-workflow/deployment.yaml.tmpl @@ -24,8 +24,14 @@ spec: spec: containers: - env: + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace - name: CRON_SCHEDULE_TIMEZONE value: "{{.ScheduledWorkflow.CronScheduleTimezone}}" + - name: EXECUTIONTYPE + value: PipelineRun image: "{{.ScheduledWorkflow.Image}}" imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/apiserver.go b/controllers/apiserver.go index 816b637cb..c73e16b68 100644 --- a/controllers/apiserver.go +++ b/controllers/apiserver.go @@ -24,17 +24,17 @@ import ( "k8s.io/apimachinery/pkg/types" ) -var apiServerTemplatesDir = "apiserver" +var 
apiServerTemplatesDir = "apiserver/default" // serverRoute is a resource deployed conditionally // as such it is handled separately -const serverRoute = "apiserver/route.yaml.tmpl" +const serverRoute = "apiserver/route/route.yaml.tmpl" // Sample Pipeline and Config are resources deployed conditionally // as such it is handled separately var samplePipelineTemplates = map[string]string{ - "sample-pipeline": "apiserver/sample-pipeline.yaml.tmpl", - "sample-config": "apiserver/sample-config.yaml.tmpl", + "sample-pipeline": "apiserver/sample-pipeline/sample-pipeline.yaml.tmpl", + "sample-config": "apiserver/sample-pipeline/sample-config.yaml.tmpl", } func (r *DSPAReconciler) ReconcileAPIServer(ctx context.Context, dsp *dspav1alpha1.DataSciencePipelinesApplication, params *DSPAParams) error { From b4f0ece3f718c7a3c0a2a26a41f17252a5177774 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Fri, 15 Sep 2023 18:47:32 -0400 Subject: [PATCH 23/85] Add VisualizationServer and CRDViewer component manifests --- api/v1alpha1/dspipeline_types.go | 20 +++++++ api/v1alpha1/zz_generated.deepcopy.go | 40 +++++++++++++ config/base/kustomization.yaml | 14 +++++ config/base/params.env | 2 + config/configmaps/files/config.yaml | 2 + ...b.io_datasciencepipelinesapplications.yaml | 20 +++++++ .../internal/crdviewer/deployment.yaml.tmpl | 35 +++++++++++ config/internal/crdviewer/role.yaml.tmpl | 32 ++++++++++ .../internal/crdviewer/rolebinding.yaml.tmpl | 13 ++++ .../crdviewer/serviceaccount.yaml.tmpl | 5 ++ .../visualizationserver/deployment.yaml.tmpl | 60 +++++++++++++++++++ .../visualizationserver/service.yaml.tmpl | 19 ++++++ .../serviceaccount.yaml.tmpl | 5 ++ config/manager/manager.yaml | 4 ++ kfdef/kfdef.yaml | 4 ++ 15 files changed, 275 insertions(+) create mode 100644 config/internal/crdviewer/deployment.yaml.tmpl create mode 100644 config/internal/crdviewer/role.yaml.tmpl create mode 100644 config/internal/crdviewer/rolebinding.yaml.tmpl create mode 100644 
config/internal/crdviewer/serviceaccount.yaml.tmpl create mode 100644 config/internal/visualizationserver/deployment.yaml.tmpl create mode 100644 config/internal/visualizationserver/service.yaml.tmpl create mode 100644 config/internal/visualizationserver/serviceaccount.yaml.tmpl diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index bed158e47..1525b95b7 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -43,6 +43,12 @@ type DSPASpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *MLMD `json:"mlmd"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:={deploy: false} + *CRDViewer `json:"crdviewer"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:={deploy: false} + *VisualizationServer `json:"visualizationServer"` } type APIServer struct { @@ -251,6 +257,20 @@ type Writer struct { Image string `json:"image"` } +type CRDViewer struct { + // +kubebuilder:default:=true + // +kubebuilder:validation:Optional + Deploy bool `json:"deploy"` + Image string `json:"image,omitempty"` +} + +type VisualizationServer struct { + // +kubebuilder:default:=true + // +kubebuilder:validation:Optional + Deploy bool `json:"deploy"` + Image string `json:"image,omitempty"` +} + // ResourceRequirements structures compute resource requirements. // Replaces ResourceRequirements from corev1 which also includes optional storage field. // We handle storage field separately, and should not include it as a subfield for Resources. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index babfd004f..528cd5eb9 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -66,6 +66,21 @@ func (in *ArtifactScriptConfigMap) DeepCopy() *ArtifactScriptConfigMap { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CRDViewer) DeepCopyInto(out *CRDViewer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CRDViewer. +func (in *CRDViewer) DeepCopy() *CRDViewer { + if in == nil { + return nil + } + out := new(CRDViewer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = *in @@ -104,6 +119,16 @@ func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = new(MLMD) (*in).DeepCopyInto(*out) } + if in.CRDViewer != nil { + in, out := &in.CRDViewer, &out.CRDViewer + *out = new(CRDViewer) + **out = **in + } + if in.VisualizationServer != nil { + in, out := &in.VisualizationServer, &out.VisualizationServer + *out = new(VisualizationServer) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSPASpec. @@ -546,6 +571,21 @@ func (in *SecretKeyValue) DeepCopy() *SecretKeyValue { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VisualizationServer) DeepCopyInto(out *VisualizationServer) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationServer. +func (in *VisualizationServer) DeepCopy() *VisualizationServer { + if in == nil { + return nil + } + out := new(VisualizationServer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Writer) DeepCopyInto(out *Writer) { *out = *in diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index fc6450c20..d36804d32 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -92,6 +92,20 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGES_MLMDWRITER + - name: IMAGES_CRDVIEWER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_CRDVIEWER + - name: IMAGES_VISUALIZATIONSERVER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGES_VISUALIZATIONSERVER - name: IMAGES_DSPO objref: kind: ConfigMap diff --git a/config/base/params.env b/config/base/params.env index 0ad210d41..dd7f56925 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -10,4 +10,6 @@ IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.8 IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 +IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 +IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 ZAP_LOG_LEVEL=info diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 477512b54..41319834a 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -10,3 +10,5 @@ Images: MlmdEnvoy: $(IMAGES_MLMDENVOY) MlmdGRPC: $(IMAGES_MLMDGRPC) MlmdWriter: $(IMAGES_MLMDWRITER) + CRDViewer: $(IMAGES_CRDVIEWER) + VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) \ No newline at end of file diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml 
index 6732f52fb..0060ca717 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -152,6 +152,16 @@ spec: description: 'Default: true' type: boolean type: object + crdviewer: + default: + deploy: false + properties: + deploy: + default: true + type: boolean + image: + type: string + type: object database: default: mariaDB: @@ -715,6 +725,16 @@ spec: type: object type: object type: object + visualizationServer: + default: + deploy: false + properties: + deploy: + default: true + type: boolean + image: + type: string + type: object required: - objectStorage type: object diff --git a/config/internal/crdviewer/deployment.yaml.tmpl b/config/internal/crdviewer/deployment.yaml.tmpl new file mode 100644 index 000000000..1f568a2b1 --- /dev/null +++ b/config/internal/crdviewer/deployment.yaml.tmpl @@ -0,0 +1,35 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines + name: ds-pipeline-viewer-crd-{{.Name}} + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: ds-pipeline-viewer-crd-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-viewer-crd-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + spec: + containers: + - env: + - name: MAX_NUM_VIEWERS + value: "50" + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 + imagePullPolicy: Always + name: ds-pipeline-viewer-crd + serviceAccountName: ds-pipeline-viewer-crd-service-account-{{.Name}} diff --git a/config/internal/crdviewer/role.yaml.tmpl b/config/internal/crdviewer/role.yaml.tmpl new file mode 100644 index 000000000..ef943e9fe --- 
/dev/null +++ b/config/internal/crdviewer/role.yaml.tmpl @@ -0,0 +1,32 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: ds-pipeline-viewer-controller-role-{{.Name}} + namespace: {{.Namespace}} +rules: +- apiGroups: + - '*' + resources: + - deployments + - services + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - kubeflow.org + resources: + - viewers + - viewers/finalizers + verbs: + - create + - get + - list + - watch + - update + - patch + - delete diff --git a/config/internal/crdviewer/rolebinding.yaml.tmpl b/config/internal/crdviewer/rolebinding.yaml.tmpl new file mode 100644 index 000000000..f927411a4 --- /dev/null +++ b/config/internal/crdviewer/rolebinding.yaml.tmpl @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: ds-pipeline-viewer-crd-binding-{{.Name}} + namespace: {{.Namespace}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ds-pipeline-viewer-controller-role-{{.Name}} +subjects: +- kind: ServiceAccount + name: ds-pipeline-viewer-crd-service-account-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/internal/crdviewer/serviceaccount.yaml.tmpl b/config/internal/crdviewer/serviceaccount.yaml.tmpl new file mode 100644 index 000000000..2b21e1453 --- /dev/null +++ b/config/internal/crdviewer/serviceaccount.yaml.tmpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-pipeline-viewer-crd-service-account-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/internal/visualizationserver/deployment.yaml.tmpl b/config/internal/visualizationserver/deployment.yaml.tmpl new file mode 100644 index 000000000..26abdeedc --- /dev/null +++ b/config/internal/visualizationserver/deployment.yaml.tmpl @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: 
ds-pipeline-visualizationserver-{{.Name}} + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + spec: + containers: + - image: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 + imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/ + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + name: ds-pipeline-visualizationserver + ports: + - containerPort: 8888 + name: http + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/ + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 30m + memory: 500Mi + serviceAccountName: ds-pipeline-visualizationserver-{{.Name}} diff --git a/config/internal/visualizationserver/service.yaml.tmpl b/config/internal/visualizationserver/service.yaml.tmpl new file mode 100644 index 000000000..f2d76833f --- /dev/null +++ b/config/internal/visualizationserver/service.yaml.tmpl @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: ds-pipeline-visualizationserver-{{.Name}} + namespace: {{.Namespace}} + annotations: + service.alpha.openshift.io/serving-cert-secret-name: ds-pipelines-proxy-tls-{{.Name}} + labels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines +spec: + ports: + - name: http + port: 8888 + protocol: TCP + targetPort: 8888 + selector: + app: ds-pipeline-visualizationserver-{{.Name}} + component: data-science-pipelines diff --git a/config/internal/visualizationserver/serviceaccount.yaml.tmpl b/config/internal/visualizationserver/serviceaccount.yaml.tmpl new file mode 100644 index 000000000..e1c415786 --- /dev/null 
+++ b/config/internal/visualizationserver/serviceaccount.yaml.tmpl @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ds-pipeline-visualizationserver-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index d765088d0..d643464f6 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -57,6 +57,10 @@ spec: value: $(IMAGES_MLMDGRPC) - name: IMAGES_MLMDWRITER value: $(IMAGES_MLMDWRITER) + - name: IMAGES_CRDVIEWER + value: $(IMAGES_CRDVIEWER) + - name: IMAGES_VISUALIZATIONSERVER + value: $(IMAGES_VISUALIZATIONSERVER) - name: ZAP_LOG_LEVEL value: $(ZAP_LOG_LEVEL) securityContext: diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 7bb962d86..4f1c2efe2 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -30,6 +30,10 @@ spec: value: quay.io/opendatahub/ds-pipelines-metadata-grpc:1.0.0 - name: IMAGES_MLMDWRITER value: quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 + - name: IMAGES_CRDVIEWER + value: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 + - name: IMAGES_VISUALIZATIONSERVER + value: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 repoRef: name: manifests path: config From 3fcbb142b122400dc08eeb879de8c9fbe1fb969d Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 19 Sep 2023 15:53:20 -0400 Subject: [PATCH 24/85] Add Reconcile Handlers for CRDViewer and VisualizationServer --- controllers/crdviewer.go | 44 +++++++++ controllers/crdviewer_test.go | 118 +++++++++++++++++++++++ controllers/dspipeline_controller.go | 11 +++ controllers/dspipeline_params.go | 2 + controllers/visualization_server.go | 44 +++++++++ controllers/visualization_server_test.go | 118 +++++++++++++++++++++++ 6 files changed, 337 insertions(+) create mode 100644 controllers/crdviewer.go create mode 100644 controllers/crdviewer_test.go create mode 100644 controllers/visualization_server.go create mode 100644 controllers/visualization_server_test.go diff --git 
a/controllers/crdviewer.go b/controllers/crdviewer.go new file mode 100644 index 000000000..e173de877 --- /dev/null +++ b/controllers/crdviewer.go @@ -0,0 +1,44 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" +) + +var crdViewerTemplatesDir = "crdviewer" + +func (r *DSPAReconciler) ReconcileCRDViewer(dsp *dspav1alpha1.DataSciencePipelinesApplication, + params *DSPAParams) error { + + log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) + + if !dsp.Spec.CRDViewer.Deploy { + log.Info("Skipping Application of CRD Viewer Resources") + return nil + } + + log.Info("Applying CRDViewer Resources") + + err := r.ApplyDir(dsp, params, crdViewerTemplatesDir) + if err != nil { + return err + } + + log.Info("Finished applying CRD Viewer Resources") + return nil +} diff --git a/controllers/crdviewer_test.go b/controllers/crdviewer_test.go new file mode 100644 index 000000000..4a23804a8 --- /dev/null +++ b/controllers/crdviewer_test.go @@ -0,0 +1,118 @@ +//go:build test_all || test_unit + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "testing" + + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" +) + +func TestDeployCRDViewer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedCRDViewerName := "ds-pipeline-viewer-crd-testdspa" + + // Construct DSPASpec with deployed CRD Viewer + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + CRDViewer: &dspav1alpha1.CRDViewer{ + Deploy: true, + }, + Database: &dspav1alpha1.Database{ + DisableHealthCheck: false, + MariaDB: &dspav1alpha1.MariaDB{ + Deploy: true, + }, + }, + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + Minio: &dspav1alpha1.Minio{ + Deploy: false, + Image: "someimage", + }, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Namespace = testNamespace + dspa.Name = testDSPAName + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) + assert.Nil(t, err) + + // Ensure CRD Viewer Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileCRDViewer(dspa, params) + assert.Nil(t, err) + + // Ensure CRD Viewer Deployment now exists + deployment = &appsv1.Deployment{} + 
created, err = reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.True(t, created) + assert.Nil(t, err) + +} + +func TestDontDeployCRDViewer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedCRDViewerName := "ds-pipeline-viewer-crd-testdspa" + + // Construct DSPASpec with non-deployed CRD Viewer + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + CRDViewer: &dspav1alpha1.CRDViewer{ + Deploy: false, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + + // Ensure CRD Viewer Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileCRDViewer(dspa, params) + assert.Nil(t, err) + + // Ensure CRD Viewer Deployment still doesn't exist + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedCRDViewerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) +} diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 185c3b59c..925c621fa 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -276,6 +276,17 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
if err != nil { return ctrl.Result{}, err } + + err = r.ReconcileCRDViewer(dspa, params) + if err != nil { + return ctrl.Result{}, err + } + + err = r.ReconcileVisualizationServer(dspa, params) + if err != nil { + return ctrl.Result{}, err + } + } log.Info("Updating CR status") diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index b4ef158d7..d19582253 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -48,6 +48,8 @@ type DSPAParams struct { MariaDB *dspa.MariaDB Minio *dspa.Minio MLMD *dspa.MLMD + CRDViewer *dspa.CRDViewer + VisualizationServer *dspa.VisualizationServer DBConnection ObjectStorageConnection } diff --git a/controllers/visualization_server.go b/controllers/visualization_server.go new file mode 100644 index 000000000..817341fbb --- /dev/null +++ b/controllers/visualization_server.go @@ -0,0 +1,44 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" +) + +var visualizationServerTemplatesDir = "visualizationserver" + +func (r *DSPAReconciler) ReconcileVisualizationServer(dsp *dspav1alpha1.DataSciencePipelinesApplication, + params *DSPAParams) error { + + log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) + + if !dsp.Spec.VisualizationServer.Deploy { + log.Info("Skipping Application of Visualization Server Resources") + return nil + } + + log.Info("Applying Visualization Server Resources") + + err := r.ApplyDir(dsp, params, visualizationServerTemplatesDir) + if err != nil { + return err + } + + log.Info("Finished applying Visualization Server Resources") + return nil +} diff --git a/controllers/visualization_server_test.go b/controllers/visualization_server_test.go new file mode 100644 index 000000000..7a06f6ca2 --- /dev/null +++ b/controllers/visualization_server_test.go @@ -0,0 +1,118 @@ +//go:build test_all || test_unit + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "testing" + + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" +) + +func TestDeployVisualizationServer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedVisualizationServerName := "ds-pipeline-visualizationserver-testdspa" + + // Construct DSPASpec with deployed Visualization Server + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + VisualizationServer: &dspav1alpha1.VisualizationServer{ + Deploy: true, + }, + Database: &dspav1alpha1.Database{ + DisableHealthCheck: false, + MariaDB: &dspav1alpha1.MariaDB{ + Deploy: true, + }, + }, + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + Minio: &dspav1alpha1.Minio{ + Deploy: false, + Image: "someimage", + }, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Namespace = testNamespace + dspa.Name = testDSPAName + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) + assert.Nil(t, err) + + // Ensure Visualization Server Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileVisualizationServer(dspa, params) + assert.Nil(t, err) + + // Ensure Visualization Server Deployment now exists + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.True(t, created) + assert.Nil(t, err) + +} + +func TestDontDeployVisualizationServer(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + 
expectedVisualizationServerName := "ds-pipeline-visualization-server-testdspa" + + // Construct DSPASpec with non-deployed Visualization Server + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + VisualizationServer: &dspav1alpha1.VisualizationServer{ + Deploy: false, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + + // Ensure Visualization Server Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileVisualizationServer(dspa, params) + assert.Nil(t, err) + + // Ensure Visualization Server Deployment still doesn't exist + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) +} From 55774dc71c10c8fd036293f948316c66bca159d0 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 19 Sep 2023 16:05:10 -0400 Subject: [PATCH 25/85] Fix Functional Tests (handle new envvars) --- config/internal/persistence-agent/deployment.yaml.tmpl | 10 ++++++++++ .../internal/scheduled-workflow/deployment.yaml.tmpl | 4 +--- .../case_0/expected/created/apiserver_deployment.yaml | 2 ++ .../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ .../case_2/expected/created/apiserver_deployment.yaml | 2 ++ .../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ .../case_3/expected/created/apiserver_deployment.yaml | 2 ++ .../case_4/expected/created/apiserver_deployment.yaml | 2 ++ 
.../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ .../case_5/expected/created/apiserver_deployment.yaml | 2 ++ .../expected/created/persistence-agent_deployment.yaml | 10 ++++++++++ .../created/scheduled-workflow_deployment.yaml | 4 ++++ 15 files changed, 77 insertions(+), 3 deletions(-) diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index 1c160ec59..cef718369 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "{{.Namespace}}" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: "{{.PersistenceAgent.Image}}" imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/config/internal/scheduled-workflow/deployment.yaml.tmpl b/config/internal/scheduled-workflow/deployment.yaml.tmpl index 2415d25e0..2b76d429e 100644 --- a/config/internal/scheduled-workflow/deployment.yaml.tmpl +++ b/config/internal/scheduled-workflow/deployment.yaml.tmpl @@ -25,9 +25,7 @@ spec: containers: - env: - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + value: "{{.Namespace}}" - name: CRON_SCHEDULE_TIMEZONE value: "{{.ScheduledWorkflow.CronScheduleTimezone}}" - name: EXECUTIONTYPE diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index fa277a796..ecf172926 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ 
-50,6 +50,8 @@ spec: value: "artifact-manager:test0" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml index bf0171dc3..ecce799ab 100644 --- a/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: persistenceagent:test0 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml index d069ca8fb..e5aee424a 100644 --- a/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "UTC" + - name: EXECUTIONTYPE + value: PipelineRun image: scheduledworkflow:test0 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 5c1263828..1489a4e0d 100644 --- 
a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "artifact-manager:test2" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml index 8342eace3..db064397e 100644 --- a/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: persistenceagent:test2 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml index f912bc2f7..78b8b382b 100644 --- a/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "EST" + - name: EXECUTIONTYPE + value: PipelineRun image: scheduledworkflow:test2 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml 
b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index 0b617788d..6371d1460 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: artifact-manager:test3 - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index 94524294c..043a86a37 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "this-artifact-manager-image-from-cr-should-be-used:test4" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml index da750bb99..c4118d680 100644 --- a/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: this-persistenceagent-image-from-cr-should-be-used:test4 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git 
a/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml index 0d7e88db6..80e2084ad 100644 --- a/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "EST" + - name: EXECUTIONTYPE + value: PipelineRun image: this-scheduledworkflow-image-from-cr-should-be-used:test4 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 92f6ac5b9..672ecd431 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "artifact-manager:test5" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: TRACK_ARTIFACTS value: "true" - name: STRIP_EOF diff --git a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml index 3255d1281..2a22a22ae 100644 --- a/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/persistence-agent_deployment.yaml @@ -26,6 +26,16 @@ spec: - env: - name: NAMESPACE value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + 
value: "" + - name: EXECUTIONTYPE + value: PipelineRun image: persistenceagent:test5 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent diff --git a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml index d03c4daf8..ab88f8de9 100644 --- a/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/scheduled-workflow_deployment.yaml @@ -24,8 +24,12 @@ spec: spec: containers: - env: + - name: NAMESPACE + value: "default" - name: CRON_SCHEDULE_TIMEZONE value: "UTC" + - name: EXECUTIONTYPE + value: PipelineRun image: scheduledworkflow:test5 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow From bbd0aec15016629ff721609d0b3ce36ef0d516e6 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 26 Sep 2023 03:41:01 -0400 Subject: [PATCH 26/85] Fix self-deployed DB/Storage missing secrets --- controllers/database.go | 4 ++++ controllers/storage.go | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/controllers/database.go b/controllers/database.go index 91e55aa95..deee77b80 100644 --- a/controllers/database.go +++ b/controllers/database.go @@ -110,6 +110,10 @@ func (r *DSPAReconciler) ReconcileDatabase(ctx context.Context, dsp *dspav1alpha if err != nil { return err } + err = r.Apply(dsp, params, dbSecret) + if err != nil { + return err + } // If no database was not specified, deploy mariaDB by default. // Update the CR with the state of mariaDB to accurately portray // desired state. 
diff --git a/controllers/storage.go b/controllers/storage.go index c3e133ba4..a57697f53 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -160,6 +160,10 @@ func (r *DSPAReconciler) ReconcileStorage(ctx context.Context, dsp *dspav1alpha1 if err != nil { return err } + err = r.Apply(dsp, params, storageSecret) + if err != nil { + return err + } // If no storage was not specified, deploy minio by default. // Update the CR with the state of minio to accurately portray // desired state. From 1aa8bb7984f651c9e58554ea5959d7255724371f Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 26 Sep 2023 03:41:45 -0400 Subject: [PATCH 27/85] WIP: Add DSPv2 Internal Manifests --- config/v2/cache/clusterrole.yaml | 35 +++++++++ config/v2/cache/clusterrolebinding.yaml | 12 +++ config/v2/cache/kustomization.yaml | 4 + config/v2/cache/serviceaccount.yaml | 11 +++ config/v2/driver/clusterrole.yaml | 63 +++++++++++++++ config/v2/driver/clusterrolebinding.yaml | 17 ++++ config/v2/driver/deployment.yaml | 57 ++++++++++++++ config/v2/driver/kustomization.yaml | 8 ++ config/v2/driver/role.yaml | 77 ++++++++++++++++++ config/v2/driver/rolebinding.yaml | 17 ++++ config/v2/driver/service.yaml | 24 ++++++ config/v2/driver/serviceaccount.yaml | 10 +++ .../clusterrole.leaderelection.yaml | 20 +++++ .../controller/clusterrole.clusteraccess.yaml | 66 ++++++++++++++++ .../controller/clusterrole.tenantaccess.yaml | 21 +++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../clusterrolebinding.tenantaccess.yaml | 16 ++++ .../v2/exithandler/controller/deployment.yaml | 60 ++++++++++++++ .../exithandler/controller/kustomization.yaml | 10 +++ config/v2/exithandler/controller/role.yaml | 37 +++++++++ .../exithandler/controller/rolebinding.yaml | 16 ++++ .../controller/serviceaccount.yaml | 10 +++ config/v2/exithandler/crd.yaml | 29 +++++++ config/v2/exithandler/kustomization.yaml | 5 ++ 
.../webhook/clusterrole.clusteraccess.yaml | 78 +++++++++++++++++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ config/v2/exithandler/webhook/deployment.yaml | 71 +++++++++++++++++ .../v2/exithandler/webhook/kustomization.yaml | 11 +++ .../webhook/mutatingwebhookconfig.yaml | 19 +++++ config/v2/exithandler/webhook/role.yaml | 53 +++++++++++++ .../v2/exithandler/webhook/rolebinding.yaml | 16 ++++ config/v2/exithandler/webhook/secret.yaml | 9 +++ config/v2/exithandler/webhook/service.yaml | 30 +++++++ .../exithandler/webhook/serviceaccount.yaml | 10 +++ .../webhook/validatingwebhookconfig.yaml | 19 +++++ .../kfptask/clusterrole.leaderelection.yaml | 20 +++++ .../controller/clusterrole.clusteraccess.yaml | 66 ++++++++++++++++ .../controller/clusterrole.tenantaccess.yaml | 21 +++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../clusterrolebinding.tenantaccess.yaml | 16 ++++ config/v2/kfptask/controller/deployment.yaml | 60 ++++++++++++++ .../v2/kfptask/controller/kustomization.yaml | 10 +++ config/v2/kfptask/controller/role.yaml | 38 +++++++++ config/v2/kfptask/controller/rolebinding.yaml | 17 ++++ .../v2/kfptask/controller/serviceaccount.yaml | 10 +++ config/v2/kfptask/crd.yaml | 29 +++++++ config/v2/kfptask/kustomization.yaml | 5 ++ .../webhook/clusterrole.clusteraccess.yaml | 78 +++++++++++++++++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ config/v2/kfptask/webhook/deployment.yaml | 71 +++++++++++++++++ config/v2/kfptask/webhook/kustomization.yaml | 12 +++ .../webhook/mutatingwebhookconfig.yaml | 19 +++++ config/v2/kfptask/webhook/role.yaml | 53 +++++++++++++ config/v2/kfptask/webhook/rolebinding.yaml | 16 ++++ config/v2/kfptask/webhook/secret.yaml | 9 +++ config/v2/kfptask/webhook/service.yaml | 30 +++++++ config/v2/kfptask/webhook/serviceaccount.yaml | 10 +++ .../webhook/validatingwebhookconfig.yaml | 19 +++++ 
config/v2/kustomization.yaml | 21 +++++ config/v2/params.env | 14 ++++ config/v2/params.yaml | 7 ++ .../clusterrole.leaderelection.yaml | 20 +++++ .../controller/clusterrole.clusteraccess.yaml | 66 ++++++++++++++++ .../controller/clusterrole.tenantaccess.yaml | 21 +++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../clusterrolebinding.tenantaccess.yaml | 16 ++++ .../pipelineloop/controller/deployment.yaml | 60 ++++++++++++++ .../controller/kustomization.yaml | 11 +++ config/v2/pipelineloop/controller/role.yaml | 36 +++++++++ .../pipelineloop/controller/rolebinding.yaml | 16 ++++ .../controller/serviceaccount.yaml | 10 +++ config/v2/pipelineloop/crd.yaml | 29 +++++++ config/v2/pipelineloop/kustomization.yaml | 5 ++ .../webhook/clusterrole.clusteraccess.yaml | 78 +++++++++++++++++++ .../clusterrolebinding.clusteraccess.yaml | 16 ++++ .../clusterrolebinding.leaderelection.yaml | 16 ++++ .../v2/pipelineloop/webhook/deployment.yaml | 71 +++++++++++++++++ .../pipelineloop/webhook/kustomization.yaml | 12 +++ .../webhook/mutatingwebhookconfig.yaml | 19 +++++ config/v2/pipelineloop/webhook/role.yaml | 52 +++++++++++++ .../v2/pipelineloop/webhook/rolebinding.yaml | 16 ++++ config/v2/pipelineloop/webhook/service.yaml | 30 +++++++ .../pipelineloop/webhook/serviceaccount.yaml | 10 +++ .../webhook/validatingwebhookconfig.yaml | 19 +++++ config/v2/tektoncrds/crd.yaml | 28 +++++++ config/v2/tektoncrds/kustomization.yaml | 4 + config/v2/tektoncrds/scc.anyuid.yaml | 61 +++++++++++++++ config/v2/tektoncrds/scc.privileged.yaml | 62 +++++++++++++++ 92 files changed, 2520 insertions(+) create mode 100644 config/v2/cache/clusterrole.yaml create mode 100644 config/v2/cache/clusterrolebinding.yaml create mode 100644 config/v2/cache/kustomization.yaml create mode 100644 config/v2/cache/serviceaccount.yaml create mode 100644 config/v2/driver/clusterrole.yaml create mode 100644 config/v2/driver/clusterrolebinding.yaml create mode 
100644 config/v2/driver/deployment.yaml create mode 100644 config/v2/driver/kustomization.yaml create mode 100644 config/v2/driver/role.yaml create mode 100644 config/v2/driver/rolebinding.yaml create mode 100644 config/v2/driver/service.yaml create mode 100644 config/v2/driver/serviceaccount.yaml create mode 100644 config/v2/exithandler/clusterrole.leaderelection.yaml create mode 100644 config/v2/exithandler/controller/clusterrole.clusteraccess.yaml create mode 100644 config/v2/exithandler/controller/clusterrole.tenantaccess.yaml create mode 100644 config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml create mode 100644 config/v2/exithandler/controller/deployment.yaml create mode 100644 config/v2/exithandler/controller/kustomization.yaml create mode 100644 config/v2/exithandler/controller/role.yaml create mode 100644 config/v2/exithandler/controller/rolebinding.yaml create mode 100644 config/v2/exithandler/controller/serviceaccount.yaml create mode 100644 config/v2/exithandler/crd.yaml create mode 100644 config/v2/exithandler/kustomization.yaml create mode 100644 config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml create mode 100644 config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/exithandler/webhook/deployment.yaml create mode 100644 config/v2/exithandler/webhook/kustomization.yaml create mode 100644 config/v2/exithandler/webhook/mutatingwebhookconfig.yaml create mode 100644 config/v2/exithandler/webhook/role.yaml create mode 100644 config/v2/exithandler/webhook/rolebinding.yaml create mode 100644 config/v2/exithandler/webhook/secret.yaml create mode 100644 config/v2/exithandler/webhook/service.yaml create mode 100644 config/v2/exithandler/webhook/serviceaccount.yaml create mode 100644 
config/v2/exithandler/webhook/validatingwebhookconfig.yaml create mode 100644 config/v2/kfptask/clusterrole.leaderelection.yaml create mode 100644 config/v2/kfptask/controller/clusterrole.clusteraccess.yaml create mode 100644 config/v2/kfptask/controller/clusterrole.tenantaccess.yaml create mode 100644 config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml create mode 100644 config/v2/kfptask/controller/deployment.yaml create mode 100644 config/v2/kfptask/controller/kustomization.yaml create mode 100644 config/v2/kfptask/controller/role.yaml create mode 100644 config/v2/kfptask/controller/rolebinding.yaml create mode 100644 config/v2/kfptask/controller/serviceaccount.yaml create mode 100644 config/v2/kfptask/crd.yaml create mode 100644 config/v2/kfptask/kustomization.yaml create mode 100644 config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml create mode 100644 config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/kfptask/webhook/deployment.yaml create mode 100644 config/v2/kfptask/webhook/kustomization.yaml create mode 100644 config/v2/kfptask/webhook/mutatingwebhookconfig.yaml create mode 100644 config/v2/kfptask/webhook/role.yaml create mode 100644 config/v2/kfptask/webhook/rolebinding.yaml create mode 100644 config/v2/kfptask/webhook/secret.yaml create mode 100644 config/v2/kfptask/webhook/service.yaml create mode 100644 config/v2/kfptask/webhook/serviceaccount.yaml create mode 100644 config/v2/kfptask/webhook/validatingwebhookconfig.yaml create mode 100644 config/v2/kustomization.yaml create mode 100644 config/v2/params.env create mode 100644 config/v2/params.yaml create mode 100644 config/v2/pipelineloop/clusterrole.leaderelection.yaml create mode 100644 
config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml create mode 100644 config/v2/pipelineloop/controller/deployment.yaml create mode 100644 config/v2/pipelineloop/controller/kustomization.yaml create mode 100644 config/v2/pipelineloop/controller/role.yaml create mode 100644 config/v2/pipelineloop/controller/rolebinding.yaml create mode 100644 config/v2/pipelineloop/controller/serviceaccount.yaml create mode 100644 config/v2/pipelineloop/crd.yaml create mode 100644 config/v2/pipelineloop/kustomization.yaml create mode 100644 config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml create mode 100644 config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml create mode 100644 config/v2/pipelineloop/webhook/deployment.yaml create mode 100644 config/v2/pipelineloop/webhook/kustomization.yaml create mode 100644 config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml create mode 100644 config/v2/pipelineloop/webhook/role.yaml create mode 100644 config/v2/pipelineloop/webhook/rolebinding.yaml create mode 100644 config/v2/pipelineloop/webhook/service.yaml create mode 100644 config/v2/pipelineloop/webhook/serviceaccount.yaml create mode 100644 config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml create mode 100644 config/v2/tektoncrds/crd.yaml create mode 100644 config/v2/tektoncrds/kustomization.yaml create mode 100644 config/v2/tektoncrds/scc.anyuid.yaml create mode 100644 config/v2/tektoncrds/scc.privileged.yaml diff --git a/config/v2/cache/clusterrole.yaml b/config/v2/cache/clusterrole.yaml new 
file mode 100644 index 000000000..5178f4f4b --- /dev/null +++ b/config/v2/cache/clusterrole.yaml @@ -0,0 +1,35 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app: kubeflow-pipelines-cache-deployer-clusterrole + name: kubeflow-pipelines-cache-deployer-clusterrole +rules: +- apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + - certificatesigningrequests/approval + verbs: + - create + - delete + - get + - update +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - create + - delete + - get + - list + - patch +- apiGroups: + - certificates.k8s.io + resourceNames: + - kubernetes.io/* + resources: + - signers + verbs: + - approve diff --git a/config/v2/cache/clusterrolebinding.yaml b/config/v2/cache/clusterrolebinding.yaml new file mode 100644 index 000000000..e9cf41f45 --- /dev/null +++ b/config/v2/cache/clusterrolebinding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubeflow-pipelines-cache-deployer-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubeflow-pipelines-cache-deployer-clusterrole +subjects: +- kind: ServiceAccount + name: kubeflow-pipelines-cache-deployer-sa + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/cache/kustomization.yaml b/config/v2/cache/kustomization.yaml new file mode 100644 index 000000000..51229db72 --- /dev/null +++ b/config/v2/cache/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- clusterrole.yaml +- clusterrolebinding.yaml +- serviceaccount.yaml diff --git a/config/v2/cache/serviceaccount.yaml b/config/v2/cache/serviceaccount.yaml new file mode 100644 index 000000000..ffa5d061b --- /dev/null +++ b/config/v2/cache/serviceaccount.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + 
app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kubeflow-pipelines-cache-deployer-sa + \ No newline at end of file diff --git a/config/v2/driver/clusterrole.yaml b/config/v2/driver/clusterrole.yaml new file mode 100644 index 000000000..45a51fbf3 --- /dev/null +++ b/config/v2/driver/clusterrole.yaml @@ -0,0 +1,63 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + name: kfp-driver-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - runs/finalizers + - customruns/finalizers + - runs/status + - customruns/status + - pipelineruns + - task + - taskruns + - conditions + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list diff --git a/config/v2/driver/clusterrolebinding.yaml b/config/v2/driver/clusterrolebinding.yaml new file mode 100644 index 000000000..05db8c567 --- /dev/null +++ b/config/v2/driver/clusterrolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + name: kfp-driver-cluster-access-clusterrolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-driver-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-driver + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/deployment.yaml b/config/v2/driver/deployment.yaml new file mode 100644 index 000000000..ce150c265 --- /dev/null +++ b/config/v2/driver/deployment.yaml @@ -0,0 +1,57 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: ckfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + app.kubernetes.io/version: devel + name: kfp-driver +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: kfp-driver + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + app.kubernetes.io/version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-driver:2.0.0 + imagePullPolicy: Always + name: kfp-driver + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfp-driver diff --git a/config/v2/driver/kustomization.yaml b/config/v2/driver/kustomization.yaml new file mode 100644 index 
000000000..4968c8918 --- /dev/null +++ b/config/v2/driver/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- clusterrole.yaml +- clusterrolebinding.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- service.yaml +- serviceaccount.yaml \ No newline at end of file diff --git a/config/v2/driver/role.yaml b/config/v2/driver/role.yaml new file mode 100644 index 000000000..b4c9f9130 --- /dev/null +++ b/config/v2/driver/role.yaml @@ -0,0 +1,77 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + namespace: datasciencepipelinesapplications-controller + name: kfp-driver-role +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - "" + resources: + - persistentvolumes + - persistentvolumeclaims + verbs: + - '*' +- apiGroups: + - snapshot.storage.k8s.io + resources: + - volumesnapshots + verbs: + - create + - delete + - get +- apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + - services + verbs: + - '*' +- apiGroups: + - "" + - apps + - extensions + resources: + - deployments + - replicasets + verbs: + - '*' +- apiGroups: + - kubeflow.org + resources: + - '*' + verbs: + - '*' +- apiGroups: + - batch + resources: + - jobs + verbs: + - '*' +- apiGroups: + - machinelearning.seldon.io + resources: + - seldondeployments + verbs: + - '*' +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/driver/rolebinding.yaml b/config/v2/driver/rolebinding.yaml new file mode 100644 index 000000000..9819d3b97 --- /dev/null +++ b/config/v2/driver/rolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + 
app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + name: kfp-driver-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfp-driver-role +subjects: +- kind: ServiceAccount + name: kfp-driver + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/service.yaml b/config/v2/driver/service.yaml new file mode 100644 index 000000000..2d4e2bbd5 --- /dev/null +++ b/config/v2/driver/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: kfp-driver + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-driver +spec: + ports: + - name: http-metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/name: kfp-driver + app.kubernetes.io/part-of: kubeflow-pipeline diff --git a/config/v2/driver/serviceaccount.yaml b/config/v2/driver/serviceaccount.yaml new file mode 100644 index 000000000..76988053a --- /dev/null +++ b/config/v2/driver/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: kfp-driver + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kubeflow-pipeline + namespace: datasciencepipelinesapplications-controller + name: kfp-driver diff --git a/config/v2/exithandler/clusterrole.leaderelection.yaml b/config/v2/exithandler/clusterrole.leaderelection.yaml new file mode 100644 index 000000000..2d68dd2df --- /dev/null +++ b/config/v2/exithandler/clusterrole.leaderelection.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-leader-election +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..a681c899a --- /dev/null +++ b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml @@ -0,0 +1,66 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - taskruns + - pipelineruns + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - tekton.dev + resources: + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - custom.tekton.dev + resources: + - exithandlers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml new file mode 100644 index 000000000..3e5643fbe --- /dev/null +++ b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: 
controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-tenant-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..1f3b550ff --- /dev/null +++ b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-controller-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..efeac890b --- /dev/null +++ b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git 
a/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml new file mode 100644 index 000000000..05af4d4bd --- /dev/null +++ b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-tenant-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-controller-tenant-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/deployment.yaml b/config/v2/exithandler/controller/deployment.yaml new file mode 100644 index 000000000..af843e38d --- /dev/null +++ b/config/v2/exithandler/controller/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-exithandler-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: kfp-exithandler-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + 
version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-exithandler-controller:2.0.0 + name: kfp-exithandler-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfp-exithandler-controller diff --git a/config/v2/exithandler/controller/kustomization.yaml b/config/v2/exithandler/controller/kustomization.yaml new file mode 100644 index 000000000..865426037 --- /dev/null +++ b/config/v2/exithandler/controller/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrole.tenantaccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- clusterrolebinding.tenantaccess.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- serviceaccount.yaml \ No newline at end of file diff --git a/config/v2/exithandler/controller/role.yaml b/config/v2/exithandler/controller/role.yaml new file mode 100644 index 000000000..67d9cc2cd --- /dev/null +++ b/config/v2/exithandler/controller/role.yaml @@ -0,0 +1,37 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-leader-election + - config-logging + - config-observability + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get 
+- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/exithandler/controller/rolebinding.yaml b/config/v2/exithandler/controller/rolebinding.yaml new file mode 100644 index 000000000..47958e2ce --- /dev/null +++ b/config/v2/exithandler/controller/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-controller-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfp-exithandler-controller-role +subjects: +- kind: ServiceAccount + name: kfp-exithandler-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/serviceaccount.yaml b/config/v2/exithandler/controller/serviceaccount.yaml new file mode 100644 index 000000000..bd82939e1 --- /dev/null +++ b/config/v2/exithandler/controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfp-exithandler-controller diff --git a/config/v2/exithandler/crd.yaml b/config/v2/exithandler/crd.yaml new file mode 100644 index 000000000..da184975b --- /dev/null +++ b/config/v2/exithandler/crd.yaml @@ -0,0 +1,29 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + version: devel + name: exithandlers.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines 
+ - openshift-pipelines + kind: ExitHandler + plural: exithandlers + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/exithandler/kustomization.yaml b/config/v2/exithandler/kustomization.yaml new file mode 100644 index 000000000..2fac3a648 --- /dev/null +++ b/config/v2/exithandler/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- crd.yaml +- clusterrole.leaderelection.yaml +- ./controller +- ./webhook \ No newline at end of file diff --git a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..0e810e3e6 --- /dev/null +++ b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml @@ -0,0 +1,78 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-cluster-access-clusterrole +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - customresourcedefinitions/status + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - webhook.exithandler.custom.tekton.dev + resources: + - mutatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + 
- validation.webhook.exithandler.custom.tekton.dev + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..ae1d4ca15 --- /dev/null +++ b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfp-exithandler-webhook-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/deployment.yaml b/config/v2/exithandler/webhook/deployment.yaml new file mode 100644 index 000000000..5ed24e94b --- /dev/null +++ b/config/v2/exithandler/webhook/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-exithandler-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: 
tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: kfp-exithandler-webhook + - name: WEBHOOK_SECRET_NAME + value: kfp-exithandler-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-exithandler-webhook:2.0.0 + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + - containerPort: 8443 + name: https-webhook + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfp-exithandler-webhook diff --git a/config/v2/exithandler/webhook/kustomization.yaml b/config/v2/exithandler/webhook/kustomization.yaml new file mode 100644 index 000000000..1b432c759 --- /dev/null +++ b/config/v2/exithandler/webhook/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrolebinding.clusteraccess.yaml +- deployment.yaml +- mutatingwebhookconfig.yaml +- role.yaml +- rolebinding.yaml +- secret.yaml +- service.yaml +- serviceaccount.yaml +- validatingwebhookconfig.yaml \ No newline at end of file diff --git a/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml new file mode 100644 index 000000000..7d8679d1a --- /dev/null +++ b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml @@ -0,0 +1,19 @@ 
+apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: webhook.exithandler.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: webhook.exithandler.custom.tekton.dev + sideEffects: None diff --git a/config/v2/exithandler/webhook/role.yaml b/config/v2/exithandler/webhook/role.yaml new file mode 100644 index 000000000..f7ef29288 --- /dev/null +++ b/config/v2/exithandler/webhook/role.yaml @@ -0,0 +1,53 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-logging + - config-observability + - config-leader-election + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - kfp-exithandler-webhook-certs + resources: + - secrets + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/exithandler/webhook/rolebinding.yaml b/config/v2/exithandler/webhook/rolebinding.yaml new file mode 100644 index 000000000..757701663 --- /dev/null +++ b/config/v2/exithandler/webhook/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + 
app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfp-exithandler-webhook-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfp-exithandler-webhook-role +subjects: +- kind: ServiceAccount + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/secret.yaml b/config/v2/exithandler/webhook/secret.yaml new file mode 100644 index 000000000..ae60d20fa --- /dev/null +++ b/config/v2/exithandler/webhook/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: kfp-exithandler-webhook-certs diff --git a/config/v2/exithandler/webhook/service.yaml b/config/v2/exithandler/webhook/service.yaml new file mode 100644 index 000000000..437bcefa7 --- /dev/null +++ b/config/v2/exithandler/webhook/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton diff --git a/config/v2/exithandler/webhook/serviceaccount.yaml b/config/v2/exithandler/webhook/serviceaccount.yaml new file mode 100644 index 000000000..a5048a80a --- /dev/null +++ 
b/config/v2/exithandler/webhook/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfp-exithandler-webhook diff --git a/config/v2/exithandler/webhook/validatingwebhookconfig.yaml b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml new file mode 100644 index 000000000..c34a0b903 --- /dev/null +++ b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: validation.webhook.exithandler.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfp-exithandler-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: validation.webhook.exithandler.custom.tekton.dev + sideEffects: None diff --git a/config/v2/kfptask/clusterrole.leaderelection.yaml b/config/v2/kfptask/clusterrole.leaderelection.yaml new file mode 100644 index 000000000..664e9c627 --- /dev/null +++ b/config/v2/kfptask/clusterrole.leaderelection.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-leader-election-clusterrole +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml new file mode 100644
index 000000000..0580fafa5 --- /dev/null +++ b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml @@ -0,0 +1,66 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - taskruns + - pipelineruns + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - tekton.dev + resources: + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - custom.tekton.dev + resources: + - kfptasks + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml new file mode 100644 index 000000000..11576abd2 --- /dev/null +++ b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-tenant-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..88108183b --- /dev/null +++ 
b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-controller-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..fa63b846e --- /dev/null +++ b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml new file mode 100644 index 000000000..4123d161e --- /dev/null +++ b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-tenant-access-clusterrolebinding +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-controller-tenant-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/deployment.yaml b/config/v2/kfptask/controller/deployment.yaml new file mode 100644 index 000000000..e3c3eee33 --- /dev/null +++ b/config/v2/kfptask/controller/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfptask-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: kfptask-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-kfptask-controller:2.0.0 + name: kfptask-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: 
RuntimeDefault + serviceAccountName: kfptask-controller diff --git a/config/v2/kfptask/controller/kustomization.yaml b/config/v2/kfptask/controller/kustomization.yaml new file mode 100644 index 000000000..54449bfd8 --- /dev/null +++ b/config/v2/kfptask/controller/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrole.tenantaccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- clusterrolebinding.tenantaccess.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- serviceaccount.yaml diff --git a/config/v2/kfptask/controller/role.yaml b/config/v2/kfptask/controller/role.yaml new file mode 100644 index 000000000..e7d6964e3 --- /dev/null +++ b/config/v2/kfptask/controller/role.yaml @@ -0,0 +1,38 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-controller-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-leader-election + - config-logging + - config-observability + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/kfptask/controller/rolebinding.yaml b/config/v2/kfptask/controller/rolebinding.yaml new file mode 100644 index 000000000..1fc2d1047 --- /dev/null +++ b/config/v2/kfptask/controller/rolebinding.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + 
name: kfptask-controller-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfptask-controller-role +subjects: +- kind: ServiceAccount + name: kfptask-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/serviceaccount.yaml b/config/v2/kfptask/controller/serviceaccount.yaml new file mode 100644 index 000000000..642dbef0b --- /dev/null +++ b/config/v2/kfptask/controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfptask-controller diff --git a/config/v2/kfptask/crd.yaml b/config/v2/kfptask/crd.yaml new file mode 100644 index 000000000..a3ec9de60 --- /dev/null +++ b/config/v2/kfptask/crd.yaml @@ -0,0 +1,29 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + version: devel + name: kfptasks.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines + - openshift-pipelines + kind: KfpTask + plural: kfptasks + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/kfptask/kustomization.yaml b/config/v2/kfptask/kustomization.yaml new file mode 100644 index 000000000..2fac3a648 --- /dev/null +++ b/config/v2/kfptask/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- crd.yaml +- clusterrole.leaderelection.yaml +- ./controller +- ./webhook \ No newline at end of file diff --git a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml 
b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..c4e2177b6 --- /dev/null +++ b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml @@ -0,0 +1,78 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-cluster-access-clusterrole +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - customresourcedefinitions/status + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - webhook.kfptask.custom.tekton.dev + resources: + - mutatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - validation.webhook.kfptask.custom.tekton.dev + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..bfd617465 --- /dev/null +++ b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + 
app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-webhook-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..56bfcd42f --- /dev/null +++ b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kfptask-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/deployment.yaml b/config/v2/kfptask/webhook/deployment.yaml new file mode 100644 index 000000000..48bd44617 --- /dev/null +++ b/config/v2/kfptask/webhook/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfptask-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: 
"false" + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: kfptask-webhook + - name: WEBHOOK_SECRET_NAME + value: kfptask-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-kfptask-webhook:2.0.0 + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + - containerPort: 8443 + name: https-webhook + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: kfptask-webhook diff --git a/config/v2/kfptask/webhook/kustomization.yaml b/config/v2/kfptask/webhook/kustomization.yaml new file mode 100644 index 000000000..6692ef450 --- /dev/null +++ b/config/v2/kfptask/webhook/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- deployment.yaml +- mutatingwebhookconfig.yaml +- role.yaml +- rolebinding.yaml +- secret.yaml +- service.yaml +- serviceaccount.yaml +- validatingwebhookconfig.yaml diff --git a/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml new file mode 100644 index 000000000..8b494fcba --- /dev/null +++ b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: 
admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: webhook.kfptask.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: webhook.kfptask.custom.tekton.dev + sideEffects: None diff --git a/config/v2/kfptask/webhook/role.yaml b/config/v2/kfptask/webhook/role.yaml new file mode 100644 index 000000000..c81cfc18e --- /dev/null +++ b/config/v2/kfptask/webhook/role.yaml @@ -0,0 +1,53 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + name: kfptask-webhook-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-logging + - config-observability + - config-leader-election + - object-store-config + - cache-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - kfptask-webhook-certs + resources: + - secrets + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/kfptask/webhook/rolebinding.yaml b/config/v2/kfptask/webhook/rolebinding.yaml new file mode 100644 index 000000000..e3d798921 --- /dev/null +++ b/config/v2/kfptask/webhook/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + 
name: kfptask-webhook-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kfptask-webhook-role +subjects: +- kind: ServiceAccount + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/secret.yaml b/config/v2/kfptask/webhook/secret.yaml new file mode 100644 index 000000000..6387033ce --- /dev/null +++ b/config/v2/kfptask/webhook/secret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: kfptask-webhook-certs diff --git a/config/v2/kfptask/webhook/service.yaml b/config/v2/kfptask/webhook/service.yaml new file mode 100644 index 000000000..7f1b02e34 --- /dev/null +++ b/config/v2/kfptask/webhook/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: kfp-tekton diff --git a/config/v2/kfptask/webhook/serviceaccount.yaml b/config/v2/kfptask/webhook/serviceaccount.yaml new file mode 100644 index 000000000..27ffcfd45 --- /dev/null +++ b/config/v2/kfptask/webhook/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: 
data-science-pipelines-operator + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + namespace: datasciencepipelinesapplications-controller + name: kfptask-webhook diff --git a/config/v2/kfptask/webhook/validatingwebhookconfig.yaml b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml new file mode 100644 index 000000000..a50ee1e25 --- /dev/null +++ b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: validation.webhook.kfptask.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: kfptask-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: validation.webhook.kfptask.custom.tekton.dev + sideEffects: None diff --git a/config/v2/kustomization.yaml b/config/v2/kustomization.yaml new file mode 100644 index 000000000..227f61350 --- /dev/null +++ b/config/v2/kustomization.yaml @@ -0,0 +1,21 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: datasciencepipelinesapplications-controller +namePrefix: data-science-pipelines-operator- + +# namePrefix: data-science-pipelines-operator- +# configMapGenerator: +# - envs: +# - params.env +# name: dspo-parameters + +resources: +- ./cache +- ./driver +- ./exithandler +- ./kfptask +- ./pipelineloop +- ./tektoncrds + +# configurations: +# - params.yaml diff --git a/config/v2/params.env b/config/v2/params.env new file mode 100644 index 000000000..3f02c0849 --- /dev/null +++ b/config/v2/params.env @@ -0,0 +1,14 @@ +IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:latest +IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:latest
+IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:latest +IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:latest +IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:latest +IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:latest +IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:latest +IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:latest +IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.8 +IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 +IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 +IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 +IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 +IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 diff --git a/config/v2/params.yaml b/config/v2/params.yaml new file mode 100644 index 000000000..28beccc8c --- /dev/null +++ b/config/v2/params.yaml @@ -0,0 +1,7 @@ +varReference: +- path: data + kind: ConfigMap +- path: spec/template/spec/containers/env/value + kind: Deployment +- path: spec/template/spec/containers/image + kind: Deployment diff --git a/config/v2/pipelineloop/clusterrole.leaderelection.yaml b/config/v2/pipelineloop/clusterrole.leaderelection.yaml new file mode 100644 index 000000000..f5ba9f9e9 --- /dev/null +++ b/config/v2/pipelineloop/clusterrole.leaderelection.yaml @@ -0,0 +1,20 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-leader-election +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml 
b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..d53b3bd25 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml @@ -0,0 +1,66 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-cluster-access-clusterrole +rules: +- apiGroups: + - tekton.dev + resources: + - runs + - customruns + - taskruns + - pipelineruns + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - tekton.dev + resources: + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - custom.tekton.dev + resources: + - pipelineloops + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git a/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml new file mode 100644 index 000000000..f4c3d6c61 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml @@ -0,0 +1,21 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-tenant-access-clusterrole +rules: +- apiGroups: + - "" + resources: + - events + verbs: + - get + - list + - create + - update + - delete + - patch + - watch diff --git 
a/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..b8ff40533 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-controller-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..b4dced872 --- /dev/null +++ b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml new file mode 100644 index 000000000..0aa6b29b6 --- /dev/null +++ 
b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-tenant-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-controller-tenant-access-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/deployment.yaml b/config/v2/pipelineloop/controller/deployment.yaml new file mode 100644 index 000000000..933bbe0b6 --- /dev/null +++ b/config/v2/pipelineloop/controller/deployment.yaml @@ -0,0 +1,60 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: tekton-pipelineloop-controller +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: tekton-pipeline-loops + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: tekton-pipelineloop-controller + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/name: controller + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: 
CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-pipelineloop-controller:2.0.0 + name: tekton-pipelineloop-controller + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + serviceAccountName: tekton-pipelineloop-controller diff --git a/config/v2/pipelineloop/controller/kustomization.yaml b/config/v2/pipelineloop/controller/kustomization.yaml new file mode 100644 index 000000000..0824469d5 --- /dev/null +++ b/config/v2/pipelineloop/controller/kustomization.yaml @@ -0,0 +1,11 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrole.tenantaccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- clusterrolebinding.tenantaccess.yaml +- deployment.yaml +- role.yaml +- rolebinding.yaml +- serviceaccount.yaml + diff --git a/config/v2/pipelineloop/controller/role.yaml b/config/v2/pipelineloop/controller/role.yaml new file mode 100644 index 000000000..cb3c3e9a5 --- /dev/null +++ b/config/v2/pipelineloop/controller/role.yaml @@ -0,0 +1,36 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - config-leader-election + - config-logging + - config-observability + - object-store-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git 
a/config/v2/pipelineloop/controller/rolebinding.yaml b/config/v2/pipelineloop/controller/rolebinding.yaml new file mode 100644 index 000000000..4e26f6d24 --- /dev/null +++ b/config/v2/pipelineloop/controller/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-controller-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-pipelineloop-controller-role +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-controller + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/serviceaccount.yaml b/config/v2/pipelineloop/controller/serviceaccount.yaml new file mode 100644 index 000000000..5b1bafc59 --- /dev/null +++ b/config/v2/pipelineloop/controller/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/name: data-science-pipelines-operator + namespace: datasciencepipelinesapplications-controller + name: tekton-pipelineloop-controller diff --git a/config/v2/pipelineloop/crd.yaml b/config/v2/pipelineloop/crd.yaml new file mode 100644 index 000000000..860c6a7b7 --- /dev/null +++ b/config/v2/pipelineloop/crd.yaml @@ -0,0 +1,29 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + version: devel + name: pipelineloops.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines + - openshift-pipelines + kind: PipelineLoop + plural: pipelineloops + scope: Namespaced + 
versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/pipelineloop/kustomization.yaml b/config/v2/pipelineloop/kustomization.yaml new file mode 100644 index 000000000..2fac3a648 --- /dev/null +++ b/config/v2/pipelineloop/kustomization.yaml @@ -0,0 +1,5 @@ +resources: +- crd.yaml +- clusterrole.leaderelection.yaml +- ./controller +- ./webhook \ No newline at end of file diff --git a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml new file mode 100644 index 000000000..f3b5f165d --- /dev/null +++ b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml @@ -0,0 +1,78 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-cluster-access-clusterrole +rules: +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + - customresourcedefinitions/status + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - list + - update + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + - validatingwebhookconfigurations + verbs: + - list + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - webhook.pipelineloop.custom.tekton.dev + resources: + - mutatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - apps + resources: + - deployments + - deployments/finalizers + verbs: + - get + - list + - create + - update + - delete + - patch + - watch +- apiGroups: + - admissionregistration.k8s.io + resourceNames: + - validation.webhook.pipelineloop.custom.tekton.dev + resources: + 
- validatingwebhookconfigurations + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml new file mode 100644 index 000000000..9bab6fc0f --- /dev/null +++ b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-cluster-access-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-webhook-cluster-access-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml new file mode 100644 index 000000000..72ccc7792 --- /dev/null +++ b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-leaderelection-clusterrolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: tekton-pipelineloop-leader-election-clusterrole +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/deployment.yaml 
b/config/v2/pipelineloop/webhook/deployment.yaml new file mode 100644 index 000000000..cc61bbcca --- /dev/null +++ b/config/v2/pipelineloop/webhook/deployment.yaml @@ -0,0 +1,71 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: tekton-pipelineloop-webhook +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "false" + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + spec: + containers: + - env: + - name: SYSTEM_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: CONFIG_LOGGING_NAME + value: config-logging + - name: CONFIG_OBSERVABILITY_NAME + value: config-observability + - name: CONFIG_LEADERELECTION_NAME + value: config-leader-election + - name: WEBHOOK_SERVICE_NAME + value: tekton-pipelineloop-webhook + - name: WEBHOOK_SECRET_NAME + value: tekton-pipelineloop-webhook-certs + - name: METRICS_DOMAIN + value: tekton.dev/pipeline + image: quay.io/internaldatahub/tekton-pipelineloop-webhook:2.0.0 + name: webhook + ports: + - containerPort: 9090 + name: metrics + - containerPort: 8008 + name: profiling + - containerPort: 8443 + name: https-webhook + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + runAsGroup: 65532 + runAsNonRoot: true + seccompProfile: + type: 
RuntimeDefault + serviceAccountName: tekton-pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/kustomization.yaml b/config/v2/pipelineloop/webhook/kustomization.yaml new file mode 100644 index 000000000..519a9540f --- /dev/null +++ b/config/v2/pipelineloop/webhook/kustomization.yaml @@ -0,0 +1,12 @@ +resources: +- clusterrole.clusteraccess.yaml +- clusterrolebinding.clusteraccess.yaml +- clusterrolebinding.leaderelection.yaml +- deployment.yaml +- mutatingwebhookconfig.yaml +- role.yaml +- rolebinding.yaml +- service.yaml +- serviceaccount.yaml +- validatingwebhookconfig.yaml + diff --git a/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml new file mode 100644 index 000000000..bb8faf3d3 --- /dev/null +++ b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + name: webhook.pipelineloop.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: webhook.pipelineloop.custom.tekton.dev + sideEffects: None diff --git a/config/v2/pipelineloop/webhook/role.yaml b/config/v2/pipelineloop/webhook/role.yaml new file mode 100644 index 000000000..7c0cad973 --- /dev/null +++ b/config/v2/pipelineloop/webhook/role.yaml @@ -0,0 +1,52 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - list + - watch 
+- apiGroups: + - "" + resourceNames: + - config-logging + - config-observability + - config-leader-election + - object-store-config + resources: + - configmaps + verbs: + - get +- apiGroups: + - "" + resources: + - secrets + verbs: + - list + - watch +- apiGroups: + - "" + resourceNames: + - tekton-pipelineloop-webhook-certs + resources: + - secrets + verbs: + - get + - update +- apiGroups: + - policy + resourceNames: + - tekton-pipelines + - openshift-pipelines + resources: + - podsecuritypolicies + verbs: + - use diff --git a/config/v2/pipelineloop/webhook/rolebinding.yaml b/config/v2/pipelineloop/webhook/rolebinding.yaml new file mode 100644 index 000000000..f47f37f90 --- /dev/null +++ b/config/v2/pipelineloop/webhook/rolebinding.yaml @@ -0,0 +1,16 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + name: tekton-pipelineloop-webhook-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: tekton-pipelineloop-webhook-role +subjects: +- kind: ServiceAccount + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/service.yaml b/config/v2/pipelineloop/webhook/service.yaml new file mode 100644 index 000000000..b8f09a7b1 --- /dev/null +++ b/config/v2/pipelineloop/webhook/service.yaml @@ -0,0 +1,30 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: tekton-pipelines-webhook + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/version: devel + pipeline.tekton.dev/release: devel + version: devel + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller +spec: + ports: + - name: http-metrics + port: 9090 + targetPort: 9090 + - name: 
http-profiling + port: 8008 + targetPort: 8008 + - name: https-webhook + port: 443 + targetPort: 8443 + selector: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/name: webhook + app.kubernetes.io/part-of: tekton-pipeline-loops diff --git a/config/v2/pipelineloop/webhook/serviceaccount.yaml b/config/v2/pipelineloop/webhook/serviceaccount.yaml new file mode 100644 index 000000000..2bd0bdddf --- /dev/null +++ b/config/v2/pipelineloop/webhook/serviceaccount.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + app.kubernetes.io/name: data-science-pipelines-operator + namespace: datasciencepipelinesapplications-controller + name: tekton-pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml new file mode 100644 index 000000000..063ee7056 --- /dev/null +++ b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml @@ -0,0 +1,19 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + name: validation.webhook.pipelineloop.custom.tekton.dev +webhooks: +- admissionReviewVersions: + - v1beta1 + clientConfig: + service: + name: tekton-pipelineloop-webhook + namespace: datasciencepipelinesapplications-controller + failurePolicy: Fail + name: validation.webhook.pipelineloop.custom.tekton.dev + sideEffects: None diff --git a/config/v2/tektoncrds/crd.yaml b/config/v2/tektoncrds/crd.yaml new file mode 100644 index 000000000..155c675a3 --- /dev/null +++ b/config/v2/tektoncrds/crd.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: 
CustomResourceDefinition +metadata: + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipeline-loops + pipeline.tekton.dev/release: devel + version: devel + name: breaktasks.custom.tekton.dev +spec: + group: custom.tekton.dev + names: + categories: + - tekton + - tekton-pipelines + kind: BreakTask + plural: breaktasks + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} diff --git a/config/v2/tektoncrds/kustomization.yaml b/config/v2/tektoncrds/kustomization.yaml new file mode 100644 index 000000000..2a8fa4333 --- /dev/null +++ b/config/v2/tektoncrds/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- crd.yaml +- scc.anyuid.yaml +- scc.privileged.yaml diff --git a/config/v2/tektoncrds/scc.anyuid.yaml b/config/v2/tektoncrds/scc.anyuid.yaml new file mode 100644 index 000000000..a25c7e939 --- /dev/null +++ b/config/v2/tektoncrds/scc.anyuid.yaml @@ -0,0 +1,61 @@ +allowHostDirVolumePlugin: false +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false +allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: null +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: kubeflow-anyuid provides all features of the restricted + SCC but allows users to run with any UID and any GID. 
+ name: kubeflow-anyuid-kfp-tekton +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:kubeflow:metadatadb +- system:serviceaccount:kubeflow:minio +- system:serviceaccount:kubeflow:default +- system:serviceaccount:kubeflow:pipeline-runner +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache-deployer-sa +- system:serviceaccount:kubeflow:metadata-grpc-server +- system:serviceaccount:kubeflow:kubeflow-pipelines-metadata-writer +- system:serviceaccount:kubeflow:ml-pipeline +- system:serviceaccount:kubeflow:ml-pipeline-persistenceagent +- system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow +- system:serviceaccount:kubeflow:ml-pipeline-ui +- system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account +- system:serviceaccount:kubeflow:ml-pipeline-visualizationserver +- system:serviceaccount:kubeflow:mysql +- system:serviceaccount:kubeflow:kfp-csi-s3 +- system:serviceaccount:kubeflow:kfp-csi-attacher +- system:serviceaccount:kubeflow:kfp-csi-provisioner +- system:serviceaccount:openshift-pipelines:kfp-driver +- system:serviceaccount:openshift-pipelines:kfp-exithandler-controller +- system:serviceaccount:openshift-pipelines:kfp-exithandler-webhook +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-controller +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-webhook +volumes: +- configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret diff --git a/config/v2/tektoncrds/scc.privileged.yaml b/config/v2/tektoncrds/scc.privileged.yaml new file mode 100644 index 000000000..eafc24ea2 --- /dev/null +++ b/config/v2/tektoncrds/scc.privileged.yaml @@ -0,0 +1,62 @@ +allowHostDirVolumePlugin: true +allowHostIPC: false +allowHostNetwork: false +allowHostPID: false +allowHostPorts: false 
+allowPrivilegeEscalation: true +allowPrivilegedContainer: true +allowedCapabilities: null +apiVersion: security.openshift.io/v1 +defaultAddCapabilities: null +fsGroup: + type: RunAsAny +groups: +- system:cluster-admins +kind: SecurityContextConstraints +metadata: + annotations: + kubernetes.io/description: kubeflow-anyuid provides all features of the restricted + SCC but allows users to run with any UID and any GID. + name: kubeflow-privileged-kfp-tekton +priority: 10 +readOnlyRootFilesystem: false +requiredDropCapabilities: +- MKNOD +runAsUser: + type: RunAsAny +seLinuxContext: + type: MustRunAs +supplementalGroups: + type: RunAsAny +users: +- system:serviceaccount:kubeflow:metadatadb +- system:serviceaccount:kubeflow:minio +- system:serviceaccount:kubeflow:default +- system:serviceaccount:kubeflow:pipeline-runner +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache +- system:serviceaccount:kubeflow:kubeflow-pipelines-cache-deployer-sa +- system:serviceaccount:kubeflow:metadata-grpc-server +- system:serviceaccount:kubeflow:kubeflow-pipelines-metadata-writer +- system:serviceaccount:kubeflow:ml-pipeline +- system:serviceaccount:kubeflow:ml-pipeline-persistenceagent +- system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow +- system:serviceaccount:kubeflow:ml-pipeline-ui +- system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account +- system:serviceaccount:kubeflow:ml-pipeline-visualizationserver +- system:serviceaccount:kubeflow:mysql +- system:serviceaccount:kubeflow:kfp-csi-s3 +- system:serviceaccount:kubeflow:kfp-csi-attacher +- system:serviceaccount:kubeflow:kfp-csi-provisioner +- system:serviceaccount:openshift-pipelines:kfp-driver +- system:serviceaccount:openshift-pipelines:kfp-exithandler-controller +- system:serviceaccount:openshift-pipelines:kfp-exithandler-webhook +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-controller +- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-webhook +volumes: +- 
configMap +- downwardAPI +- emptyDir +- persistentVolumeClaim +- projected +- secret +- hostPath From 3892ba57a51ca55985521446ea139d20d7c1aa89 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 26 Sep 2023 03:42:18 -0400 Subject: [PATCH 28/85] WIP: Implement v2deploy make command --- Makefile | 8 ++++++++ config/overlays/make-v2deploy/kustomization.yaml | 5 +++++ 2 files changed, 13 insertions(+) create mode 100644 config/overlays/make-v2deploy/kustomization.yaml diff --git a/Makefile b/Makefile index 7ab88e250..bb57ab03c 100644 --- a/Makefile +++ b/Makefile @@ -52,6 +52,8 @@ IMG ?= quay.io/opendatahub/data-science-pipelines-operator:main ENVTEST_K8S_VERSION = 1.25.0 # Namespace to deploy the operator OPERATOR_NS ?= odh-applications +# Namespace to deploy v2 infrastructure +V2INFRA_NS ?= openshift-pipelines # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -159,6 +161,12 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi cd config/overlays/make-deploy && $(KUSTOMIZE) edit set namespace ${OPERATOR_NS} $(KUSTOMIZE) build config/overlays/make-deploy | kubectl delete --ignore-not-found=$(ignore-not-found) -f - +.PHONY: v2deploy +v2deploy: manifests kustomize + cd config/overlays/make-v2deploy \ + && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} + $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl apply -f - + ##@ Build Dependencies ## Location to install dependencies to diff --git a/config/overlays/make-v2deploy/kustomization.yaml b/config/overlays/make-v2deploy/kustomization.yaml new file mode 100644 index 000000000..6d2e0a3ca --- /dev/null +++ b/config/overlays/make-v2deploy/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: openshift-pipelines +resources: +- ../../v2 From e21369a258bf14ec0f21d980a659a17a12787ef5 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 27 Sep 2023 18:28:14 -0400 
Subject: [PATCH 29/85] Implement DSPVersion item in DSPA CRD --- api/v1alpha1/dspipeline_types.go | 3 + config/base/kustomization.yaml | 64 +++++++++++++++++++ config/base/params.env | 9 +++ config/configmaps/files/config.yaml | 12 +++- ...b.io_datasciencepipelinesapplications.yaml | 3 + config/manager/manager.yaml | 18 ++++++ controllers/config/defaults.go | 16 ++++- controllers/dspipeline_params.go | 56 ++++++++++++---- kfdef/kfdef.yaml | 19 ++++++ 9 files changed, 186 insertions(+), 14 deletions(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 1525b95b7..5e6496a60 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -49,6 +49,9 @@ type DSPASpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *VisualizationServer `json:"visualizationServer"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:="v1" + DSPVersion string `json:"dspVersion,omitempty"` } type APIServer struct { diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index d36804d32..9bd9860f5 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -120,5 +120,69 @@ vars: apiVersion: v1 fieldref: fieldpath: data.ZAP_LOG_LEVEL + - name: IMAGESV2_APISERVER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_APISERVER + - name: IMAGESV2_ARTIFACT + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARTIFACT + - name: IMAGESV2_PERSISTENTAGENT + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_PERSISTENTAGENT + - name: IMAGESV2_SCHEDULEDWORKFLOW + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_SCHEDULEDWORKFLOW + - name: IMAGESV2_CACHE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + 
fieldref: + fieldpath: data.IMAGESV2_CACHE + - name: IMAGESV2_MOVERESULTSIMAGE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MOVERESULTSIMAGE + - name: IMAGESV2_MLMDENVOY + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MLMDENVOY + - name: IMAGESV2_MLMDGRPC + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MLMDGRPC + - name: IMAGESV2_MLMDWRITER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_MLMDWRITER +>>>>>>> 94adfa6 (Implement DSPVersion item in DSPA CRD) configurations: - params.yaml diff --git a/config/base/params.env b/config/base/params.env index dd7f56925..07ae6176c 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -12,4 +12,13 @@ IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 +IMAGESV2_APISERVER=quay.io/rmartine/apiserver:v2 +IMAGESV2_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main +IMAGESV2_PERSISTENTAGENT=quay.io/rmartine/persistenceagent-dev:6b8723529 +IMAGESV2_SCHEDULEDWORKFLOW=quay.io/rmartine/swf-dev:6b8723529 +IMAGESV2_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 +IMAGESV2_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 +IMAGESV2_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 +IMAGESV2_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 +IMAGESV2_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 ZAP_LOG_LEVEL=info diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 41319834a..f246d7dc6 100644 --- 
a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -11,4 +11,14 @@ Images: MlmdGRPC: $(IMAGES_MLMDGRPC) MlmdWriter: $(IMAGES_MLMDWRITER) CRDViewer: $(IMAGES_CRDVIEWER) - VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) \ No newline at end of file + VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) +ImagesV2: + ApiServer: $(IMAGESV2_APISERVER) + Artifact: $(IMAGESV2_ARTIFACT) + Cache: $(IMAGESV2_CACHE) + MoveResultsImage: $(IMAGESV2_MOVERESULTSIMAGE) + PersistentAgent: $(IMAGESV2_PERSISTENTAGENT) + ScheduledWorkflow: $(IMAGESV2_SCHEDULEDWORKFLOW) + MlmdEnvoy: $(IMAGESV2_MLMDENVOY) + MlmdGRPC: $(IMAGESV2_MLMDGRPC) + MlmdWriter: $(IMAGESV2_MLMDWRITER) \ No newline at end of file diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 0060ca717..eea98ada9 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -280,6 +280,9 @@ spec: type: string type: object type: object + dspVersion: + default: v1 + type: string mlmd: default: deploy: false diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index d643464f6..74ec58643 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -61,6 +61,24 @@ spec: value: $(IMAGES_CRDVIEWER) - name: IMAGES_VISUALIZATIONSERVER value: $(IMAGES_VISUALIZATIONSERVER) + - name: IMAGESV2_APISERVER + value: $(IMAGESV2_APISERVER) + - name: IMAGESV2_ARTIFACT + value: $(IMAGESV2_ARTIFACT) + - name: IMAGESV2_PERSISTENTAGENT + value: $(IMAGESV2_PERSISTENTAGENT) + - name: IMAGESV2_SCHEDULEDWORKFLOW + value: $(IMAGESV2_SCHEDULEDWORKFLOW) + - name: IMAGESV2_CACHE + value: $(IMAGESV2_CACHE) + - name: IMAGESV2_MOVERESULTSIMAGE + value: 
$(IMAGESV2_MOVERESULTSIMAGE) + - name: IMAGESV2_MLMDENVOY + value: $(IMAGESV2_MLMDENVOY) + - name: IMAGESV2_MLMDGRPC + value: $(IMAGESV2_MLMDGRPC) + - name: IMAGESV2_MLMDWRITER + value: $(IMAGESV2_MLMDWRITER) - name: ZAP_LOG_LEVEL value: $(ZAP_LOG_LEVEL) securityContext: diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index 4abfdb23a..d909c160b 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -17,10 +17,11 @@ limitations under the License. package config import ( + "time" + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" "github.com/spf13/viper" "k8s.io/apimachinery/pkg/api/resource" - "time" ) const ( @@ -68,6 +69,19 @@ const ( MlmdWriterImagePath = "Images.MlmdWriter" ) +// DSPV2 Image Paths +const ( + APIServerImagePathV2 = "ImagesV2.ApiServer" + APIServerArtifactImagePathV2 = "ImagesV2.Artifact" + APIServerCacheImagePathV2 = "ImagesV2.Cache" + APIServerMoveResultsImagePathV2 = "ImagesV2.MoveResultsImage" + PersistenceAgentImagePathV2 = "ImagesV2.PersistentAgent" + ScheduledWorkflowImagePathV2 = "ImagesV2.ScheduledWorkflow" + MlmdEnvoyImagePathV2 = "ImagesV2.MlmdEnvoy" + MlmdGRPCImagePathV2 = "ImagesV2.MlmdGRPC" + MlmdWriterImagePathV2 = "ImagesV2.MlmdWriter" +) + // DSPA Status Condition Types const ( DatabaseAvailable = "DatabaseAvailable" diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index d19582253..e6be6229a 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -75,6 +75,10 @@ type ObjectStorageConnection struct { SecretAccessKey string } +func (p *DSPAParams) UsingV2Pipelines(dsp *dspa.DataSciencePipelinesApplication) bool { + return dsp.Spec.DSPVersion == "v2" +} + // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. 
func (p *DSPAParams) UsingExternalDB(dsp *dspa.DataSciencePipelinesApplication) bool { if dsp.Spec.Database != nil && dsp.Spec.Database.ExternalDB != nil { @@ -379,25 +383,33 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { if p.MLMD != nil { + MlmdEnvoyImagePath := config.MlmdEnvoyImagePath + MlmdGRPCImagePath := config.MlmdGRPCImagePath + MlmdWriterImagePath := config.MlmdWriterImagePath + if p.UsingV2Pipelines(dsp) { + MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2 + MlmdGRPCImagePath = config.MlmdGRPCImagePathV2 + MlmdWriterImagePath = config.MlmdWriterImagePathV2 + } if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ - Image: config.GetStringConfigWithDefault(config.MlmdEnvoyImagePath, config.DefaultImageValue), + Image: config.GetStringConfigWithDefault(MlmdEnvoyImagePath, config.DefaultImageValue), } } if p.MLMD.GRPC == nil { p.MLMD.GRPC = &dspa.GRPC{ - Image: config.GetStringConfigWithDefault(config.MlmdGRPCImagePath, config.DefaultImageValue), + Image: config.GetStringConfigWithDefault(MlmdGRPCImagePath, config.DefaultImageValue), } } if p.MLMD.Writer == nil { p.MLMD.Writer = &dspa.Writer{ - Image: config.GetStringConfigWithDefault(config.MlmdWriterImagePath, config.DefaultImageValue), + Image: config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue), } } - mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(config.MlmdEnvoyImagePath, config.DefaultImageValue) - mlmdGRPCImageFromConfig := config.GetStringConfigWithDefault(config.MlmdGRPCImagePath, config.DefaultImageValue) - mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(config.MlmdWriterImagePath, config.DefaultImageValue) + mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(MlmdEnvoyImagePath, config.DefaultImageValue) + mlmdGRPCImageFromConfig := 
config.GetStringConfigWithDefault(MlmdGRPCImagePath, config.DefaultImageValue) + mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue) setStringDefault(mlmdEnvoyImageFromConfig, &p.MLMD.Envoy.Image) setStringDefault(mlmdGRPCImageFromConfig, &p.MLMD.GRPC.Image) @@ -440,12 +452,24 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip // TODO: If p. is nil we should create defaults + pipelinesV2Images := p.UsingV2Pipelines(dsp) + if p.APIServer != nil { + APIServerImagePath := config.APIServerImagePath + APIServerArtifactImagePath := config.APIServerArtifactImagePath + APIServerCacheImagePath := config.APIServerCacheImagePath + APIServerMoveResultsImagePath := config.APIServerMoveResultsImagePath + if pipelinesV2Images { + APIServerImagePath = config.APIServerImagePathV2 + APIServerArtifactImagePath = config.APIServerArtifactImagePathV2 + APIServerCacheImagePath = config.APIServerCacheImagePathV2 + APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2 + } - serverImageFromConfig := config.GetStringConfigWithDefault(config.APIServerImagePath, config.DefaultImageValue) - artifactImageFromConfig := config.GetStringConfigWithDefault(config.APIServerArtifactImagePath, config.DefaultImageValue) - cacheImageFromConfig := config.GetStringConfigWithDefault(config.APIServerCacheImagePath, config.DefaultImageValue) - moveResultsImageFromConfig := config.GetStringConfigWithDefault(config.APIServerMoveResultsImagePath, config.DefaultImageValue) + serverImageFromConfig := config.GetStringConfigWithDefault(APIServerImagePath, config.DefaultImageValue) + artifactImageFromConfig := config.GetStringConfigWithDefault(APIServerArtifactImagePath, config.DefaultImageValue) + cacheImageFromConfig := config.GetStringConfigWithDefault(APIServerCacheImagePath, config.DefaultImageValue) + moveResultsImageFromConfig := config.GetStringConfigWithDefault(APIServerMoveResultsImagePath, 
config.DefaultImageValue) setStringDefault(serverImageFromConfig, &p.APIServer.Image) setStringDefault(artifactImageFromConfig, &p.APIServer.ArtifactImage) @@ -462,12 +486,20 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip } } if p.PersistenceAgent != nil { - persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(config.PersistenceAgentImagePath, config.DefaultImageValue) + PersistenceAgentImagePath := config.PersistenceAgentImagePath + if pipelinesV2Images { + PersistenceAgentImagePath = config.PersistenceAgentImagePathV2 + } + persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(PersistenceAgentImagePath, config.DefaultImageValue) setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image) setResourcesDefault(config.PersistenceAgentResourceRequirements, &p.PersistenceAgent.Resources) } if p.ScheduledWorkflow != nil { - scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(config.ScheduledWorkflowImagePath, config.DefaultImageValue) + ScheduledWorkflowImagePath := config.ScheduledWorkflowImagePath + if pipelinesV2Images { + ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2 + } + scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(ScheduledWorkflowImagePath, config.DefaultImageValue) setStringDefault(scheduledWorkflowImageFromConfig, &p.ScheduledWorkflow.Image) setResourcesDefault(config.ScheduledWorkflowResourceRequirements, &p.ScheduledWorkflow.Resources) } diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 4f1c2efe2..2778cdfc7 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -34,6 +34,25 @@ spec: value: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 - name: IMAGES_VISUALIZATIONSERVER value: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 + - name: IMAGESV2_APISERVER + value: quay.io/rmartine/apiserver:v2 + - name: IMAGESV2_ARTIFACT + value: quay.io/opendatahub/ds-pipelines-artifact-manager:main + - name: 
IMAGESV2_PERSISTENTAGENT + value: quay.io/rmartine/persistenceagent-dev:6b8723529 + - name: IMAGESV2_SCHEDULEDWORKFLOW + value: quay.io/rmartine/swf-dev:6b8723529 + - name: IMAGESV2_CACHE + value: registry.access.redhat.com/ubi8/ubi-minimal:8.7 + - name: IMAGESV2_MOVERESULTSIMAGE + value: registry.access.redhat.com/ubi8/ubi-micro:8.7 + - name: IMAGESV2_MLMDENVOY + value: gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 + - name: IMAGESV2_MLMDGRPC + value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 + - name: IMAGESV2_MLMDWRITER + value: gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 + repoRef: name: manifests path: config From bb82a91f7aa9a3be82dc717913eeb8b61bc21d82 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 27 Sep 2023 18:29:45 -0400 Subject: [PATCH 30/85] Remove unneeded v2 kustomization params --- config/v2/kustomization.yaml | 13 +++---------- config/v2/params.env | 14 -------------- config/v2/params.yaml | 7 ------- 3 files changed, 3 insertions(+), 31 deletions(-) delete mode 100644 config/v2/params.env delete mode 100644 config/v2/params.yaml diff --git a/config/v2/kustomization.yaml b/config/v2/kustomization.yaml index 227f61350..ffc6327ef 100644 --- a/config/v2/kustomization.yaml +++ b/config/v2/kustomization.yaml @@ -1,13 +1,9 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization -namespace: foo-pipelines +namespace: openshift-pipelines namePrefix: data-science-pipelines-operator- -# namePrefix: data-science-pipelines-operator- -# configMapGenerator: -# - envs: -# - params.env -# name: dspo-parameters +namePrefix: data-science-pipelines-operator- resources: - ./cache @@ -15,7 +11,4 @@ resources: - ./exithandler - ./kfptask - ./pipelineloop -- ./tektoncrds - -# configurations: -# - params.yaml +- ./tektoncrds \ No newline at end of file diff --git a/config/v2/params.env b/config/v2/params.env deleted file mode 100644 index 3f02c0849..000000000 --- a/config/v2/params.env +++ /dev/null @@ -1,14 +0,0 @@ 
-IMAGES_APISERVER=quay.io/opendatahub/ds-pipelines-api-server:latest -IMAGES_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:latest -IMAGES_PERSISTENTAGENT=quay.io/opendatahub/ds-pipelines-persistenceagent:latest -IMAGES_SCHEDULEDWORKFLOW=quay.io/opendatahub/ds-pipelines-scheduledworkflow:latest -IMAGES_MLMDENVOY=quay.io/opendatahub/ds-pipelines-metadata-envoy:latest -IMAGES_MLMDGRPC=quay.io/opendatahub/ds-pipelines-metadata-grpc:latest -IMAGES_MLMDWRITER=quay.io/opendatahub/ds-pipelines-metadata-writer:latest -IMAGES_DSPO=quay.io/opendatahub/data-science-pipelines-operator:latest -IMAGES_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.8 -IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 -IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 -IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 -IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 -IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 diff --git a/config/v2/params.yaml b/config/v2/params.yaml deleted file mode 100644 index 28beccc8c..000000000 --- a/config/v2/params.yaml +++ /dev/null @@ -1,7 +0,0 @@ -varReference: -- path: data - kind: ConfigMap -- path: spec/template/spec/containers/env/value - kind: Deployment -- path: spec/template/spec/containers/image - kind: Deployment From 55949946aaef0f338ab0f3db7e716c0caa768925 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 17 Oct 2023 16:10:03 -0400 Subject: [PATCH 31/85] Add v2undeploy command to Makefile --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index bb57ab03c..c1497854a 100644 --- a/Makefile +++ b/Makefile @@ -167,6 +167,12 @@ v2deploy: manifests kustomize && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl apply -f - +.PHONY: v2undeploy +v2undeploy: ## Undeploy controller from the K8s 
cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + cd config/overlays/make-v2deploy \ + && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} + $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl delete --ignore-not-found=$(ignore-not-found) -f - + ##@ Build Dependencies ## Location to install dependencies to From 9700595a7cbde07651cf336282b7bf277425edd5 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 17 Oct 2023 16:58:04 -0400 Subject: [PATCH 32/85] Code formatting cleanliness updates - Fix duplicate key in v2/kustomization.yaml - Fix end-of-file newlines --- config/configmaps/files/config.yaml | 2 +- config/v2/cache/serviceaccount.yaml | 1 - config/v2/driver/kustomization.yaml | 2 +- config/v2/exithandler/controller/kustomization.yaml | 2 +- config/v2/exithandler/kustomization.yaml | 2 +- config/v2/exithandler/webhook/kustomization.yaml | 2 +- config/v2/kfptask/kustomization.yaml | 2 +- config/v2/kustomization.yaml | 4 +--- config/v2/pipelineloop/controller/kustomization.yaml | 1 - config/v2/pipelineloop/kustomization.yaml | 2 +- config/v2/pipelineloop/webhook/kustomization.yaml | 1 - kfdef/kfdef.yaml | 2 +- 12 files changed, 9 insertions(+), 14 deletions(-) diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index f246d7dc6..3d19b4cf1 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -21,4 +21,4 @@ ImagesV2: ScheduledWorkflow: $(IMAGESV2_SCHEDULEDWORKFLOW) MlmdEnvoy: $(IMAGESV2_MLMDENVOY) MlmdGRPC: $(IMAGESV2_MLMDGRPC) - MlmdWriter: $(IMAGESV2_MLMDWRITER) \ No newline at end of file + MlmdWriter: $(IMAGESV2_MLMDWRITER) diff --git a/config/v2/cache/serviceaccount.yaml b/config/v2/cache/serviceaccount.yaml index ffa5d061b..708c73c94 100644 --- a/config/v2/cache/serviceaccount.yaml +++ b/config/v2/cache/serviceaccount.yaml @@ -8,4 +8,3 @@ metadata: app.kubernetes.io/part-of: kfp-tekton namespace: 
datasciencepipelinesapplications-controller name: kubeflow-pipelines-cache-deployer-sa - \ No newline at end of file diff --git a/config/v2/driver/kustomization.yaml b/config/v2/driver/kustomization.yaml index 4968c8918..aa8114fa8 100644 --- a/config/v2/driver/kustomization.yaml +++ b/config/v2/driver/kustomization.yaml @@ -5,4 +5,4 @@ resources: - role.yaml - rolebinding.yaml - service.yaml -- serviceaccount.yaml \ No newline at end of file +- serviceaccount.yaml diff --git a/config/v2/exithandler/controller/kustomization.yaml b/config/v2/exithandler/controller/kustomization.yaml index 865426037..54449bfd8 100644 --- a/config/v2/exithandler/controller/kustomization.yaml +++ b/config/v2/exithandler/controller/kustomization.yaml @@ -7,4 +7,4 @@ resources: - deployment.yaml - role.yaml - rolebinding.yaml -- serviceaccount.yaml \ No newline at end of file +- serviceaccount.yaml diff --git a/config/v2/exithandler/kustomization.yaml b/config/v2/exithandler/kustomization.yaml index 2fac3a648..4c7cb015a 100644 --- a/config/v2/exithandler/kustomization.yaml +++ b/config/v2/exithandler/kustomization.yaml @@ -2,4 +2,4 @@ resources: - crd.yaml - clusterrole.leaderelection.yaml - ./controller -- ./webhook \ No newline at end of file +- ./webhook diff --git a/config/v2/exithandler/webhook/kustomization.yaml b/config/v2/exithandler/webhook/kustomization.yaml index 1b432c759..2a58a8aa8 100644 --- a/config/v2/exithandler/webhook/kustomization.yaml +++ b/config/v2/exithandler/webhook/kustomization.yaml @@ -8,4 +8,4 @@ resources: - secret.yaml - service.yaml - serviceaccount.yaml -- validatingwebhookconfig.yaml \ No newline at end of file +- validatingwebhookconfig.yaml diff --git a/config/v2/kfptask/kustomization.yaml b/config/v2/kfptask/kustomization.yaml index 2fac3a648..4c7cb015a 100644 --- a/config/v2/kfptask/kustomization.yaml +++ b/config/v2/kfptask/kustomization.yaml @@ -2,4 +2,4 @@ resources: - crd.yaml - clusterrole.leaderelection.yaml - ./controller -- ./webhook \ No 
newline at end of file +- ./webhook diff --git a/config/v2/kustomization.yaml b/config/v2/kustomization.yaml index ffc6327ef..8b3c8c0a7 100644 --- a/config/v2/kustomization.yaml +++ b/config/v2/kustomization.yaml @@ -3,12 +3,10 @@ kind: Kustomization namespace: openshift-pipelines namePrefix: data-science-pipelines-operator- -namePrefix: data-science-pipelines-operator- - resources: - ./cache - ./driver - ./exithandler - ./kfptask - ./pipelineloop -- ./tektoncrds \ No newline at end of file +- ./tektoncrds diff --git a/config/v2/pipelineloop/controller/kustomization.yaml b/config/v2/pipelineloop/controller/kustomization.yaml index 0824469d5..54449bfd8 100644 --- a/config/v2/pipelineloop/controller/kustomization.yaml +++ b/config/v2/pipelineloop/controller/kustomization.yaml @@ -8,4 +8,3 @@ resources: - role.yaml - rolebinding.yaml - serviceaccount.yaml - diff --git a/config/v2/pipelineloop/kustomization.yaml b/config/v2/pipelineloop/kustomization.yaml index 2fac3a648..4c7cb015a 100644 --- a/config/v2/pipelineloop/kustomization.yaml +++ b/config/v2/pipelineloop/kustomization.yaml @@ -2,4 +2,4 @@ resources: - crd.yaml - clusterrole.leaderelection.yaml - ./controller -- ./webhook \ No newline at end of file +- ./webhook diff --git a/config/v2/pipelineloop/webhook/kustomization.yaml b/config/v2/pipelineloop/webhook/kustomization.yaml index 519a9540f..df691ded5 100644 --- a/config/v2/pipelineloop/webhook/kustomization.yaml +++ b/config/v2/pipelineloop/webhook/kustomization.yaml @@ -9,4 +9,3 @@ resources: - service.yaml - serviceaccount.yaml - validatingwebhookconfig.yaml - diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 2778cdfc7..2e8e33f44 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -52,7 +52,7 @@ spec: value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 - name: IMAGESV2_MLMDWRITER value: gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 - + repoRef: name: manifests path: config From 4e0bb8a17085853d6c9439e20d608dc1d91f102c Mon Sep 17 00:00:00 
2001 From: Giulio Frasca Date: Wed, 18 Oct 2023 23:56:38 -0400 Subject: [PATCH 33/85] Fix Leader Election Cluster Role Names --- config/v2/exithandler/clusterrole.leaderelection.yaml | 2 +- config/v2/kfptask/clusterrole.leaderelection.yaml | 2 +- config/v2/pipelineloop/clusterrole.leaderelection.yaml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/v2/exithandler/clusterrole.leaderelection.yaml b/config/v2/exithandler/clusterrole.leaderelection.yaml index 2d68dd2df..92d381dcd 100644 --- a/config/v2/exithandler/clusterrole.leaderelection.yaml +++ b/config/v2/exithandler/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-leader-election + name: kfp-exithandler-leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io diff --git a/config/v2/kfptask/clusterrole.leaderelection.yaml b/config/v2/kfptask/clusterrole.leaderelection.yaml index 664e9c627..0ecf7d8dc 100644 --- a/config/v2/kfptask/clusterrole.leaderelection.yaml +++ b/config/v2/kfptask/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-leader-election + name: kfptask-leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io diff --git a/config/v2/pipelineloop/clusterrole.leaderelection.yaml b/config/v2/pipelineloop/clusterrole.leaderelection.yaml index f5ba9f9e9..233ffa982 100644 --- a/config/v2/pipelineloop/clusterrole.leaderelection.yaml +++ b/config/v2/pipelineloop/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-leader-election + name: tekton-pipelineloop-leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io From 2fec33481ad87da7de6aee6a5ba56125e31d75ec Mon Sep 17 00:00:00 2001 From: ddalvi Date: 
Wed, 18 Oct 2023 19:48:43 -0400 Subject: [PATCH 34/85] Updating v2 manifests to utilize DSPO namePrefix --- config/v2/cache/clusterrole.yaml | 4 ++-- config/v2/cache/clusterrolebinding.yaml | 6 ++--- config/v2/cache/serviceaccount.yaml | 2 +- config/v2/driver/clusterrole.yaml | 2 +- config/v2/driver/clusterrolebinding.yaml | 6 ++--- config/v2/driver/deployment.yaml | 12 +++++----- config/v2/driver/role.yaml | 2 +- config/v2/driver/rolebinding.yaml | 6 ++--- config/v2/driver/service.yaml | 6 ++--- config/v2/driver/serviceaccount.yaml | 2 +- .../clusterrole.leaderelection.yaml | 2 +- .../controller/clusterrole.clusteraccess.yaml | 2 +- .../controller/clusterrole.tenantaccess.yaml | 2 +- .../clusterrolebinding.clusteraccess.yaml | 6 ++--- .../clusterrolebinding.leaderelection.yaml | 6 ++--- .../clusterrolebinding.tenantaccess.yaml | 6 ++--- .../v2/exithandler/controller/deployment.yaml | 6 ++--- config/v2/exithandler/controller/role.yaml | 2 +- .../exithandler/controller/rolebinding.yaml | 6 ++--- .../controller/serviceaccount.yaml | 2 +- .../webhook/clusterrole.clusteraccess.yaml | 2 +- .../clusterrolebinding.clusteraccess.yaml | 6 ++--- config/v2/exithandler/webhook/deployment.yaml | 4 ++-- .../webhook/mutatingwebhookconfig.yaml | 2 +- config/v2/exithandler/webhook/role.yaml | 2 +- .../v2/exithandler/webhook/rolebinding.yaml | 6 ++--- config/v2/exithandler/webhook/secret.yaml | 2 +- config/v2/exithandler/webhook/service.yaml | 2 +- .../exithandler/webhook/serviceaccount.yaml | 2 +- .../webhook/validatingwebhookconfig.yaml | 2 +- .../kfptask/clusterrole.leaderelection.yaml | 2 +- .../controller/clusterrole.clusteraccess.yaml | 2 +- .../controller/clusterrole.tenantaccess.yaml | 2 +- .../clusterrolebinding.clusteraccess.yaml | 6 ++--- .../clusterrolebinding.leaderelection.yaml | 6 ++--- .../clusterrolebinding.tenantaccess.yaml | 6 ++--- config/v2/kfptask/controller/deployment.yaml | 6 ++--- config/v2/kfptask/controller/role.yaml | 2 +- 
config/v2/kfptask/controller/rolebinding.yaml | 6 ++--- .../v2/kfptask/controller/serviceaccount.yaml | 2 +- .../webhook/clusterrole.clusteraccess.yaml | 2 +- .../clusterrolebinding.clusteraccess.yaml | 6 ++--- .../clusterrolebinding.leaderelection.yaml | 6 ++--- config/v2/kfptask/webhook/deployment.yaml | 4 ++-- .../webhook/mutatingwebhookconfig.yaml | 2 +- config/v2/kfptask/webhook/role.yaml | 2 +- config/v2/kfptask/webhook/rolebinding.yaml | 6 ++--- config/v2/kfptask/webhook/secret.yaml | 2 +- config/v2/kfptask/webhook/service.yaml | 2 +- config/v2/kfptask/webhook/serviceaccount.yaml | 2 +- .../webhook/validatingwebhookconfig.yaml | 2 +- .../clusterrole.leaderelection.yaml | 2 +- .../controller/clusterrole.clusteraccess.yaml | 2 +- .../controller/clusterrole.tenantaccess.yaml | 2 +- .../clusterrolebinding.clusteraccess.yaml | 6 ++--- .../clusterrolebinding.leaderelection.yaml | 6 ++--- .../clusterrolebinding.tenantaccess.yaml | 6 ++--- .../pipelineloop/controller/deployment.yaml | 10 ++++---- config/v2/pipelineloop/controller/role.yaml | 2 +- .../pipelineloop/controller/rolebinding.yaml | 6 ++--- .../controller/serviceaccount.yaml | 2 +- .../webhook/clusterrole.clusteraccess.yaml | 2 +- .../clusterrolebinding.clusteraccess.yaml | 6 ++--- .../clusterrolebinding.leaderelection.yaml | 6 ++--- .../v2/pipelineloop/webhook/deployment.yaml | 10 ++++---- .../webhook/mutatingwebhookconfig.yaml | 2 +- config/v2/pipelineloop/webhook/role.yaml | 4 ++-- .../v2/pipelineloop/webhook/rolebinding.yaml | 6 ++--- config/v2/pipelineloop/webhook/service.yaml | 2 +- .../pipelineloop/webhook/serviceaccount.yaml | 2 +- .../webhook/validatingwebhookconfig.yaml | 2 +- config/v2/tektoncrds/scc.anyuid.yaml | 24 +++++++++---------- config/v2/tektoncrds/scc.privileged.yaml | 24 +++++++++---------- 73 files changed, 164 insertions(+), 164 deletions(-) diff --git a/config/v2/cache/clusterrole.yaml b/config/v2/cache/clusterrole.yaml index 5178f4f4b..73f9ab43b 100644 --- 
a/config/v2/cache/clusterrole.yaml +++ b/config/v2/cache/clusterrole.yaml @@ -2,8 +2,8 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - app: kubeflow-pipelines-cache-deployer-clusterrole - name: kubeflow-pipelines-cache-deployer-clusterrole + app: cache-deployer-clusterrole + name: cache-deployer-clusterrole rules: - apiGroups: - certificates.k8s.io diff --git a/config/v2/cache/clusterrolebinding.yaml b/config/v2/cache/clusterrolebinding.yaml index e9cf41f45..8c1e030fd 100644 --- a/config/v2/cache/clusterrolebinding.yaml +++ b/config/v2/cache/clusterrolebinding.yaml @@ -1,12 +1,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: - name: kubeflow-pipelines-cache-deployer-clusterrolebinding + name: cache-deployer-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kubeflow-pipelines-cache-deployer-clusterrole + name: cache-deployer-clusterrole subjects: - kind: ServiceAccount - name: kubeflow-pipelines-cache-deployer-sa + name: cache-deployer-sa namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/cache/serviceaccount.yaml b/config/v2/cache/serviceaccount.yaml index 708c73c94..f1702bc71 100644 --- a/config/v2/cache/serviceaccount.yaml +++ b/config/v2/cache/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton namespace: datasciencepipelinesapplications-controller - name: kubeflow-pipelines-cache-deployer-sa + name: cache-deployer-sa diff --git a/config/v2/driver/clusterrole.yaml b/config/v2/driver/clusterrole.yaml index 45a51fbf3..3468389c2 100644 --- a/config/v2/driver/clusterrole.yaml +++ b/config/v2/driver/clusterrole.yaml @@ -6,7 +6,7 @@ metadata: app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default app.kubernetes.io/part-of: kubeflow-pipeline - name: kfp-driver-cluster-access-clusterrole + name: driver-cluster-access-clusterrole rules: - apiGroups: - 
tekton.dev diff --git a/config/v2/driver/clusterrolebinding.yaml b/config/v2/driver/clusterrolebinding.yaml index 05db8c567..c2bf01c40 100644 --- a/config/v2/driver/clusterrolebinding.yaml +++ b/config/v2/driver/clusterrolebinding.yaml @@ -6,12 +6,12 @@ metadata: app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default app.kubernetes.io/part-of: kubeflow-pipeline - name: kfp-driver-cluster-access-clusterrolebinding + name: driver-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfp-driver-cluster-access-clusterrole + name: driver-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: kfp-driver + name: driver namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/deployment.yaml b/config/v2/driver/deployment.yaml index ce150c265..025ce936b 100644 --- a/config/v2/driver/deployment.yaml +++ b/config/v2/driver/deployment.yaml @@ -4,17 +4,17 @@ metadata: labels: app.kubernetes.io/component: ckfp-driver app.kubernetes.io/instance: default - app.kubernetes.io/name: kfp-driver + app.kubernetes.io/name: driver app.kubernetes.io/part-of: kubeflow-pipeline app.kubernetes.io/version: devel - name: kfp-driver + name: driver spec: replicas: 1 selector: matchLabels: app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default - app.kubernetes.io/name: kfp-driver + app.kubernetes.io/name: driver app.kubernetes.io/part-of: kubeflow-pipeline template: metadata: @@ -24,7 +24,7 @@ spec: app: kfp-driver app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default - app.kubernetes.io/name: kfp-driver + app.kubernetes.io/name: driver app.kubernetes.io/part-of: kubeflow-pipeline app.kubernetes.io/version: devel spec: @@ -44,7 +44,7 @@ spec: value: tekton.dev/pipeline image: quay.io/internaldatahub/tekton-driver:2.0.0 imagePullPolicy: Always - name: kfp-driver + name: driver securityContext: allowPrivilegeEscalation: false capabilities: @@ 
-54,4 +54,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - serviceAccountName: kfp-driver + serviceAccountName: driver diff --git a/config/v2/driver/role.yaml b/config/v2/driver/role.yaml index b4c9f9130..dc1be8689 100644 --- a/config/v2/driver/role.yaml +++ b/config/v2/driver/role.yaml @@ -7,7 +7,7 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kubeflow-pipeline namespace: datasciencepipelinesapplications-controller - name: kfp-driver-role + name: driver-role rules: - apiGroups: - "" diff --git a/config/v2/driver/rolebinding.yaml b/config/v2/driver/rolebinding.yaml index 9819d3b97..d882b3713 100644 --- a/config/v2/driver/rolebinding.yaml +++ b/config/v2/driver/rolebinding.yaml @@ -6,12 +6,12 @@ metadata: app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default app.kubernetes.io/part-of: kubeflow-pipeline - name: kfp-driver-rolebinding + name: driver-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: kfp-driver-role + name: driver-role subjects: - kind: ServiceAccount - name: kfp-driver + name: driver namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/service.yaml b/config/v2/driver/service.yaml index 2d4e2bbd5..f57b423c3 100644 --- a/config/v2/driver/service.yaml +++ b/config/v2/driver/service.yaml @@ -5,12 +5,12 @@ metadata: app: kfp-driver app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default - app.kubernetes.io/name: kfp-driver + app.kubernetes.io/name: driver app.kubernetes.io/part-of: kubeflow-pipeline app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfp-driver + name: driver spec: ports: - name: http-metrics @@ -20,5 +20,5 @@ spec: selector: app.kubernetes.io/component: kfp-driver app.kubernetes.io/instance: default - app.kubernetes.io/name: kfp-driver + app.kubernetes.io/name: driver app.kubernetes.io/part-of: kubeflow-pipeline diff --git 
a/config/v2/driver/serviceaccount.yaml b/config/v2/driver/serviceaccount.yaml index 76988053a..87d53f272 100644 --- a/config/v2/driver/serviceaccount.yaml +++ b/config/v2/driver/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kubeflow-pipeline namespace: datasciencepipelinesapplications-controller - name: kfp-driver + name: driver diff --git a/config/v2/exithandler/clusterrole.leaderelection.yaml b/config/v2/exithandler/clusterrole.leaderelection.yaml index 92d381dcd..f3a2752f9 100644 --- a/config/v2/exithandler/clusterrole.leaderelection.yaml +++ b/config/v2/exithandler/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-leader-election-clusterrole + name: exithandler-leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io diff --git a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml index a681c899a..e6b5e8a9e 100644 --- a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml +++ b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-controller-cluster-access-clusterrole + name: exithandler-controller-cluster-access-clusterrole rules: - apiGroups: - tekton.dev diff --git a/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml index 3e5643fbe..d666f375b 100644 --- a/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml +++ b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: 
kfp-exithandler-controller-tenant-access-clusterrole + name: exithandler-controller-tenant-access-clusterrole rules: - apiGroups: - "" diff --git a/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml index 1f3b550ff..047393dcc 100644 --- a/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml +++ b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-controller-cluster-access-clusterrolebinding + name: exithandler-controller-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfp-exithandler-controller-cluster-access-clusterrole + name: exithandler-controller-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: kfp-exithandler-controller + name: exithandler-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml index efeac890b..98320d612 100644 --- a/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml +++ b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-controller-leaderelection-clusterrolebinding + name: exithandler-controller-leaderelection-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfp-exithandler-leader-election-clusterrole + name: exithandler-leader-election-clusterrole subjects: - kind: ServiceAccount - name: kfp-exithandler-controller + name: exithandler-controller namespace: 
datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml index 05af4d4bd..7682ba12b 100644 --- a/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml +++ b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-controller-tenant-access-clusterrolebinding + name: exithandler-controller-tenant-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfp-exithandler-controller-tenant-access-clusterrole + name: exithandler-controller-tenant-access-clusterrole subjects: - kind: ServiceAccount - name: kfp-exithandler-controller + name: exithandler-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/deployment.yaml b/config/v2/exithandler/controller/deployment.yaml index af843e38d..843c478fa 100644 --- a/config/v2/exithandler/controller/deployment.yaml +++ b/config/v2/exithandler/controller/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfp-exithandler-controller + name: exithandler-controller spec: replicas: 1 selector: @@ -47,7 +47,7 @@ spec: - name: METRICS_DOMAIN value: tekton.dev/pipeline image: quay.io/internaldatahub/tekton-exithandler-controller:2.0.0 - name: kfp-exithandler-controller + name: exithandler-controller securityContext: allowPrivilegeEscalation: false capabilities: @@ -57,4 +57,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - serviceAccountName: kfp-exithandler-controller + serviceAccountName: exithandler-controller diff --git a/config/v2/exithandler/controller/role.yaml b/config/v2/exithandler/controller/role.yaml 
index 67d9cc2cd..adb1ebed5 100644 --- a/config/v2/exithandler/controller/role.yaml +++ b/config/v2/exithandler/controller/role.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-controller-role + name: exithandler-controller-role rules: - apiGroups: - "" diff --git a/config/v2/exithandler/controller/rolebinding.yaml b/config/v2/exithandler/controller/rolebinding.yaml index 47958e2ce..af8c564db 100644 --- a/config/v2/exithandler/controller/rolebinding.yaml +++ b/config/v2/exithandler/controller/rolebinding.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-controller-rolebinding + name: exithandler-controller-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: kfp-exithandler-controller-role + name: exithandler-controller-role subjects: - kind: ServiceAccount - name: kfp-exithandler-controller + name: exithandler-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/serviceaccount.yaml b/config/v2/exithandler/controller/serviceaccount.yaml index bd82939e1..69823ccd6 100644 --- a/config/v2/exithandler/controller/serviceaccount.yaml +++ b/config/v2/exithandler/controller/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton namespace: datasciencepipelinesapplications-controller - name: kfp-exithandler-controller + name: exithandler-controller diff --git a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml index 0e810e3e6..a283f23be 100644 --- a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml @@ -5,7 +5,7 @@ metadata: 
app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-webhook-cluster-access-clusterrole + name: exithandler-webhook-cluster-access-clusterrole rules: - apiGroups: - apiextensions.k8s.io diff --git a/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml index ae1d4ca15..2df37eade 100644 --- a/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml +++ b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-webhook-cluster-access-clusterrolebinding + name: exithandler-webhook-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfp-exithandler-webhook-cluster-access-clusterrole + name: exithandler-webhook-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: kfp-exithandler-webhook + name: exithandler-webhook namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/deployment.yaml b/config/v2/exithandler/webhook/deployment.yaml index 5ed24e94b..35ddfad35 100644 --- a/config/v2/exithandler/webhook/deployment.yaml +++ b/config/v2/exithandler/webhook/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfp-exithandler-webhook + name: exithandler-webhook spec: replicas: 1 selector: @@ -68,4 +68,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - serviceAccountName: kfp-exithandler-webhook + serviceAccountName: exithandler-webhook diff --git a/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml index 7d8679d1a..655ab1c78 100644 --- 
a/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml +++ b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml @@ -12,7 +12,7 @@ webhooks: - v1beta1 clientConfig: service: - name: kfp-exithandler-webhook + name: exithandler-webhook namespace: datasciencepipelinesapplications-controller failurePolicy: Fail name: webhook.exithandler.custom.tekton.dev diff --git a/config/v2/exithandler/webhook/role.yaml b/config/v2/exithandler/webhook/role.yaml index f7ef29288..d8fbb1139 100644 --- a/config/v2/exithandler/webhook/role.yaml +++ b/config/v2/exithandler/webhook/role.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-webhook-role + name: exithandler-webhook-role rules: - apiGroups: - "" diff --git a/config/v2/exithandler/webhook/rolebinding.yaml b/config/v2/exithandler/webhook/rolebinding.yaml index 757701663..e7baa905d 100644 --- a/config/v2/exithandler/webhook/rolebinding.yaml +++ b/config/v2/exithandler/webhook/rolebinding.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfp-exithandler-webhook-rolebinding + name: exithandler-webhook-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: kfp-exithandler-webhook-role + name: exithandler-webhook-role subjects: - kind: ServiceAccount - name: kfp-exithandler-webhook + name: exithandler-webhook namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/secret.yaml b/config/v2/exithandler/webhook/secret.yaml index ae60d20fa..b9546c694 100644 --- a/config/v2/exithandler/webhook/secret.yaml +++ b/config/v2/exithandler/webhook/secret.yaml @@ -6,4 +6,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton pipeline.tekton.dev/release: devel - name: kfp-exithandler-webhook-certs + name: exithandler-webhook-certs 
diff --git a/config/v2/exithandler/webhook/service.yaml b/config/v2/exithandler/webhook/service.yaml index 437bcefa7..ad2e06a1d 100644 --- a/config/v2/exithandler/webhook/service.yaml +++ b/config/v2/exithandler/webhook/service.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfp-exithandler-webhook + name: exithandler-webhook namespace: datasciencepipelinesapplications-controller spec: ports: diff --git a/config/v2/exithandler/webhook/serviceaccount.yaml b/config/v2/exithandler/webhook/serviceaccount.yaml index a5048a80a..e4ae47bf0 100644 --- a/config/v2/exithandler/webhook/serviceaccount.yaml +++ b/config/v2/exithandler/webhook/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton namespace: datasciencepipelinesapplications-controller - name: kfp-exithandler-webhook + name: exithandler-webhook diff --git a/config/v2/exithandler/webhook/validatingwebhookconfig.yaml b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml index c34a0b903..f18912178 100644 --- a/config/v2/exithandler/webhook/validatingwebhookconfig.yaml +++ b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml @@ -12,7 +12,7 @@ webhooks: - v1beta1 clientConfig: service: - name: kfp-exithandler-webhook + name: exithandler-webhook namespace: datasciencepipelinesapplications-controller failurePolicy: Fail name: validation.webhook.exithandler.custom.tekton.dev diff --git a/config/v2/kfptask/clusterrole.leaderelection.yaml b/config/v2/kfptask/clusterrole.leaderelection.yaml index 0ecf7d8dc..f3a2752f9 100644 --- a/config/v2/kfptask/clusterrole.leaderelection.yaml +++ b/config/v2/kfptask/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-leader-election-clusterrole + name: leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io diff --git 
a/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml index 0580fafa5..d95fd6141 100644 --- a/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml +++ b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-cluster-access-clusterrole + name: controller-cluster-access-clusterrole rules: - apiGroups: - tekton.dev diff --git a/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml index 11576abd2..789553259 100644 --- a/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml +++ b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-tenant-access-clusterrole + name: controller-tenant-access-clusterrole rules: - apiGroups: - "" diff --git a/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml index 88108183b..660c52cc5 100644 --- a/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml +++ b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-cluster-access-clusterrolebinding + name: controller-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfptask-controller-cluster-access-clusterrole + name: controller-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: kfptask-controller + name: controller namespace: 
datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml index fa63b846e..d6449e36d 100644 --- a/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml +++ b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-leaderelection-clusterrolebinding + name: controller-leaderelection-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfptask-leader-election-clusterrole + name: leader-election-clusterrole subjects: - kind: ServiceAccount - name: kfptask-controller + name: controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml index 4123d161e..4827d6ebf 100644 --- a/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml +++ b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-tenant-access-clusterrolebinding + name: controller-tenant-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfptask-controller-tenant-access-clusterrole + name: controller-tenant-access-clusterrole subjects: - kind: ServiceAccount - name: kfptask-controller + name: controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/deployment.yaml b/config/v2/kfptask/controller/deployment.yaml index e3c3eee33..57576974b 100644 --- a/config/v2/kfptask/controller/deployment.yaml +++ 
b/config/v2/kfptask/controller/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfptask-controller + name: controller spec: replicas: 1 selector: @@ -47,7 +47,7 @@ spec: - name: METRICS_DOMAIN value: tekton.dev/pipeline image: quay.io/internaldatahub/tekton-kfptask-controller:2.0.0 - name: kfptask-controller + name: controller securityContext: allowPrivilegeEscalation: false capabilities: @@ -57,4 +57,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - serviceAccountName: kfptask-controller + serviceAccountName: controller diff --git a/config/v2/kfptask/controller/role.yaml b/config/v2/kfptask/controller/role.yaml index e7d6964e3..275f63232 100644 --- a/config/v2/kfptask/controller/role.yaml +++ b/config/v2/kfptask/controller/role.yaml @@ -6,7 +6,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-role + name: controller-role rules: - apiGroups: - "" diff --git a/config/v2/kfptask/controller/rolebinding.yaml b/config/v2/kfptask/controller/rolebinding.yaml index 1fc2d1047..569eb7f83 100644 --- a/config/v2/kfptask/controller/rolebinding.yaml +++ b/config/v2/kfptask/controller/rolebinding.yaml @@ -6,12 +6,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-controller-rolebinding + name: controller-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: kfptask-controller-role + name: controller-role subjects: - kind: ServiceAccount - name: kfptask-controller + name: controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/serviceaccount.yaml b/config/v2/kfptask/controller/serviceaccount.yaml index 642dbef0b..99a950cb2 100644 --- a/config/v2/kfptask/controller/serviceaccount.yaml +++ 
b/config/v2/kfptask/controller/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton namespace: datasciencepipelinesapplications-controller - name: kfptask-controller + name: controller diff --git a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml index c4e2177b6..e57603f2c 100644 --- a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-webhook-cluster-access-clusterrole + name: webhook-cluster-access-clusterrole rules: - apiGroups: - apiextensions.k8s.io diff --git a/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml index bfd617465..58b4bb31d 100644 --- a/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml +++ b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-webhook-cluster-access-clusterrolebinding + name: webhook-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfptask-webhook-cluster-access-clusterrole + name: webhook-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: kfptask-webhook + name: webhook namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml index 56bfcd42f..acd0b5c3e 100644 --- a/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml +++ b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml @@ -5,12 +5,12 
@@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-webhook-leaderelection-clusterrolebinding + name: webhook-leaderelection-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kfptask-leader-election-clusterrole + name: leader-election-clusterrole subjects: - kind: ServiceAccount - name: kfptask-webhook + name: webhook namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/deployment.yaml b/config/v2/kfptask/webhook/deployment.yaml index 48bd44617..2f15a509a 100644 --- a/config/v2/kfptask/webhook/deployment.yaml +++ b/config/v2/kfptask/webhook/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfptask-webhook + name: webhook spec: replicas: 1 selector: @@ -68,4 +68,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - serviceAccountName: kfptask-webhook + serviceAccountName: webhook diff --git a/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml index 8b494fcba..b1ff8fec0 100644 --- a/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml +++ b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml @@ -12,7 +12,7 @@ webhooks: - v1beta1 clientConfig: service: - name: kfptask-webhook + name: webhook namespace: datasciencepipelinesapplications-controller failurePolicy: Fail name: webhook.kfptask.custom.tekton.dev diff --git a/config/v2/kfptask/webhook/role.yaml b/config/v2/kfptask/webhook/role.yaml index c81cfc18e..d179a5cfb 100644 --- a/config/v2/kfptask/webhook/role.yaml +++ b/config/v2/kfptask/webhook/role.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-webhook-role + name: webhook-role rules: - apiGroups: - "" diff --git 
a/config/v2/kfptask/webhook/rolebinding.yaml b/config/v2/kfptask/webhook/rolebinding.yaml index e3d798921..f62b1cdfa 100644 --- a/config/v2/kfptask/webhook/rolebinding.yaml +++ b/config/v2/kfptask/webhook/rolebinding.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: kfptask-webhook-rolebinding + name: webhook-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: kfptask-webhook-role + name: webhook-role subjects: - kind: ServiceAccount - name: kfptask-webhook + name: webhook namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/secret.yaml b/config/v2/kfptask/webhook/secret.yaml index 6387033ce..8e4b6bb5c 100644 --- a/config/v2/kfptask/webhook/secret.yaml +++ b/config/v2/kfptask/webhook/secret.yaml @@ -6,4 +6,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton pipeline.tekton.dev/release: devel - name: kfptask-webhook-certs + name: webhook-certs diff --git a/config/v2/kfptask/webhook/service.yaml b/config/v2/kfptask/webhook/service.yaml index 7f1b02e34..40434576f 100644 --- a/config/v2/kfptask/webhook/service.yaml +++ b/config/v2/kfptask/webhook/service.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: kfptask-webhook + name: webhook namespace: datasciencepipelinesapplications-controller spec: ports: diff --git a/config/v2/kfptask/webhook/serviceaccount.yaml b/config/v2/kfptask/webhook/serviceaccount.yaml index 27ffcfd45..bf030f189 100644 --- a/config/v2/kfptask/webhook/serviceaccount.yaml +++ b/config/v2/kfptask/webhook/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton namespace: datasciencepipelinesapplications-controller - name: kfptask-webhook + name: webhook diff --git 
a/config/v2/kfptask/webhook/validatingwebhookconfig.yaml b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml index a50ee1e25..3f72469a2 100644 --- a/config/v2/kfptask/webhook/validatingwebhookconfig.yaml +++ b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml @@ -12,7 +12,7 @@ webhooks: - v1beta1 clientConfig: service: - name: kfptask-webhook + name: webhook namespace: datasciencepipelinesapplications-controller failurePolicy: Fail name: validation.webhook.kfptask.custom.tekton.dev diff --git a/config/v2/pipelineloop/clusterrole.leaderelection.yaml b/config/v2/pipelineloop/clusterrole.leaderelection.yaml index 233ffa982..a57b544d5 100644 --- a/config/v2/pipelineloop/clusterrole.leaderelection.yaml +++ b/config/v2/pipelineloop/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-leader-election-clusterrole + name: pipelineloop-leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io diff --git a/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml index d53b3bd25..ea5988dd0 100644 --- a/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml +++ b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-cluster-access-clusterrole + name: pipelineloop-controller-cluster-access-clusterrole rules: - apiGroups: - tekton.dev diff --git a/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml index f4c3d6c61..855eab049 100644 --- a/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml +++ b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml @@ -5,7 
+5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-tenant-access-clusterrole + name: pipelineloop-controller-tenant-access-clusterrole rules: - apiGroups: - "" diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml index b8ff40533..adf99c4bf 100644 --- a/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml +++ b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-cluster-access-clusterrolebinding + name: pipelineloop-controller-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: tekton-pipelineloop-controller-cluster-access-clusterrole + name: pipelineloop-controller-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: tekton-pipelineloop-controller + name: pipelineloop-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml index b4dced872..2b1189a48 100644 --- a/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml +++ b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-leaderelection-clusterrolebinding + name: pipelineloop-controller-leaderelection-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: 
tekton-pipelineloop-leader-election-clusterrole + name: pipelineloop-leader-election-clusterrole subjects: - kind: ServiceAccount - name: tekton-pipelineloop-controller + name: pipelineloop-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml index 0aa6b29b6..1b395017e 100644 --- a/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml +++ b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-tenant-access-clusterrolebinding + name: pipelineloop-controller-tenant-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: tekton-pipelineloop-controller-tenant-access-clusterrole + name: pipelineloop-controller-tenant-access-clusterrole subjects: - kind: ServiceAccount - name: tekton-pipelineloop-controller + name: pipelineloop-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/deployment.yaml b/config/v2/pipelineloop/controller/deployment.yaml index 933bbe0b6..77c02b0ae 100644 --- a/config/v2/pipelineloop/controller/deployment.yaml +++ b/config/v2/pipelineloop/controller/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: tekton-pipelineloop-controller + name: pipelineloop-controller spec: replicas: 1 selector: @@ -23,7 +23,7 @@ spec: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "false" labels: - app: tekton-pipelineloop-controller + app: tekton-pipelineloop-controller app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/name: controller @@ 
-46,8 +46,8 @@ spec: value: config-observability - name: METRICS_DOMAIN value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-pipelineloop-controller:2.0.0 - name: tekton-pipelineloop-controller + image: quay.io/internaldatahub/tekton-pipelineloop-controller:2.0.0 + name: pipelineloop-controller securityContext: allowPrivilegeEscalation: false capabilities: @@ -57,4 +57,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - serviceAccountName: tekton-pipelineloop-controller + serviceAccountName: pipelineloop-controller diff --git a/config/v2/pipelineloop/controller/role.yaml b/config/v2/pipelineloop/controller/role.yaml index cb3c3e9a5..6b7818e29 100644 --- a/config/v2/pipelineloop/controller/role.yaml +++ b/config/v2/pipelineloop/controller/role.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-role + name: pipelineloop-controller-role rules: - apiGroups: - "" diff --git a/config/v2/pipelineloop/controller/rolebinding.yaml b/config/v2/pipelineloop/controller/rolebinding.yaml index 4e26f6d24..24feb66bf 100644 --- a/config/v2/pipelineloop/controller/rolebinding.yaml +++ b/config/v2/pipelineloop/controller/rolebinding.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: controller app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-controller-rolebinding + name: pipelineloop-controller-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: tekton-pipelineloop-controller-role + name: pipelineloop-controller-role subjects: - kind: ServiceAccount - name: tekton-pipelineloop-controller + name: pipelineloop-controller namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/serviceaccount.yaml b/config/v2/pipelineloop/controller/serviceaccount.yaml index 
5b1bafc59..7e09fdc8d 100644 --- a/config/v2/pipelineloop/controller/serviceaccount.yaml +++ b/config/v2/pipelineloop/controller/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/part-of: tekton-pipeline-loops app.kubernetes.io/name: data-science-pipelines-operator namespace: datasciencepipelinesapplications-controller - name: tekton-pipelineloop-controller + name: pipelineloop-controller diff --git a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml index f3b5f165d..336322695 100644 --- a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-webhook-cluster-access-clusterrole + name: pipelineloop-webhook-cluster-access-clusterrole rules: - apiGroups: - apiextensions.k8s.io diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml index 9bab6fc0f..63587376d 100644 --- a/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml +++ b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-webhook-cluster-access-clusterrolebinding + name: pipelineloop-webhook-cluster-access-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: tekton-pipelineloop-webhook-cluster-access-clusterrole + name: pipelineloop-webhook-cluster-access-clusterrole subjects: - kind: ServiceAccount - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook namespace: datasciencepipelinesapplications-controller diff --git 
a/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml index 72ccc7792..b164f1cb6 100644 --- a/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml +++ b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-webhook-leaderelection-clusterrolebinding + name: pipelineloop-webhook-leaderelection-clusterrolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: tekton-pipelineloop-leader-election-clusterrole + name: pipelineloop-leader-election-clusterrole subjects: - kind: ServiceAccount - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/deployment.yaml b/config/v2/pipelineloop/webhook/deployment.yaml index cc61bbcca..70370878c 100644 --- a/config/v2/pipelineloop/webhook/deployment.yaml +++ b/config/v2/pipelineloop/webhook/deployment.yaml @@ -9,7 +9,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook spec: replicas: 1 selector: @@ -45,12 +45,12 @@ spec: - name: CONFIG_LEADERELECTION_NAME value: config-leader-election - name: WEBHOOK_SERVICE_NAME - value: tekton-pipelineloop-webhook + value: tektonpipelineloop-webhook - name: WEBHOOK_SECRET_NAME - value: tekton-pipelineloop-webhook-certs + value: tektonpipelineloop-webhook-certs - name: METRICS_DOMAIN value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-pipelineloop-webhook:2.0.0 + image: quay.io/internaldatahub/tektonpipelineloop-webhook:2.0.0 name: webhook ports: - containerPort: 9090 @@ -68,4 +68,4 @@ spec: runAsNonRoot: true seccompProfile: type: RuntimeDefault - 
serviceAccountName: tekton-pipelineloop-webhook + serviceAccountName: pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml index bb8faf3d3..761454a3e 100644 --- a/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml +++ b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml @@ -12,7 +12,7 @@ webhooks: - v1beta1 clientConfig: service: - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook namespace: datasciencepipelinesapplications-controller failurePolicy: Fail name: webhook.pipelineloop.custom.tekton.dev diff --git a/config/v2/pipelineloop/webhook/role.yaml b/config/v2/pipelineloop/webhook/role.yaml index 7c0cad973..e4c8c0d1a 100644 --- a/config/v2/pipelineloop/webhook/role.yaml +++ b/config/v2/pipelineloop/webhook/role.yaml @@ -5,7 +5,7 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-webhook-role + name: pipelineloop-webhook-role rules: - apiGroups: - "" @@ -35,7 +35,7 @@ rules: - apiGroups: - "" resourceNames: - - tekton-pipelineloop-webhook-certs + - tektonpipelineloop-webhook-certs resources: - secrets verbs: diff --git a/config/v2/pipelineloop/webhook/rolebinding.yaml b/config/v2/pipelineloop/webhook/rolebinding.yaml index f47f37f90..d5df12f0c 100644 --- a/config/v2/pipelineloop/webhook/rolebinding.yaml +++ b/config/v2/pipelineloop/webhook/rolebinding.yaml @@ -5,12 +5,12 @@ metadata: app.kubernetes.io/component: webhook app.kubernetes.io/instance: default app.kubernetes.io/part-of: tekton-pipeline-loops - name: tekton-pipelineloop-webhook-rolebinding + name: pipelineloop-webhook-rolebinding roleRef: apiGroup: rbac.authorization.k8s.io kind: Role - name: tekton-pipelineloop-webhook-role + name: pipelineloop-webhook-role subjects: - kind: ServiceAccount - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook namespace: 
datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/service.yaml b/config/v2/pipelineloop/webhook/service.yaml index b8f09a7b1..e073bc3cd 100644 --- a/config/v2/pipelineloop/webhook/service.yaml +++ b/config/v2/pipelineloop/webhook/service.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/version: devel pipeline.tekton.dev/release: devel version: devel - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook namespace: datasciencepipelinesapplications-controller spec: ports: diff --git a/config/v2/pipelineloop/webhook/serviceaccount.yaml b/config/v2/pipelineloop/webhook/serviceaccount.yaml index 2bd0bdddf..bd71350d6 100644 --- a/config/v2/pipelineloop/webhook/serviceaccount.yaml +++ b/config/v2/pipelineloop/webhook/serviceaccount.yaml @@ -7,4 +7,4 @@ metadata: app.kubernetes.io/part-of: tekton-pipeline-loops app.kubernetes.io/name: data-science-pipelines-operator namespace: datasciencepipelinesapplications-controller - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml index 063ee7056..f5fad8ae3 100644 --- a/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml +++ b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml @@ -12,7 +12,7 @@ webhooks: - v1beta1 clientConfig: service: - name: tekton-pipelineloop-webhook + name: pipelineloop-webhook namespace: datasciencepipelinesapplications-controller failurePolicy: Fail name: validation.webhook.pipelineloop.custom.tekton.dev diff --git a/config/v2/tektoncrds/scc.anyuid.yaml b/config/v2/tektoncrds/scc.anyuid.yaml index a25c7e939..a33cd5e0e 100644 --- a/config/v2/tektoncrds/scc.anyuid.yaml +++ b/config/v2/tektoncrds/scc.anyuid.yaml @@ -17,7 +17,7 @@ metadata: annotations: kubernetes.io/description: kubeflow-anyuid provides all features of the restricted SCC but allows users to run with any UID and any GID. 
- name: kubeflow-anyuid-kfp-tekton + name: anyuid-operator-tekton priority: 10 readOnlyRootFilesystem: false requiredDropCapabilities: @@ -33,10 +33,10 @@ users: - system:serviceaccount:kubeflow:minio - system:serviceaccount:kubeflow:default - system:serviceaccount:kubeflow:pipeline-runner -- system:serviceaccount:kubeflow:kubeflow-pipelines-cache -- system:serviceaccount:kubeflow:kubeflow-pipelines-cache-deployer-sa +- system:serviceaccount:kubeflow:cache +- system:serviceaccount:kubeflow:cache-deployer-sa - system:serviceaccount:kubeflow:metadata-grpc-server -- system:serviceaccount:kubeflow:kubeflow-pipelines-metadata-writer +- system:serviceaccount:kubeflow:metadata-writer - system:serviceaccount:kubeflow:ml-pipeline - system:serviceaccount:kubeflow:ml-pipeline-persistenceagent - system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow @@ -44,14 +44,14 @@ users: - system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account - system:serviceaccount:kubeflow:ml-pipeline-visualizationserver - system:serviceaccount:kubeflow:mysql -- system:serviceaccount:kubeflow:kfp-csi-s3 -- system:serviceaccount:kubeflow:kfp-csi-attacher -- system:serviceaccount:kubeflow:kfp-csi-provisioner -- system:serviceaccount:openshift-pipelines:kfp-driver -- system:serviceaccount:openshift-pipelines:kfp-exithandler-controller -- system:serviceaccount:openshift-pipelines:kfp-exithandler-webhook -- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-controller -- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-webhook +- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-s3 +- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-attacher +- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-provisioner +- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-driver +- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-controller +- 
system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-webhook +- system:serviceaccount:openshift-pipelines:tektonpipelineloop-controller +- system:serviceaccount:openshift-pipelines:tektonpipelineloop-webhook volumes: - configMap - downwardAPI diff --git a/config/v2/tektoncrds/scc.privileged.yaml b/config/v2/tektoncrds/scc.privileged.yaml index eafc24ea2..052b50d0d 100644 --- a/config/v2/tektoncrds/scc.privileged.yaml +++ b/config/v2/tektoncrds/scc.privileged.yaml @@ -17,7 +17,7 @@ metadata: annotations: kubernetes.io/description: kubeflow-anyuid provides all features of the restricted SCC but allows users to run with any UID and any GID. - name: kubeflow-privileged-kfp-tekton + name: privileged-operator-tekton priority: 10 readOnlyRootFilesystem: false requiredDropCapabilities: @@ -33,10 +33,10 @@ users: - system:serviceaccount:kubeflow:minio - system:serviceaccount:kubeflow:default - system:serviceaccount:kubeflow:pipeline-runner -- system:serviceaccount:kubeflow:kubeflow-pipelines-cache -- system:serviceaccount:kubeflow:kubeflow-pipelines-cache-deployer-sa +- system:serviceaccount:kubeflow:cache +- system:serviceaccount:kubeflow:cache-deployer-sa - system:serviceaccount:kubeflow:metadata-grpc-server -- system:serviceaccount:kubeflow:kubeflow-pipelines-metadata-writer +- system:serviceaccount:kubeflow:metadata-writer - system:serviceaccount:kubeflow:ml-pipeline - system:serviceaccount:kubeflow:ml-pipeline-persistenceagent - system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow @@ -44,14 +44,14 @@ users: - system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account - system:serviceaccount:kubeflow:ml-pipeline-visualizationserver - system:serviceaccount:kubeflow:mysql -- system:serviceaccount:kubeflow:kfp-csi-s3 -- system:serviceaccount:kubeflow:kfp-csi-attacher -- system:serviceaccount:kubeflow:kfp-csi-provisioner -- system:serviceaccount:openshift-pipelines:kfp-driver -- 
system:serviceaccount:openshift-pipelines:kfp-exithandler-controller -- system:serviceaccount:openshift-pipelines:kfp-exithandler-webhook -- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-controller -- system:serviceaccount:openshift-pipelines:tekton-pipelineloop-webhook +- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-s3 +- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-attacher +- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-provisioner +- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-driver +- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-controller +- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-webhook +- system:serviceaccount:openshift-pipelines:tektonpipelineloop-controller +- system:serviceaccount:openshift-pipelines:tektonpipelineloop-webhook volumes: - configMap - downwardAPI From cefc24f9c493b3cde59b251ae0c114b066e1d7cd Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 19 Oct 2023 14:46:42 -0400 Subject: [PATCH 35/85] Update RBAC for webhook clusterroles - Allow capability to delete mutating and validating webhookconfigurations - Allow capability to CRUD finalizers on openshift-pipelines namespace --- .../exithandler/webhook/clusterrole.clusteraccess.yaml | 10 ++++++++++ .../v2/kfptask/webhook/clusterrole.clusteraccess.yaml | 10 ++++++++++ .../webhook/clusterrole.clusteraccess.yaml | 10 ++++++++++ 3 files changed, 30 insertions(+) diff --git a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml index a283f23be..b773d1ca2 100644 --- a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml @@ -45,6 +45,7 @@ rules: verbs: - get - update + - delete - apiGroups: - apps resources: @@ -58,6 +59,14 @@ rules: - delete - patch - 
watch +- apiGroups: + - "" + resources: + - namespaces/finalizers + resourceNames: + - openshift-pipelines + verbs: + - update - apiGroups: - admissionregistration.k8s.io resourceNames: @@ -67,6 +76,7 @@ rules: verbs: - get - update + - delete - apiGroups: - policy resourceNames: diff --git a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml index e57603f2c..fc0a84851 100644 --- a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml @@ -45,6 +45,7 @@ rules: verbs: - get - update + - delete - apiGroups: - apps resources: @@ -58,6 +59,14 @@ rules: - delete - patch - watch +- apiGroups: + - "" + resources: + - namespaces/finalizers + resourceNames: + - openshift-pipelines + verbs: + - update - apiGroups: - admissionregistration.k8s.io resourceNames: @@ -67,6 +76,7 @@ rules: verbs: - get - update + - delete - apiGroups: - policy resourceNames: diff --git a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml index 336322695..2af9cd61d 100644 --- a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml @@ -45,6 +45,7 @@ rules: verbs: - get - update + - delete - apiGroups: - apps resources: @@ -58,6 +59,14 @@ rules: - delete - patch - watch +- apiGroups: + - "" + resources: + - namespaces/finalizers + resourceNames: + - openshift-pipelines + verbs: + - update - apiGroups: - admissionregistration.k8s.io resourceNames: @@ -67,6 +76,7 @@ rules: verbs: - get - update + - delete - apiGroups: - policy resourceNames: From 2e17ad4787a4782b005bc1c9d1a60da0c813cdb3 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 19 Oct 2023 14:49:14 -0400 Subject: [PATCH 36/85] Add MLMD GRPC env vars to APIServer if defined --- config/internal/apiserver/default/deployment.yaml.tmpl | 8 ++++++++ 
controllers/apiserver_test.go | 1 + .../case_5/expected/created/apiserver_deployment.yaml | 4 ++++ 3 files changed, 13 insertions(+) diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index 7c08f2ddc..7eae3e22f 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -96,6 +96,14 @@ spec: value: "{{.APIServer.CacheImage}}" - name: MOVERESULTS_IMAGE value: "{{.APIServer.MoveResultsImage}}" + {{ if .MLMD.Deploy }} + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-{{.Name}}" + {{ if.MLMD.GRPC.Port }} + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "{{.MLMD.GRPC.Port}}" + {{ end }} + {{ end }} image: {{.APIServer.Image}} imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/apiserver_test.go b/controllers/apiserver_test.go index f075a752f..964e30099 100644 --- a/controllers/apiserver_test.go +++ b/controllers/apiserver_test.go @@ -36,6 +36,7 @@ func TestDeployAPIServer(t *testing.T) { APIServer: &dspav1alpha1.APIServer{ Deploy: true, }, + MLMD: &dspav1alpha1.MLMD{}, Database: &dspav1alpha1.Database{ DisableHealthCheck: false, MariaDB: &dspav1alpha1.MariaDB{ diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 672ecd431..ce795c0bd 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -96,6 +96,10 @@ spec: value: "ubi-minimal:test5" - name: MOVERESULTS_IMAGE value: "busybox:test5" + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-grpc-testdsp5 + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "1337" image: api-server:test5 imagePullPolicy: Always name: 
ds-pipeline-api-server From 1416f286a50937a1e08e2b39bd0639231385bdd7 Mon Sep 17 00:00:00 2001 From: Ricardo Martinelli de Oliveira Date: Fri, 20 Oct 2023 13:03:35 -0300 Subject: [PATCH 37/85] Add NetworkPolicy for kfp-driver and TaskRuns to allow access to API Server --- config/internal/common/default/policy.yaml.tmpl | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/config/internal/common/default/policy.yaml.tmpl b/config/internal/common/default/policy.yaml.tmpl index 5832fd186..ec1533356 100644 --- a/config/internal/common/default/policy.yaml.tmpl +++ b/config/internal/common/default/policy.yaml.tmpl @@ -25,6 +25,16 @@ spec: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: redhat-ods-monitoring + - podSelector: + matchLabels: + app.kubernetes.io/name: kfp-driver + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: openshift-pipelines + - podSelector: + matchLabels: + app.kubernetes.io/managed-by: tekton-pipelines + pipelines.kubeflow.org/v2_component: 'true' - podSelector: matchLabels: app: mariadb-{{.Name}} From f5e75c557c3b8b41ed427a6cd9c932550a6bb104 Mon Sep 17 00:00:00 2001 From: ddalvi Date: Fri, 20 Oct 2023 13:42:52 -0400 Subject: [PATCH 38/85] Remove SCCs from DSPv2 manifests --- config/v2/tektoncrds/scc.anyuid.yaml | 61 ----------------------- config/v2/tektoncrds/scc.privileged.yaml | 62 ------------------------ 2 files changed, 123 deletions(-) delete mode 100644 config/v2/tektoncrds/scc.anyuid.yaml delete mode 100644 config/v2/tektoncrds/scc.privileged.yaml diff --git a/config/v2/tektoncrds/scc.anyuid.yaml b/config/v2/tektoncrds/scc.anyuid.yaml deleted file mode 100644 index a33cd5e0e..000000000 --- a/config/v2/tektoncrds/scc.anyuid.yaml +++ /dev/null @@ -1,61 +0,0 @@ -allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: true -allowedCapabilities: null -apiVersion: 
security.openshift.io/v1 -defaultAddCapabilities: null -fsGroup: - type: RunAsAny -groups: -- system:cluster-admins -kind: SecurityContextConstraints -metadata: - annotations: - kubernetes.io/description: kubeflow-anyuid provides all features of the restricted - SCC but allows users to run with any UID and any GID. - name: anyuid-operator-tekton -priority: 10 -readOnlyRootFilesystem: false -requiredDropCapabilities: -- MKNOD -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: -- system:serviceaccount:kubeflow:metadatadb -- system:serviceaccount:kubeflow:minio -- system:serviceaccount:kubeflow:default -- system:serviceaccount:kubeflow:pipeline-runner -- system:serviceaccount:kubeflow:cache -- system:serviceaccount:kubeflow:cache-deployer-sa -- system:serviceaccount:kubeflow:metadata-grpc-server -- system:serviceaccount:kubeflow:metadata-writer -- system:serviceaccount:kubeflow:ml-pipeline -- system:serviceaccount:kubeflow:ml-pipeline-persistenceagent -- system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow -- system:serviceaccount:kubeflow:ml-pipeline-ui -- system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account -- system:serviceaccount:kubeflow:ml-pipeline-visualizationserver -- system:serviceaccount:kubeflow:mysql -- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-s3 -- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-attacher -- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-provisioner -- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-driver -- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-controller -- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-webhook -- system:serviceaccount:openshift-pipelines:tektonpipelineloop-controller -- system:serviceaccount:openshift-pipelines:tektonpipelineloop-webhook -volumes: -- configMap 
-- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret diff --git a/config/v2/tektoncrds/scc.privileged.yaml b/config/v2/tektoncrds/scc.privileged.yaml deleted file mode 100644 index 052b50d0d..000000000 --- a/config/v2/tektoncrds/scc.privileged.yaml +++ /dev/null @@ -1,62 +0,0 @@ -allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: true -allowedCapabilities: null -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: null -fsGroup: - type: RunAsAny -groups: -- system:cluster-admins -kind: SecurityContextConstraints -metadata: - annotations: - kubernetes.io/description: kubeflow-anyuid provides all features of the restricted - SCC but allows users to run with any UID and any GID. - name: privileged-operator-tekton -priority: 10 -readOnlyRootFilesystem: false -requiredDropCapabilities: -- MKNOD -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: -- system:serviceaccount:kubeflow:metadatadb -- system:serviceaccount:kubeflow:minio -- system:serviceaccount:kubeflow:default -- system:serviceaccount:kubeflow:pipeline-runner -- system:serviceaccount:kubeflow:cache -- system:serviceaccount:kubeflow:cache-deployer-sa -- system:serviceaccount:kubeflow:metadata-grpc-server -- system:serviceaccount:kubeflow:metadata-writer -- system:serviceaccount:kubeflow:ml-pipeline -- system:serviceaccount:kubeflow:ml-pipeline-persistenceagent -- system:serviceaccount:kubeflow:ml-pipeline-scheduledworkflow -- system:serviceaccount:kubeflow:ml-pipeline-ui -- system:serviceaccount:kubeflow:ml-pipeline-viewer-crd-service-account -- system:serviceaccount:kubeflow:ml-pipeline-visualizationserver -- system:serviceaccount:kubeflow:mysql -- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-s3 -- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-attacher 
-- system:serviceaccount:kubeflow:data-science-pipelines-operator-csi-provisioner -- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-driver -- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-controller -- system:serviceaccount:openshift-pipelines:data-science-pipelines-operator-exithandler-webhook -- system:serviceaccount:openshift-pipelines:tektonpipelineloop-controller -- system:serviceaccount:openshift-pipelines:tektonpipelineloop-webhook -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret -- hostPath From fa7ca32aaaf0f9abceba7d87e0ccf3474b646f0c Mon Sep 17 00:00:00 2001 From: Ricardo Martinelli de Oliveira Date: Wed, 25 Oct 2023 14:52:33 -0300 Subject: [PATCH 39/85] Last changes for DSPv2 PoC --- api/v1alpha1/dspipeline_types.go | 9 ++++----- ...hub.io_datasciencepipelinesapplications.yaml | 6 +++--- .../apiserver/default/deployment.yaml.tmpl | 6 +++++- config/internal/common/default/policy.yaml.tmpl | 15 +++++++++++---- config/v2/driver/deployment.yaml | 12 ++++++++++++ .../exithandler/clusterrole.leaderelection.yaml | 2 +- .../v2/pipelineloop/controller/deployment.yaml | 2 +- config/v2/pipelineloop/webhook/deployment.yaml | 2 +- config/v2/tektoncrds/kustomization.yaml | 2 -- controllers/dspipeline_params.go | 7 ------- controllers/mlmd.go | 17 ++++++++++------- controllers/mlmd_test.go | 9 ++++++--- .../expected/created/apiserver_deployment.yaml | 8 ++++++++ .../expected/created/apiserver_deployment.yaml | 8 ++++++++ .../expected/created/apiserver_deployment.yaml | 8 ++++++++ .../expected/created/apiserver_deployment.yaml | 8 ++++++++ .../expected/created/apiserver_deployment.yaml | 6 +++++- 17 files changed, 91 insertions(+), 36 deletions(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 5e6496a60..a14a76d66 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -40,9 +40,8 @@ 
type DSPASpec struct { // ObjectStorage specifies Object Store configurations, used for DS Pipelines artifact passing and storage. Specify either the your own External Storage (e.g. AWS S3), or use the default Minio deployment (unsupported, primarily for development, and testing) . // +kubebuilder:validation:Required *ObjectStorage `json:"objectStorage"` - // +kubebuilder:validation:Optional - // +kubebuilder:default:={deploy: false} - *MLMD `json:"mlmd"` + // +kubebuilder:default:={deploy: true} + *MLMD `json:"mlmd,omitempty"` // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *CRDViewer `json:"crdviewer"` @@ -231,8 +230,8 @@ type Minio struct { } type MLMD struct { - // Enable DS Pipelines Operator management of MLMD. Setting Deploy to false disables operator reconciliation. Default: false - // +kubebuilder:default:=false + // Enable DS Pipelines Operator management of MLMD. Setting Deploy to false disables operator reconciliation. Default: true + // +kubebuilder:default:=true // +kubebuilder:validation:Optional Deploy bool `json:"deploy"` *Envoy `json:"envoy,omitempty"` diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index eea98ada9..5a9cf077a 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -285,13 +285,13 @@ spec: type: string mlmd: default: - deploy: false + deploy: true properties: deploy: - default: false + default: true description: 'Enable DS Pipelines Operator management of MLMD. Setting Deploy to false disables operator reconciliation. 
Default: - false' + true' type: boolean envoy: properties: diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index 7eae3e22f..e9826f0f9 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -98,12 +98,16 @@ spec: value: "{{.APIServer.MoveResultsImage}}" {{ if .MLMD.Deploy }} - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: "ds-pipeline-metadata-grpc-{{.Name}}" + value: "ds-pipeline-metadata-grpc-{{.Name}}.{{.Namespace}}.svc.cluster.local" {{ if.MLMD.GRPC.Port }} - name: METADATA_GRPC_SERVICE_SERVICE_PORT value: "{{.MLMD.GRPC.Port}}" {{ end }} {{ end }} + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" image: {{.APIServer.Image}} imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/config/internal/common/default/policy.yaml.tmpl b/config/internal/common/default/policy.yaml.tmpl index ec1533356..892868cba 100644 --- a/config/internal/common/default/policy.yaml.tmpl +++ b/config/internal/common/default/policy.yaml.tmpl @@ -25,10 +25,7 @@ spec: - namespaceSelector: matchLabels: kubernetes.io/metadata.name: redhat-ods-monitoring - - podSelector: - matchLabels: - app.kubernetes.io/name: kfp-driver - namespaceSelector: + - namespaceSelector: matchLabels: kubernetes.io/metadata.name: openshift-pipelines - podSelector: @@ -72,3 +69,13 @@ spec: port: 8888 - protocol: TCP port: 8887 + - ports: + - protocol: TCP + port: 8080 + from: + - podSelector: + matchLabels: + app.kubernetes.io/name: data-science-pipelines-operator-driver + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: openshift-pipelines diff --git a/config/v2/driver/deployment.yaml b/config/v2/driver/deployment.yaml index 025ce936b..2ea4f2bf9 100644 --- a/config/v2/driver/deployment.yaml +++ b/config/v2/driver/deployment.yaml @@ 
-42,6 +42,18 @@ spec: value: config-observability - name: METRICS_DOMAIN value: tekton.dev/pipeline + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-sample.data-science-pipelines-application-v2.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: '8887' + - name: MINIO_SERVICE_SERVICE_HOST + value: minio-sample.data-science-pipelines-application-v2.svc.cluster.local + - name: MINIO_SERVICE_SERVICE_PORT + value: '9000' + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-grpc-sample.data-science-pipelines-application-v2.svc.cluster.local + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: '8080' image: quay.io/internaldatahub/tekton-driver:2.0.0 imagePullPolicy: Always name: driver diff --git a/config/v2/exithandler/clusterrole.leaderelection.yaml b/config/v2/exithandler/clusterrole.leaderelection.yaml index f3a2752f9..77a121b48 100644 --- a/config/v2/exithandler/clusterrole.leaderelection.yaml +++ b/config/v2/exithandler/clusterrole.leaderelection.yaml @@ -4,7 +4,7 @@ metadata: labels: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton - name: leader-election-clusterrole + name: exithandler-leader-election-clusterrole rules: - apiGroups: - coordination.k8s.io diff --git a/config/v2/pipelineloop/controller/deployment.yaml b/config/v2/pipelineloop/controller/deployment.yaml index 77c02b0ae..1263b8ac2 100644 --- a/config/v2/pipelineloop/controller/deployment.yaml +++ b/config/v2/pipelineloop/controller/deployment.yaml @@ -46,7 +46,7 @@ spec: value: config-observability - name: METRICS_DOMAIN value: tekton.dev/pipeline - image: quay.io/internaldatahub/tektonpipelineloop-controller:2.0.0 + image: quay.io/internaldatahub/tekton-pipelineloop-controller:2.0.0 name: pipelineloop-controller securityContext: allowPrivilegeEscalation: false diff --git a/config/v2/pipelineloop/webhook/deployment.yaml b/config/v2/pipelineloop/webhook/deployment.yaml index 70370878c..1fcbd4c5f 100644 --- 
a/config/v2/pipelineloop/webhook/deployment.yaml +++ b/config/v2/pipelineloop/webhook/deployment.yaml @@ -50,7 +50,7 @@ spec: value: tektonpipelineloop-webhook-certs - name: METRICS_DOMAIN value: tekton.dev/pipeline - image: quay.io/internaldatahub/tektonpipelineloop-webhook:2.0.0 + image: quay.io/internaldatahub/tekton-pipelineloop-webhook:2.0.0 name: webhook ports: - containerPort: 9090 diff --git a/config/v2/tektoncrds/kustomization.yaml b/config/v2/tektoncrds/kustomization.yaml index 2a8fa4333..1d3cbf0f8 100644 --- a/config/v2/tektoncrds/kustomization.yaml +++ b/config/v2/tektoncrds/kustomization.yaml @@ -1,4 +1,2 @@ resources: - crd.yaml -- scc.anyuid.yaml -- scc.privileged.yaml diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index e6be6229a..75445ca5a 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -111,13 +111,6 @@ func (p *DSPAParams) ObjectStorageHealthCheckDisabled(dsp *dspa.DataSciencePipel return false } -func (p *DSPAParams) UsingMLMD(dsp *dspa.DataSciencePipelinesApplication) bool { - if dsp.Spec.MLMD != nil { - return dsp.Spec.MLMD.Deploy - } - return false -} - func passwordGen(n int) string { rand.Seed(time.Now().UnixNano()) var chars = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 78db0983c..3a3197416 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -26,15 +26,18 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) - if params.UsingMLMD(dsp) { - log.Info("Applying ML-Metadata (MLMD) Resources") + if !dsp.Spec.MLMD.Deploy { + r.Log.Info("Skipping Application of ML-Metadata (MLMD) Resources") + return nil + } - err := r.ApplyDir(dsp, params, mlmdTemplatesDir) - if err != nil { - return err - } + log.Info("Applying ML-Metadata (MLMD) Resources") - 
log.Info("Finished applying MLMD Resources") + err := r.ApplyDir(dsp, params, mlmdTemplatesDir) + if err != nil { + return err } + + log.Info("Finished applying MLMD Resources") return nil } diff --git a/controllers/mlmd_test.go b/controllers/mlmd_test.go index 6aadc6751..79a4b9ecd 100644 --- a/controllers/mlmd_test.go +++ b/controllers/mlmd_test.go @@ -211,6 +211,9 @@ func TestDefaultDeployBehaviorMLMD(t *testing.T) { // possibly due to test setup - Investigate. ArchiveLogs: true, }, + MLMD: &dspav1alpha1.MLMD{ + Deploy: true, + }, Database: &dspav1alpha1.Database{ DisableHealthCheck: false, MariaDB: &dspav1alpha1.MariaDB{ @@ -261,18 +264,18 @@ func TestDefaultDeployBehaviorMLMD(t *testing.T) { // Ensure MLMD-Envoy resources still doesn't exist deployment = &appsv1.Deployment{} created, err = reconciler.IsResourceCreated(ctx, deployment, expectedMLMDEnvoyName, testNamespace) - assert.False(t, created) + assert.True(t, created) assert.Nil(t, err) // Ensure MLMD-GRPC resources still doesn't exist deployment = &appsv1.Deployment{} created, err = reconciler.IsResourceCreated(ctx, deployment, expectedMLMDGRPCName, testNamespace) - assert.False(t, created) + assert.True(t, created) assert.Nil(t, err) // Ensure MLMD-Writer resources still doesn't exist deployment = &appsv1.Deployment{} created, err = reconciler.IsResourceCreated(ctx, deployment, expectedMLMDWriterName, testNamespace) - assert.False(t, created) + assert.True(t, created) assert.Nil(t, err) } diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index ecf172926..9f4155047 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -96,6 +96,14 @@ spec: value: "ubi-minimal:test0" - name: MOVERESULTS_IMAGE value: "busybox:test0" + - name: 
METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-testdsp0.default.svc.cluster.local" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "8080" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp0.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" image: api-server:test0 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 1489a4e0d..8e0a430ce 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -96,6 +96,14 @@ spec: value: "ubi-minimal:test2" - name: MOVERESULTS_IMAGE value: "busybox:test2" + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-testdsp2.default.svc.cluster.local" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "8080" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp2.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" image: api-server:test2 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index 6371d1460..52320af6e 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -96,6 +96,14 @@ spec: value: ubi-minimal:test3 - name: MOVERESULTS_IMAGE value: busybox:test3 + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-testdsp3.default.svc.cluster.local" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "8080" + - name: ML_PIPELINE_SERVICE_HOST + value: 
ds-pipeline-testdsp3.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" image: api-server:test3 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index 043a86a37..a3fbf5d28 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -96,6 +96,14 @@ spec: value: "this-ubi-minimal-image-from-cr-should-be-used:test4" - name: MOVERESULTS_IMAGE value: "this-busybox-image-from-cr-should-be-used:test4" + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-testdsp4.default.svc.cluster.local" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "8080" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp4.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" image: this-apiserver-image-from-cr-should-be-used:test4 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index ce795c0bd..c68cead13 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -97,9 +97,13 @@ spec: - name: MOVERESULTS_IMAGE value: "busybox:test5" - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: ds-pipeline-metadata-grpc-testdsp5 + value: "ds-pipeline-metadata-grpc-testdsp5.default.svc.cluster.local" - name: METADATA_GRPC_SERVICE_SERVICE_PORT value: "1337" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp5.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: 
"8887" image: api-server:test5 imagePullPolicy: Always name: ds-pipeline-api-server From 01cd52d6350b3b389ae5ae32b8fd74df1b5399a2 Mon Sep 17 00:00:00 2001 From: Ricardo Martinelli de Oliveira Date: Mon, 30 Oct 2023 13:56:44 -0300 Subject: [PATCH 40/85] Replace sample iris-dataset pipeline with a v2 one --- .../sample-pipeline/sample-pipeline.yaml.tmpl | 1297 ++++++++++------- controllers/dspipeline_params.go | 2 + .../created/sample-pipeline.yaml.tmpl | 1042 ++++++------- .../testdata/declarative/case_6/config.yaml | 10 + .../declarative/case_6/deploy/cr.yaml | 90 ++ .../case_6/expected/apiserver_deployment.yaml | 214 +++ .../expected/configmap_artifact_script.yaml | 35 + .../case_6/expected/mariadb_deployment.yaml | 79 + .../case_6/expected/minio_deployment.yaml | 75 + .../expected/mlpipelines-ui_deployment.yaml | 153 ++ .../persistence-agent_deployment.yaml | 76 + .../case_6/expected/sample-config.yaml.tmpl | 17 + .../case_6/expected/sample-pipeline.yaml.tmpl | 252 ++++ .../scheduled-workflow_deployment.yaml | 65 + 14 files changed, 2365 insertions(+), 1042 deletions(-) create mode 100644 controllers/testdata/declarative/case_6/config.yaml create mode 100644 controllers/testdata/declarative/case_6/deploy/cr.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/apiserver_deployment.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/configmap_artifact_script.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/mariadb_deployment.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/minio_deployment.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/mlpipelines-ui_deployment.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/persistence-agent_deployment.yaml create mode 100644 controllers/testdata/declarative/case_6/expected/sample-config.yaml.tmpl create mode 100644 controllers/testdata/declarative/case_6/expected/sample-pipeline.yaml.tmpl 
create mode 100644 controllers/testdata/declarative/case_6/expected/scheduled-workflow_deployment.yaml diff --git a/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl index 261f364cf..6addfef33 100644 --- a/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl +++ b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl @@ -1,3 +1,4 @@ +{{ if (eq .DSPVersion "v2") }} apiVersion: v1 kind: ConfigMap metadata: @@ -8,547 +9,801 @@ metadata: component: data-science-pipelines data: iris-pipeline-compiled.yaml: |- - apiVersion: tekton.dev/v1beta1 - kind: PipelineRun - metadata: - name: iris-pipeline - annotations: - tekton.dev/output_artifacts: '{"data-prep": [{"key": "artifacts/$PIPELINERUN/data-prep/X_test.tgz", - "name": "data-prep-X_test", "path": "/tmp/outputs/X_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/X_train.tgz", - "name": "data-prep-X_train", "path": "/tmp/outputs/X_train/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_test.tgz", - "name": "data-prep-y_test", "path": "/tmp/outputs/y_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_train.tgz", - "name": "data-prep-y_train", "path": "/tmp/outputs/y_train/data"}], "evaluate-model": - [{"key": "artifacts/$PIPELINERUN/evaluate-model/mlpipeline-metrics.tgz", "name": - "mlpipeline-metrics", "path": "/tmp/outputs/mlpipeline_metrics/data"}], "train-model": - [{"key": "artifacts/$PIPELINERUN/train-model/model.tgz", "name": "train-model-model", - "path": "/tmp/outputs/model/data"}]}' - tekton.dev/input_artifacts: '{"evaluate-model": [{"name": "data-prep-X_test", - "parent_task": "data-prep"}, {"name": "data-prep-y_test", "parent_task": "data-prep"}, - {"name": "train-model-model", "parent_task": "train-model"}], "train-model": - [{"name": "data-prep-X_train", "parent_task": "data-prep"}, {"name": "data-prep-y_train", - "parent_task": "data-prep"}], "validate-model": [{"name": 
"train-model-model", - "parent_task": "train-model"}]}' - tekton.dev/artifact_bucket: mlpipeline - tekton.dev/artifact_endpoint: minio-service.kubeflow:9000 - tekton.dev/artifact_endpoint_scheme: http:// - tekton.dev/artifact_items: '{"data-prep": [["X_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test"], - ["X_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train"], - ["y_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test"], - ["y_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train"]], - "evaluate-model": [["mlpipeline-metrics", "/tmp/outputs/mlpipeline_metrics/data"]], - "train-model": [["model", "$(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model"]], - "validate-model": []}' - sidecar.istio.io/inject: "false" - tekton.dev/template: '' - pipelines.kubeflow.org/big_data_passing_format: $(workspaces.$TASK_NAME.path)/artifacts/$ORIG_PR_NAME/$TASKRUN_NAME/$TASK_PARAM_NAME - pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "iris-model", "name": - "model_obc", "optional": true, "type": "String"}], "name": "Iris Pipeline"}' - labels: - pipelines.kubeflow.org/pipelinename: '' - pipelines.kubeflow.org/generation: '' - spec: + # PIPELINE DEFINITION + # Name: iris-training-pipeline + # Inputs: + # min_max_scaler: bool + # neighbors: int + # standard_scaler: bool + # Outputs: + # train-model-metrics: system.ClassificationMetrics + components: + comp-create-dataset: + executorLabel: exec-create-dataset + outputDefinitions: + artifacts: + iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + comp-normalize-dataset: + executorLabel: exec-normalize-dataset + inputDefinitions: + artifacts: + input_iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + parameters: + min_max_scaler: + parameterType: BOOLEAN + 
standard_scaler: + parameterType: BOOLEAN + outputDefinitions: + artifacts: + normalized_iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + comp-train-model: + executorLabel: exec-train-model + inputDefinitions: + artifacts: + normalized_iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + parameters: + n_neighbors: + parameterType: NUMBER_INTEGER + outputDefinitions: + artifacts: + metrics: + artifactType: + schemaTitle: system.ClassificationMetrics + schemaVersion: 0.0.1 + model: + artifactType: + schemaTitle: system.Model + schemaVersion: 0.0.1 + deploymentSpec: + executors: + exec-create-dataset: + container: + args: + - --executor_input + - '{{"{{"}}${{"}}"}}' + - --function_to_execute + - create_dataset + command: + - sh + - -c + - "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\ + \ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\ + \ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.1'\ + \ && \"$0\" \"$@\"\n" + - sh + - -ec + - 'program_path=$(mktemp -d) + + printf "%s" "$0" > "$program_path/ephemeral_component.py" + + python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@" + + ' + - "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\ + \ *\n\ndef create_dataset(iris_dataset: Output[Dataset]):\n import pandas\ + \ as pd\n\n csv_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n\ + \ col_names = [\n 'Sepal_Length', 'Sepal_Width', 'Petal_Length',\ + \ 'Petal_Width', 'Labels'\n ]\n df = pd.read_csv(csv_url, names=col_names)\n\ + \n with open(iris_dataset.path, 'w') as f:\n df.to_csv(f)\n\n" + image: quay.io/rmartine/data-science:test9 + exec-normalize-dataset: + container: + args: + - --executor_input + - '{{"{{"}}${{"}}"}}' + - --function_to_execute + - normalize_dataset + command: + - sh + - -c + 
- "\nif ! [ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\ + \ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\ + \ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.1'\ + \ && \"$0\" \"$@\"\n" + - sh + - -ec + - 'program_path=$(mktemp -d) + + printf "%s" "$0" > "$program_path/ephemeral_component.py" + + python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@" + + ' + - "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\ + \ *\n\ndef normalize_dataset(\n input_iris_dataset: Input[Dataset],\n\ + \ normalized_iris_dataset: Output[Dataset],\n standard_scaler: bool,\n\ + \ min_max_scaler: bool,\n):\n if standard_scaler is min_max_scaler:\n\ + \ raise ValueError(\n 'Exactly one of standard_scaler\ + \ or min_max_scaler must be True.')\n\n import pandas as pd\n from\ + \ sklearn.preprocessing import MinMaxScaler\n from sklearn.preprocessing\ + \ import StandardScaler\n\n with open(input_iris_dataset.path) as f:\n\ + \ df = pd.read_csv(f)\n labels = df.pop('Labels')\n\n if standard_scaler:\n\ + \ scaler = StandardScaler()\n if min_max_scaler:\n scaler\ + \ = MinMaxScaler()\n\n df = pd.DataFrame(scaler.fit_transform(df))\n\ + \ df['Labels'] = labels\n normalized_iris_dataset.metadata['state']\ + \ = \"Normalized\"\n with open(normalized_iris_dataset.path, 'w') as\ + \ f:\n df.to_csv(f)\n\n" + image: quay.io/rmartine/data-science:test9 + exec-train-model: + container: + args: + - --executor_input + - '{{"{{"}}${{"}}"}}' + - --function_to_execute + - train_model + command: + - sh + - -c + - "\nif ! 
[ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\ + \ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\ + \ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.1'\ + \ && \"$0\" \"$@\"\n" + - sh + - -ec + - 'program_path=$(mktemp -d) + + printf "%s" "$0" > "$program_path/ephemeral_component.py" + + python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@" + + ' + - "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\ + \ *\n\ndef train_model(\n normalized_iris_dataset: Input[Dataset],\n\ + \ model: Output[Model],\n metrics: Output[ClassificationMetrics],\n\ + \ n_neighbors: int,\n):\n import pickle\n\n import pandas as pd\n\ + \ from sklearn.neighbors import KNeighborsClassifier\n\n from sklearn.metrics\ + \ import roc_curve\n from sklearn.model_selection import train_test_split,\ + \ cross_val_predict\n from sklearn.metrics import confusion_matrix\n\n\ + \n with open(normalized_iris_dataset.path) as f:\n df = pd.read_csv(f)\n\ + \n y = df.pop('Labels')\n X = df\n\n X_train, X_test, y_train,\ + \ y_test = train_test_split(X, y, random_state=0)\n\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n\ + \ clf.fit(X_train, y_train)\n\n predictions = cross_val_predict(\n\ + \ clf, X_train, y_train, cv=3)\n metrics.log_confusion_matrix(\n\ + \ ['Iris-Setosa', 'Iris-Versicolour', 'Iris-Virginica'],\n \ + \ confusion_matrix(\n y_train,\n predictions).tolist()\ + \ # .tolist() to convert np array to list.\n )\n\n model.metadata['framework']\ + \ = 'scikit-learn'\n with open(model.path, 'wb') as f:\n pickle.dump(clf,\ + \ f)\n\n" + image: quay.io/rmartine/data-science:test9 + pipelineInfo: + name: iris-training-pipeline + root: + dag: + outputs: + artifacts: + train-model-metrics: + artifactSelectors: + - outputArtifactKey: metrics + producerSubtask: train-model + tasks: + create-dataset: + cachingOptions: + enableCache: 
true + componentRef: + name: comp-create-dataset + taskInfo: + name: create-dataset + normalize-dataset: + cachingOptions: + enableCache: true + componentRef: + name: comp-normalize-dataset + dependentTasks: + - create-dataset + inputs: + artifacts: + input_iris_dataset: + taskOutputArtifact: + outputArtifactKey: iris_dataset + producerTask: create-dataset + parameters: + min_max_scaler: + runtimeValue: + constant: false + standard_scaler: + runtimeValue: + constant: true + taskInfo: + name: normalize-dataset + train-model: + cachingOptions: + enableCache: true + componentRef: + name: comp-train-model + dependentTasks: + - normalize-dataset + inputs: + artifacts: + normalized_iris_dataset: + taskOutputArtifact: + outputArtifactKey: normalized_iris_dataset + producerTask: normalize-dataset + parameters: + n_neighbors: + componentInputParameter: neighbors + taskInfo: + name: train-model + inputDefinitions: + parameters: + min_max_scaler: + parameterType: BOOLEAN + neighbors: + parameterType: NUMBER_INTEGER + standard_scaler: + parameterType: BOOLEAN + outputDefinitions: + artifacts: + train-model-metrics: + artifactType: + schemaTitle: system.ClassificationMetrics + schemaVersion: 0.0.1 + schemaVersion: 2.1.0 + sdkVersion: kfp-2.0.1 +{{ else }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-pipeline-{{.Name}} + namespace: {{.Namespace}} + labels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines +data: + iris-pipeline-compiled.yaml: |- + apiVersion: tekton.dev/v1beta1 + kind: PipelineRun + metadata: + name: iris-pipeline + annotations: + tekton.dev/output_artifacts: '{"data-prep": [{"key": "artifacts/$PIPELINERUN/data-prep/X_test.tgz", + "name": "data-prep-X_test", "path": "/tmp/outputs/X_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/X_train.tgz", + "name": "data-prep-X_train", "path": "/tmp/outputs/X_train/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_test.tgz", + "name": "data-prep-y_test", "path": 
"/tmp/outputs/y_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_train.tgz", + "name": "data-prep-y_train", "path": "/tmp/outputs/y_train/data"}], "evaluate-model": + [{"key": "artifacts/$PIPELINERUN/evaluate-model/mlpipeline-metrics.tgz", "name": + "mlpipeline-metrics", "path": "/tmp/outputs/mlpipeline_metrics/data"}], "train-model": + [{"key": "artifacts/$PIPELINERUN/train-model/model.tgz", "name": "train-model-model", + "path": "/tmp/outputs/model/data"}]}' + tekton.dev/input_artifacts: '{"evaluate-model": [{"name": "data-prep-X_test", + "parent_task": "data-prep"}, {"name": "data-prep-y_test", "parent_task": "data-prep"}, + {"name": "train-model-model", "parent_task": "train-model"}], "train-model": + [{"name": "data-prep-X_train", "parent_task": "data-prep"}, {"name": "data-prep-y_train", + "parent_task": "data-prep"}], "validate-model": [{"name": "train-model-model", + "parent_task": "train-model"}]}' + tekton.dev/artifact_bucket: mlpipeline + tekton.dev/artifact_endpoint: minio-service.kubeflow:9000 + tekton.dev/artifact_endpoint_scheme: http:// + tekton.dev/artifact_items: '{"data-prep": [["X_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test"], + ["X_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train"], + ["y_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test"], + ["y_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train"]], + "evaluate-model": [["mlpipeline-metrics", "/tmp/outputs/mlpipeline_metrics/data"]], + "train-model": [["model", "$(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model"]], + "validate-model": []}' + sidecar.istio.io/inject: "false" + tekton.dev/template: '' + pipelines.kubeflow.org/big_data_passing_format: $(workspaces.$TASK_NAME.path)/artifacts/$ORIG_PR_NAME/$TASKRUN_NAME/$TASK_PARAM_NAME + pipelines.kubeflow.org/pipeline_spec: 
'{"inputs": [{"default": "iris-model", "name": + "model_obc", "optional": true, "type": "String"}], "name": "Iris Pipeline"}' + labels: + pipelines.kubeflow.org/pipelinename: '' + pipelines.kubeflow.org/generation: '' + spec: + params: + - name: model_obc + value: iris-model + pipelineSpec: params: - name: model_obc - value: iris-model - pipelineSpec: - params: - - name: model_obc - default: iris-model - tasks: - - name: data-prep - taskSpec: - steps: - - name: main - args: - - --X-train - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train - - --X-test - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test - - --y-train - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train - - --y-test - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def data_prep( - X_train_file, - X_test_file, - y_train_file, - y_test_file, - ): - import pickle - - import pandas as pd - - from sklearn import datasets - from sklearn.model_selection import train_test_split - - def get_iris_data(): - iris = datasets.load_iris() - data = pd.DataFrame( - { - "sepalLength": iris.data[:, 0], - "sepalWidth": iris.data[:, 1], - "petalLength": iris.data[:, 2], - "petalWidth": iris.data[:, 3], - "species": iris.target, - } - ) - - print("Initial Dataset:") - print(data.head()) - - return data - - def 
create_training_set(dataset, test_size = 0.3): - # Features - X = dataset[["sepalLength", "sepalWidth", "petalLength", "petalWidth"]] - # Labels - y = dataset["species"] - - # Split dataset into training set and test set - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=test_size, random_state=11 - ) - - return X_train, X_test, y_train, y_test - - def save_pickle(object_file, target_object): - with open(object_file, "wb") as f: - pickle.dump(target_object, f) - - dataset = get_iris_data() - X_train, X_test, y_train, y_test = create_training_set(dataset) - - save_pickle(X_train_file, X_train) - save_pickle(X_test_file, X_test) - save_pickle(y_train_file, y_train) - save_pickle(y_test_file, y_test) - - import argparse - _parser = argparse.ArgumentParser(prog='Data prep', description='') - _parser.add_argument("--X-train", dest="X_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--X-test", dest="X_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-train", dest="y_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-test", dest="y_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = data_prep(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: output-taskrun-name - command: - - sh - - -ec - - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: copy-results-artifacts - command: - - sh - - -ec - - | - set -exo pipefail - TOTAL_SIZE=0 - copy_artifact() { + default: 
iris-model + tasks: + - name: data-prep + taskSpec: + steps: + - name: main + args: + - --X-train + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train + - --X-test + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test + - --y-train + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train + - --y-test + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def _make_parent_dirs_and_return_path(file_path: str): + import os + os.makedirs(os.path.dirname(file_path), exist_ok=True) + return file_path + + def data_prep( + X_train_file, + X_test_file, + y_train_file, + y_test_file, + ): + import pickle + + import pandas as pd + + from sklearn import datasets + from sklearn.model_selection import train_test_split + + def get_iris_data(): + iris = datasets.load_iris() + data = pd.DataFrame( + { + "sepalLength": iris.data[:, 0], + "sepalWidth": iris.data[:, 1], + "petalLength": iris.data[:, 2], + "petalWidth": iris.data[:, 3], + "species": iris.target, + } + ) + + print("Initial Dataset:") + print(data.head()) + + return data + + def create_training_set(dataset, test_size = 0.3): + # Features + X = dataset[["sepalLength", "sepalWidth", "petalLength", "petalWidth"]] + # Labels + y = dataset["species"] + + # Split dataset into training set and test set + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=11 + ) + + return X_train, X_test, y_train, y_test + + def save_pickle(object_file, 
target_object): + with open(object_file, "wb") as f: + pickle.dump(target_object, f) + + dataset = get_iris_data() + X_train, X_test, y_train, y_test = create_training_set(dataset) + + save_pickle(X_train_file, X_train) + save_pickle(X_test_file, X_test) + save_pickle(y_train_file, y_train) + save_pickle(y_test_file, y_test) + + import argparse + _parser = argparse.ArgumentParser(prog='Data prep', description='') + _parser.add_argument("--X-train", dest="X_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--X-test", dest="X_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-train", dest="y_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-test", dest="y_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = data_prep(**_parsed_args) + image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: output-taskrun-name + command: + - sh + - -ec + - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: copy-results-artifacts + command: + - sh + - -ec + - | + set -exo pipefail + TOTAL_SIZE=0 + copy_artifact() { + if [ -d "$1" ]; then + tar -czvf "$1".tar.gz "$1" + SUFFIX=".tar.gz" + fi + ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` + TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) + touch "$2" + if [[ $TOTAL_SIZE -lt 3072 ]]; then if [ -d "$1" ]; then - tar -czvf "$1".tar.gz "$1" - SUFFIX=".tar.gz" + tar -tzf "$1".tar.gz > "$2" + elif ! 
awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then + cp "$1" "$2" fi - ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` - TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) - touch "$2" - if [[ $TOTAL_SIZE -lt 3072 ]]; then - if [ -d "$1" ]; then - tar -tzf "$1".tar.gz > "$2" - elif ! awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then - cp "$1" "$2" - fi - fi - } - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train $(results.X-train.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test $(results.X-test.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train $(results.y-train.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test $(results.y-test.path) - onError: continue - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - results: - - name: X-test - description: /tmp/outputs/X_test/data - - name: X-train - description: /tmp/outputs/X_train/data - - name: taskrun-name - - name: y-test - description: /tmp/outputs/y_test/data - - name: y-train - description: /tmp/outputs/y_train/data - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Data prep", "outputs": - [{"name": "X_train"}, {"name": "X_test"}, {"name": "y_train"}, {"name": - "y_test"}], "version": "Data prep@sha256=5aeb512900f57983c9f643ec30ddb4ccc66490a443269b51ce0a67d57cb373b0"}' - workspaces: - - name: data-prep + fi + } + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train $(results.X-train.path) + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test $(results.X-test.path) + copy_artifact 
$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train $(results.y-train.path) + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test $(results.y-test.path) + onError: continue + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] + results: + - name: X-test + description: /tmp/outputs/X_test/data + - name: X-train + description: /tmp/outputs/X_train/data + - name: taskrun-name + - name: y-test + description: /tmp/outputs/y_test/data + - name: y-train + description: /tmp/outputs/y_train/data + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Data prep", "outputs": + [{"name": "X_train"}, {"name": "X_test"}, {"name": "y_train"}, {"name": + "y_test"}], "version": "Data prep@sha256=5aeb512900f57983c9f643ec30ddb4ccc66490a443269b51ce0a67d57cb373b0"}' workspaces: - name: data-prep - workspace: iris-pipeline - - name: train-model - params: - - name: data-prep-trname - value: $(tasks.data-prep.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --X-train - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_train - - --y-train - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_train - - --model - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - 
os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def train_model( - X_train_file, - y_train_file, - model_file, - ): - import pickle - - from sklearn.ensemble import RandomForestClassifier - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - return target_object - - def save_pickle(object_file, target_object): - with open(object_file, "wb") as f: - pickle.dump(target_object, f) - - def train_iris(X_train, y_train): - model = RandomForestClassifier(n_estimators=100) - model.fit(X_train, y_train) - - return model - - X_train = load_pickle(X_train_file) - y_train = load_pickle(y_train_file) - - model = train_iris(X_train, y_train) - - save_pickle(model_file, model) - - import argparse - _parser = argparse.ArgumentParser(prog='Train model', description='') - _parser.add_argument("--X-train", dest="X_train_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-train", dest="y_train_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--model", dest="model_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = train_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: output-taskrun-name - command: - - sh - - -ec - - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: copy-results-artifacts - command: - - sh - - -ec - - | - set -exo pipefail - TOTAL_SIZE=0 - copy_artifact() { + workspaces: + - name: data-prep + workspace: iris-pipeline + - name: train-model + params: + - name: data-prep-trname + value: $(tasks.data-prep.results.taskrun-name) + taskSpec: + 
steps: + - name: main + args: + - --X-train + - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_train + - --y-train + - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_train + - --model + - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def _make_parent_dirs_and_return_path(file_path: str): + import os + os.makedirs(os.path.dirname(file_path), exist_ok=True) + return file_path + + def train_model( + X_train_file, + y_train_file, + model_file, + ): + import pickle + + from sklearn.ensemble import RandomForestClassifier + + def load_pickle(object_file): + with open(object_file, "rb") as f: + target_object = pickle.load(f) + + return target_object + + def save_pickle(object_file, target_object): + with open(object_file, "wb") as f: + pickle.dump(target_object, f) + + def train_iris(X_train, y_train): + model = RandomForestClassifier(n_estimators=100) + model.fit(X_train, y_train) + + return model + + X_train = load_pickle(X_train_file) + y_train = load_pickle(y_train_file) + + model = train_iris(X_train, y_train) + + save_pickle(model_file, model) + + import argparse + _parser = argparse.ArgumentParser(prog='Train model', description='') + _parser.add_argument("--X-train", dest="X_train_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-train", dest="y_train_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--model", dest="model_file", type=_make_parent_dirs_and_return_path, required=True, 
default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = train_model(**_parsed_args) + image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: output-taskrun-name + command: + - sh + - -ec + - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: copy-results-artifacts + command: + - sh + - -ec + - | + set -exo pipefail + TOTAL_SIZE=0 + copy_artifact() { + if [ -d "$1" ]; then + tar -czvf "$1".tar.gz "$1" + SUFFIX=".tar.gz" + fi + ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` + TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) + touch "$2" + if [[ $TOTAL_SIZE -lt 3072 ]]; then if [ -d "$1" ]; then - tar -czvf "$1".tar.gz "$1" - SUFFIX=".tar.gz" - fi - ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` - TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) - touch "$2" - if [[ $TOTAL_SIZE -lt 3072 ]]; then - if [ -d "$1" ]; then - tar -tzf "$1".tar.gz > "$2" - elif ! awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then - cp "$1" "$2" - fi + tar -tzf "$1".tar.gz > "$2" + elif ! 
awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then + cp "$1" "$2" fi - } - copy_artifact $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model $(results.model.path) - onError: continue - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: data-prep-trname - results: - - name: model - description: /tmp/outputs/model/data - - name: taskrun-name - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Train model", - "outputs": [{"name": "model"}], "version": "Train model@sha256=cb1fbd399ee5849dcdfaafced23a0496cae1d5861795062b22512b766ec418ce"}' - workspaces: - - name: train-model + fi + } + copy_artifact $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model $(results.model.path) + onError: continue + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] + params: + - name: data-prep-trname + results: + - name: model + description: /tmp/outputs/model/data + - name: taskrun-name + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Train model", + "outputs": [{"name": "model"}], "version": "Train model@sha256=cb1fbd399ee5849dcdfaafced23a0496cae1d5861795062b22512b766ec418ce"}' workspaces: - name: train-model - workspace: iris-pipeline - runAfter: - - data-prep - - data-prep - - name: evaluate-model + workspaces: + - name: train-model + workspace: iris-pipeline + runAfter: + - data-prep + - data-prep + - name: evaluate-model + params: + - name: data-prep-trname + value: $(tasks.data-prep.results.taskrun-name) + - name: train-model-trname + value: $(tasks.train-model.results.taskrun-name) + taskSpec: + steps: + - name: main + args: + - --X-test + - 
$(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_test + - --y-test + - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_test + - --model + - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model + - --mlpipeline-metrics + - /tmp/outputs/mlpipeline_metrics/data + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def _make_parent_dirs_and_return_path(file_path: str): + import os + os.makedirs(os.path.dirname(file_path), exist_ok=True) + return file_path + + def evaluate_model( + X_test_file, + y_test_file, + model_file, + mlpipeline_metrics_file, + ): + import json + import pickle + + from sklearn.metrics import accuracy_score + + def load_pickle(object_file): + with open(object_file, "rb") as f: + target_object = pickle.load(f) + + return target_object + + X_test = load_pickle(X_test_file) + y_test = load_pickle(y_test_file) + model = load_pickle(model_file) + + y_pred = model.predict(X_test) + + accuracy_score_metric = accuracy_score(y_test, y_pred) + print(f"Accuracy: {accuracy_score_metric}") + + metrics = { + "metrics": [ + { + "name": "accuracy-score", + "numberValue": accuracy_score_metric, + "format": "PERCENTAGE", + }, + ] + } + + with open(mlpipeline_metrics_file, "w") as f: + json.dump(metrics, f) + + import argparse + _parser = argparse.ArgumentParser(prog='Evaluate model', description='') + _parser.add_argument("--X-test", dest="X_test_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-test", dest="y_test_file", type=str, required=True, 
default=argparse.SUPPRESS) + _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--mlpipeline-metrics", dest="mlpipeline_metrics_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = evaluate_model(**_parsed_args) + image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] params: - name: data-prep-trname - value: $(tasks.data-prep.results.taskrun-name) - name: train-model-trname - value: $(tasks.train-model.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --X-test - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_test - - --y-test - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_test - - --model - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model - - --mlpipeline-metrics - - /tmp/outputs/mlpipeline_metrics/data - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def evaluate_model( - X_test_file, - y_test_file, - model_file, - mlpipeline_metrics_file, - ): - import json - import pickle - - from sklearn.metrics import accuracy_score - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - 
return target_object - - X_test = load_pickle(X_test_file) - y_test = load_pickle(y_test_file) - model = load_pickle(model_file) - - y_pred = model.predict(X_test) - - accuracy_score_metric = accuracy_score(y_test, y_pred) - print(f"Accuracy: {accuracy_score_metric}") - - metrics = { - "metrics": [ - { - "name": "accuracy-score", - "numberValue": accuracy_score_metric, - "format": "PERCENTAGE", - }, - ] - } - - with open(mlpipeline_metrics_file, "w") as f: - json.dump(metrics, f) - - import argparse - _parser = argparse.ArgumentParser(prog='Evaluate model', description='') - _parser.add_argument("--X-test", dest="X_test_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-test", dest="y_test_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--mlpipeline-metrics", dest="mlpipeline_metrics_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = evaluate_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: data-prep-trname - - name: train-model-trname - stepTemplate: - volumeMounts: - - name: mlpipeline-metrics - mountPath: /tmp/outputs/mlpipeline_metrics - volumes: + stepTemplate: + volumeMounts: - name: mlpipeline-metrics - emptyDir: {} - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Evaluate model", - "outputs": [{"name": "mlpipeline_metrics", "type": "Metrics"}], "version": - "Evaluate model@sha256=f398e65faecc6f5a4ba11a2c78d8a2274e3ede205a0e199c8bb615531a3abd4a"}' - workspaces: - - name: evaluate-model + mountPath: /tmp/outputs/mlpipeline_metrics + 
volumes: + - name: mlpipeline-metrics + emptyDir: {} + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Evaluate model", + "outputs": [{"name": "mlpipeline_metrics", "type": "Metrics"}], "version": + "Evaluate model@sha256=f398e65faecc6f5a4ba11a2c78d8a2274e3ede205a0e199c8bb615531a3abd4a"}' workspaces: - name: evaluate-model - workspace: iris-pipeline - runAfter: - - data-prep - - data-prep - - train-model - - name: validate-model + workspaces: + - name: evaluate-model + workspace: iris-pipeline + runAfter: + - data-prep + - data-prep + - train-model + - name: validate-model + params: + - name: train-model-trname + value: $(tasks.train-model.results.taskrun-name) + taskSpec: + steps: + - name: main + args: + - --model + - $(workspaces.validate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def validate_model(model_file): + import pickle + + def load_pickle(object_file): + with open(object_file, "rb") as f: + target_object = pickle.load(f) + + return target_object + + model = load_pickle(model_file) + + input_values = [[5, 3, 1.6, 0.2]] + + print(f"Performing test prediction on {input_values}") + result = model.predict(input_values) + + print(f"Response: {result}") + + import argparse + _parser = argparse.ArgumentParser(prog='Validate model', description='') + _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = validate_model(**_parsed_args) 
+ image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] params: - name: train-model-trname - value: $(tasks.train-model.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --model - - $(workspaces.validate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def validate_model(model_file): - import pickle - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - return target_object - - model = load_pickle(model_file) - - input_values = [[5, 3, 1.6, 0.2]] - - print(f"Performing test prediction on {input_values}") - result = model.predict(input_values) - - print(f"Response: {result}") - - import argparse - _parser = argparse.ArgumentParser(prog='Validate model', description='') - _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = validate_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: train-model-trname - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Validate model", - "outputs": [], "version": "Validate model@sha256=53d18ff94fc8f164e7d8455f2c87fa7fdac17e7502502aaa52012e4247d089ee"}' - 
workspaces: - - name: validate-model + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Validate model", + "outputs": [], "version": "Validate model@sha256=53d18ff94fc8f164e7d8455f2c87fa7fdac17e7502502aaa52012e4247d089ee"}' workspaces: - name: validate-model - workspace: iris-pipeline - runAfter: - - train-model workspaces: - - name: iris-pipeline + - name: validate-model + workspace: iris-pipeline + runAfter: + - train-model workspaces: - name: iris-pipeline - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi + workspaces: + - name: iris-pipeline + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi +{{ end }} diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 75445ca5a..e816b131e 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -39,6 +39,7 @@ type DSPAParams struct { Name string Namespace string Owner mf.Owner + DSPVersion string APIServer *dspa.APIServer APIServerServiceName string OAuthProxy string @@ -432,6 +433,7 @@ func setResourcesDefault(defaultValue dspa.ResourceRequirements, value **dspa.Re func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { p.Name = dsp.Name p.Namespace = dsp.Namespace + p.DSPVersion = dsp.Spec.DSPVersion p.Owner = dsp p.APIServer = dsp.Spec.APIServer.DeepCopy() p.APIServerServiceName = fmt.Sprintf("%s-%s", config.DSPServicePrefix, p.Name) diff --git a/controllers/testdata/declarative/case_2/expected/created/sample-pipeline.yaml.tmpl b/controllers/testdata/declarative/case_2/expected/created/sample-pipeline.yaml.tmpl index 9b1ac3cd8..9fd4da3b0 100644 --- a/controllers/testdata/declarative/case_2/expected/created/sample-pipeline.yaml.tmpl +++ 
b/controllers/testdata/declarative/case_2/expected/created/sample-pipeline.yaml.tmpl @@ -8,547 +8,547 @@ metadata: component: data-science-pipelines data: iris-pipeline-compiled.yaml: |- - apiVersion: tekton.dev/v1beta1 - kind: PipelineRun - metadata: - name: iris-pipeline - annotations: - tekton.dev/output_artifacts: '{"data-prep": [{"key": "artifacts/$PIPELINERUN/data-prep/X_test.tgz", - "name": "data-prep-X_test", "path": "/tmp/outputs/X_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/X_train.tgz", - "name": "data-prep-X_train", "path": "/tmp/outputs/X_train/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_test.tgz", - "name": "data-prep-y_test", "path": "/tmp/outputs/y_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_train.tgz", - "name": "data-prep-y_train", "path": "/tmp/outputs/y_train/data"}], "evaluate-model": - [{"key": "artifacts/$PIPELINERUN/evaluate-model/mlpipeline-metrics.tgz", "name": - "mlpipeline-metrics", "path": "/tmp/outputs/mlpipeline_metrics/data"}], "train-model": - [{"key": "artifacts/$PIPELINERUN/train-model/model.tgz", "name": "train-model-model", - "path": "/tmp/outputs/model/data"}]}' - tekton.dev/input_artifacts: '{"evaluate-model": [{"name": "data-prep-X_test", - "parent_task": "data-prep"}, {"name": "data-prep-y_test", "parent_task": "data-prep"}, - {"name": "train-model-model", "parent_task": "train-model"}], "train-model": - [{"name": "data-prep-X_train", "parent_task": "data-prep"}, {"name": "data-prep-y_train", - "parent_task": "data-prep"}], "validate-model": [{"name": "train-model-model", - "parent_task": "train-model"}]}' - tekton.dev/artifact_bucket: mlpipeline - tekton.dev/artifact_endpoint: minio-service.kubeflow:9000 - tekton.dev/artifact_endpoint_scheme: http:// - tekton.dev/artifact_items: '{"data-prep": [["X_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test"], - ["X_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train"], 
- ["y_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test"], - ["y_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train"]], - "evaluate-model": [["mlpipeline-metrics", "/tmp/outputs/mlpipeline_metrics/data"]], - "train-model": [["model", "$(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model"]], - "validate-model": []}' - sidecar.istio.io/inject: "false" - tekton.dev/template: '' - pipelines.kubeflow.org/big_data_passing_format: $(workspaces.$TASK_NAME.path)/artifacts/$ORIG_PR_NAME/$TASKRUN_NAME/$TASK_PARAM_NAME - pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "iris-model", "name": - "model_obc", "optional": true, "type": "String"}], "name": "Iris Pipeline"}' - labels: - pipelines.kubeflow.org/pipelinename: '' - pipelines.kubeflow.org/generation: '' - spec: + apiVersion: tekton.dev/v1beta1 + kind: PipelineRun + metadata: + name: iris-pipeline + annotations: + tekton.dev/output_artifacts: '{"data-prep": [{"key": "artifacts/$PIPELINERUN/data-prep/X_test.tgz", + "name": "data-prep-X_test", "path": "/tmp/outputs/X_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/X_train.tgz", + "name": "data-prep-X_train", "path": "/tmp/outputs/X_train/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_test.tgz", + "name": "data-prep-y_test", "path": "/tmp/outputs/y_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_train.tgz", + "name": "data-prep-y_train", "path": "/tmp/outputs/y_train/data"}], "evaluate-model": + [{"key": "artifacts/$PIPELINERUN/evaluate-model/mlpipeline-metrics.tgz", "name": + "mlpipeline-metrics", "path": "/tmp/outputs/mlpipeline_metrics/data"}], "train-model": + [{"key": "artifacts/$PIPELINERUN/train-model/model.tgz", "name": "train-model-model", + "path": "/tmp/outputs/model/data"}]}' + tekton.dev/input_artifacts: '{"evaluate-model": [{"name": "data-prep-X_test", + "parent_task": "data-prep"}, {"name": "data-prep-y_test", 
"parent_task": "data-prep"}, + {"name": "train-model-model", "parent_task": "train-model"}], "train-model": + [{"name": "data-prep-X_train", "parent_task": "data-prep"}, {"name": "data-prep-y_train", + "parent_task": "data-prep"}], "validate-model": [{"name": "train-model-model", + "parent_task": "train-model"}]}' + tekton.dev/artifact_bucket: mlpipeline + tekton.dev/artifact_endpoint: minio-service.kubeflow:9000 + tekton.dev/artifact_endpoint_scheme: http:// + tekton.dev/artifact_items: '{"data-prep": [["X_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test"], + ["X_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train"], + ["y_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test"], + ["y_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train"]], + "evaluate-model": [["mlpipeline-metrics", "/tmp/outputs/mlpipeline_metrics/data"]], + "train-model": [["model", "$(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model"]], + "validate-model": []}' + sidecar.istio.io/inject: "false" + tekton.dev/template: '' + pipelines.kubeflow.org/big_data_passing_format: $(workspaces.$TASK_NAME.path)/artifacts/$ORIG_PR_NAME/$TASKRUN_NAME/$TASK_PARAM_NAME + pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "iris-model", "name": + "model_obc", "optional": true, "type": "String"}], "name": "Iris Pipeline"}' + labels: + pipelines.kubeflow.org/pipelinename: '' + pipelines.kubeflow.org/generation: '' + spec: + params: + - name: model_obc + value: iris-model + pipelineSpec: params: - name: model_obc - value: iris-model - pipelineSpec: - params: - - name: model_obc - default: iris-model - tasks: - - name: data-prep - taskSpec: - steps: - - name: main - args: - - --X-train - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train - - --X-test - - 
$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test - - --y-train - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train - - --y-test - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def data_prep( - X_train_file, - X_test_file, - y_train_file, - y_test_file, - ): - import pickle - - import pandas as pd - - from sklearn import datasets - from sklearn.model_selection import train_test_split - - def get_iris_data(): - iris = datasets.load_iris() - data = pd.DataFrame( - { - "sepalLength": iris.data[:, 0], - "sepalWidth": iris.data[:, 1], - "petalLength": iris.data[:, 2], - "petalWidth": iris.data[:, 3], - "species": iris.target, - } - ) - - print("Initial Dataset:") - print(data.head()) - - return data - - def create_training_set(dataset, test_size = 0.3): - # Features - X = dataset[["sepalLength", "sepalWidth", "petalLength", "petalWidth"]] - # Labels - y = dataset["species"] - - # Split dataset into training set and test set - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=test_size, random_state=11 - ) - - return X_train, X_test, y_train, y_test - - def save_pickle(object_file, target_object): - with open(object_file, "wb") as f: - pickle.dump(target_object, f) - - dataset = get_iris_data() - X_train, X_test, y_train, y_test = create_training_set(dataset) - - save_pickle(X_train_file, 
X_train) - save_pickle(X_test_file, X_test) - save_pickle(y_train_file, y_train) - save_pickle(y_test_file, y_test) - - import argparse - _parser = argparse.ArgumentParser(prog='Data prep', description='') - _parser.add_argument("--X-train", dest="X_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--X-test", dest="X_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-train", dest="y_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-test", dest="y_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = data_prep(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: output-taskrun-name - command: - - sh - - -ec - - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: copy-results-artifacts - command: - - sh - - -ec - - | - set -exo pipefail - TOTAL_SIZE=0 - copy_artifact() { + default: iris-model + tasks: + - name: data-prep + taskSpec: + steps: + - name: main + args: + - --X-train + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train + - --X-test + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test + - --y-train + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train + - --y-test + - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 
'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def _make_parent_dirs_and_return_path(file_path: str): + import os + os.makedirs(os.path.dirname(file_path), exist_ok=True) + return file_path + + def data_prep( + X_train_file, + X_test_file, + y_train_file, + y_test_file, + ): + import pickle + + import pandas as pd + + from sklearn import datasets + from sklearn.model_selection import train_test_split + + def get_iris_data(): + iris = datasets.load_iris() + data = pd.DataFrame( + { + "sepalLength": iris.data[:, 0], + "sepalWidth": iris.data[:, 1], + "petalLength": iris.data[:, 2], + "petalWidth": iris.data[:, 3], + "species": iris.target, + } + ) + + print("Initial Dataset:") + print(data.head()) + + return data + + def create_training_set(dataset, test_size = 0.3): + # Features + X = dataset[["sepalLength", "sepalWidth", "petalLength", "petalWidth"]] + # Labels + y = dataset["species"] + + # Split dataset into training set and test set + X_train, X_test, y_train, y_test = train_test_split( + X, y, test_size=test_size, random_state=11 + ) + + return X_train, X_test, y_train, y_test + + def save_pickle(object_file, target_object): + with open(object_file, "wb") as f: + pickle.dump(target_object, f) + + dataset = get_iris_data() + X_train, X_test, y_train, y_test = create_training_set(dataset) + + save_pickle(X_train_file, X_train) + save_pickle(X_test_file, X_test) + save_pickle(y_train_file, y_train) + save_pickle(y_test_file, y_test) + + import argparse + _parser = argparse.ArgumentParser(prog='Data prep', description='') + _parser.add_argument("--X-train", dest="X_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--X-test", dest="X_test_file", 
type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-train", dest="y_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-test", dest="y_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = data_prep(**_parsed_args) + image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: output-taskrun-name + command: + - sh + - -ec + - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: copy-results-artifacts + command: + - sh + - -ec + - | + set -exo pipefail + TOTAL_SIZE=0 + copy_artifact() { + if [ -d "$1" ]; then + tar -czvf "$1".tar.gz "$1" + SUFFIX=".tar.gz" + fi + ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` + TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) + touch "$2" + if [[ $TOTAL_SIZE -lt 3072 ]]; then if [ -d "$1" ]; then - tar -czvf "$1".tar.gz "$1" - SUFFIX=".tar.gz" - fi - ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` - TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) - touch "$2" - if [[ $TOTAL_SIZE -lt 3072 ]]; then - if [ -d "$1" ]; then - tar -tzf "$1".tar.gz > "$2" - elif ! awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then - cp "$1" "$2" - fi + tar -tzf "$1".tar.gz > "$2" + elif ! 
awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then + cp "$1" "$2" fi - } - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train $(results.X-train.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test $(results.X-test.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train $(results.y-train.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test $(results.y-test.path) - onError: continue - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - results: - - name: X-test - description: /tmp/outputs/X_test/data - - name: X-train - description: /tmp/outputs/X_train/data - - name: taskrun-name - - name: y-test - description: /tmp/outputs/y_test/data - - name: y-train - description: /tmp/outputs/y_train/data - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Data prep", "outputs": - [{"name": "X_train"}, {"name": "X_test"}, {"name": "y_train"}, {"name": - "y_test"}], "version": "Data prep@sha256=5aeb512900f57983c9f643ec30ddb4ccc66490a443269b51ce0a67d57cb373b0"}' - workspaces: - - name: data-prep + fi + } + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train $(results.X-train.path) + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test $(results.X-test.path) + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train $(results.y-train.path) + copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test $(results.y-test.path) + onError: continue + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.labels['custom.tekton.dev/originalPipelineRun'] + results: + - name: X-test + description: /tmp/outputs/X_test/data + - name: X-train + description: /tmp/outputs/X_train/data + - name: taskrun-name + - name: y-test + description: /tmp/outputs/y_test/data + - name: y-train + description: /tmp/outputs/y_train/data + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Data prep", "outputs": + [{"name": "X_train"}, {"name": "X_test"}, {"name": "y_train"}, {"name": + "y_test"}], "version": "Data prep@sha256=5aeb512900f57983c9f643ec30ddb4ccc66490a443269b51ce0a67d57cb373b0"}' workspaces: - name: data-prep - workspace: iris-pipeline - - name: train-model - params: - - name: data-prep-trname - value: $(tasks.data-prep.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --X-train - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_train - - --y-train - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_train - - --model - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def train_model( - X_train_file, - y_train_file, - model_file, - ): - import pickle - - from sklearn.ensemble import RandomForestClassifier - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = 
pickle.load(f) - - return target_object - - def save_pickle(object_file, target_object): - with open(object_file, "wb") as f: - pickle.dump(target_object, f) - - def train_iris(X_train, y_train): - model = RandomForestClassifier(n_estimators=100) - model.fit(X_train, y_train) - - return model - - X_train = load_pickle(X_train_file) - y_train = load_pickle(y_train_file) - - model = train_iris(X_train, y_train) - - save_pickle(model_file, model) - - import argparse - _parser = argparse.ArgumentParser(prog='Train model', description='') - _parser.add_argument("--X-train", dest="X_train_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-train", dest="y_train_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--model", dest="model_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = train_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: output-taskrun-name - command: - - sh - - -ec - - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: copy-results-artifacts - command: - - sh - - -ec - - | - set -exo pipefail - TOTAL_SIZE=0 - copy_artifact() { + workspaces: + - name: data-prep + workspace: iris-pipeline + - name: train-model + params: + - name: data-prep-trname + value: $(tasks.data-prep.results.taskrun-name) + taskSpec: + steps: + - name: main + args: + - --X-train + - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_train + - --y-train + - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_train + - --model + - 
$(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def _make_parent_dirs_and_return_path(file_path: str): + import os + os.makedirs(os.path.dirname(file_path), exist_ok=True) + return file_path + + def train_model( + X_train_file, + y_train_file, + model_file, + ): + import pickle + + from sklearn.ensemble import RandomForestClassifier + + def load_pickle(object_file): + with open(object_file, "rb") as f: + target_object = pickle.load(f) + + return target_object + + def save_pickle(object_file, target_object): + with open(object_file, "wb") as f: + pickle.dump(target_object, f) + + def train_iris(X_train, y_train): + model = RandomForestClassifier(n_estimators=100) + model.fit(X_train, y_train) + + return model + + X_train = load_pickle(X_train_file) + y_train = load_pickle(y_train_file) + + model = train_iris(X_train, y_train) + + save_pickle(model_file, model) + + import argparse + _parser = argparse.ArgumentParser(prog='Train model', description='') + _parser.add_argument("--X-train", dest="X_train_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-train", dest="y_train_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--model", dest="model_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = train_model(**_parsed_args) + image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: 
metadata.labels['custom.tekton.dev/originalPipelineRun'] + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: output-taskrun-name + command: + - sh + - -ec + - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" + - image: registry.access.redhat.com/ubi8/ubi-minimal + name: copy-results-artifacts + command: + - sh + - -ec + - | + set -exo pipefail + TOTAL_SIZE=0 + copy_artifact() { + if [ -d "$1" ]; then + tar -czvf "$1".tar.gz "$1" + SUFFIX=".tar.gz" + fi + ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` + TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) + touch "$2" + if [[ $TOTAL_SIZE -lt 3072 ]]; then if [ -d "$1" ]; then - tar -czvf "$1".tar.gz "$1" - SUFFIX=".tar.gz" + tar -tzf "$1".tar.gz > "$2" + elif ! awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then + cp "$1" "$2" fi - ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` - TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) - touch "$2" - if [[ $TOTAL_SIZE -lt 3072 ]]; then - if [ -d "$1" ]; then - tar -tzf "$1".tar.gz > "$2" - elif ! 
awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then - cp "$1" "$2" - fi - fi - } - copy_artifact $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model $(results.model.path) - onError: continue - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: data-prep-trname - results: - - name: model - description: /tmp/outputs/model/data - - name: taskrun-name - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Train model", - "outputs": [{"name": "model"}], "version": "Train model@sha256=cb1fbd399ee5849dcdfaafced23a0496cae1d5861795062b22512b766ec418ce"}' - workspaces: - - name: train-model + fi + } + copy_artifact $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model $(results.model.path) + onError: continue + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] + params: + - name: data-prep-trname + results: + - name: model + description: /tmp/outputs/model/data + - name: taskrun-name + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Train model", + "outputs": [{"name": "model"}], "version": "Train model@sha256=cb1fbd399ee5849dcdfaafced23a0496cae1d5861795062b22512b766ec418ce"}' workspaces: - name: train-model - workspace: iris-pipeline - runAfter: - - data-prep - - data-prep - - name: evaluate-model + workspaces: + - name: train-model + workspace: iris-pipeline + runAfter: + - data-prep + - data-prep + - name: evaluate-model + params: + - name: data-prep-trname + value: $(tasks.data-prep.results.taskrun-name) + - name: train-model-trname + value: $(tasks.train-model.results.taskrun-name) + taskSpec: + steps: + - name: main + args: + - --X-test + - 
$(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_test + - --y-test + - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_test + - --model + - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model + - --mlpipeline-metrics + - /tmp/outputs/mlpipeline_metrics/data + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def _make_parent_dirs_and_return_path(file_path: str): + import os + os.makedirs(os.path.dirname(file_path), exist_ok=True) + return file_path + + def evaluate_model( + X_test_file, + y_test_file, + model_file, + mlpipeline_metrics_file, + ): + import json + import pickle + + from sklearn.metrics import accuracy_score + + def load_pickle(object_file): + with open(object_file, "rb") as f: + target_object = pickle.load(f) + + return target_object + + X_test = load_pickle(X_test_file) + y_test = load_pickle(y_test_file) + model = load_pickle(model_file) + + y_pred = model.predict(X_test) + + accuracy_score_metric = accuracy_score(y_test, y_pred) + print(f"Accuracy: {accuracy_score_metric}") + + metrics = { + "metrics": [ + { + "name": "accuracy-score", + "numberValue": accuracy_score_metric, + "format": "PERCENTAGE", + }, + ] + } + + with open(mlpipeline_metrics_file, "w") as f: + json.dump(metrics, f) + + import argparse + _parser = argparse.ArgumentParser(prog='Evaluate model', description='') + _parser.add_argument("--X-test", dest="X_test_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--y-test", dest="y_test_file", type=str, required=True, 
default=argparse.SUPPRESS) + _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) + _parser.add_argument("--mlpipeline-metrics", dest="mlpipeline_metrics_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = evaluate_model(**_parsed_args) + image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] params: - name: data-prep-trname - value: $(tasks.data-prep.results.taskrun-name) - name: train-model-trname - value: $(tasks.train-model.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --X-test - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_test - - --y-test - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_test - - --model - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model - - --mlpipeline-metrics - - /tmp/outputs/mlpipeline_metrics/data - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def evaluate_model( - X_test_file, - y_test_file, - model_file, - mlpipeline_metrics_file, - ): - import json - import pickle - - from sklearn.metrics import accuracy_score - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - 
return target_object - - X_test = load_pickle(X_test_file) - y_test = load_pickle(y_test_file) - model = load_pickle(model_file) - - y_pred = model.predict(X_test) - - accuracy_score_metric = accuracy_score(y_test, y_pred) - print(f"Accuracy: {accuracy_score_metric}") - - metrics = { - "metrics": [ - { - "name": "accuracy-score", - "numberValue": accuracy_score_metric, - "format": "PERCENTAGE", - }, - ] - } - - with open(mlpipeline_metrics_file, "w") as f: - json.dump(metrics, f) - - import argparse - _parser = argparse.ArgumentParser(prog='Evaluate model', description='') - _parser.add_argument("--X-test", dest="X_test_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-test", dest="y_test_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--mlpipeline-metrics", dest="mlpipeline_metrics_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = evaluate_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: data-prep-trname - - name: train-model-trname - stepTemplate: - volumeMounts: - - name: mlpipeline-metrics - mountPath: /tmp/outputs/mlpipeline_metrics - volumes: + stepTemplate: + volumeMounts: - name: mlpipeline-metrics - emptyDir: {} - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Evaluate model", - "outputs": [{"name": "mlpipeline_metrics", "type": "Metrics"}], "version": - "Evaluate model@sha256=f398e65faecc6f5a4ba11a2c78d8a2274e3ede205a0e199c8bb615531a3abd4a"}' - workspaces: - - name: evaluate-model + mountPath: /tmp/outputs/mlpipeline_metrics + 
volumes: + - name: mlpipeline-metrics + emptyDir: {} + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Evaluate model", + "outputs": [{"name": "mlpipeline_metrics", "type": "Metrics"}], "version": + "Evaluate model@sha256=f398e65faecc6f5a4ba11a2c78d8a2274e3ede205a0e199c8bb615531a3abd4a"}' workspaces: - name: evaluate-model - workspace: iris-pipeline - runAfter: - - data-prep - - data-prep - - train-model - - name: validate-model + workspaces: + - name: evaluate-model + workspace: iris-pipeline + runAfter: + - data-prep + - data-prep + - train-model + - name: validate-model + params: + - name: train-model-trname + value: $(tasks.train-model.results.taskrun-name) + taskSpec: + steps: + - name: main + args: + - --model + - $(workspaces.validate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model + command: + - sh + - -c + - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location + 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m + pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' + --user) && "$0" "$@" + - sh + - -ec + - | + program_path=$(mktemp) + printf "%s" "$0" > "$program_path" + python3 -u "$program_path" "$@" + - | + def validate_model(model_file): + import pickle + + def load_pickle(object_file): + with open(object_file, "rb") as f: + target_object = pickle.load(f) + + return target_object + + model = load_pickle(model_file) + + input_values = [[5, 3, 1.6, 0.2]] + + print(f"Performing test prediction on {input_values}") + result = model.predict(input_values) + + print(f"Response: {result}") + + import argparse + _parser = argparse.ArgumentParser(prog='Validate model', description='') + _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) + _parsed_args = vars(_parser.parse_args()) + + _outputs = validate_model(**_parsed_args) 
+ image: registry.access.redhat.com/ubi8/python-38 + env: + - name: ORIG_PR_NAME + valueFrom: + fieldRef: + fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] params: - name: train-model-trname - value: $(tasks.train-model.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --model - - $(workspaces.validate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def validate_model(model_file): - import pickle - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - return target_object - - model = load_pickle(model_file) - - input_values = [[5, 3, 1.6, 0.2]] - - print(f"Performing test prediction on {input_values}") - result = model.predict(input_values) - - print(f"Response: {result}") - - import argparse - _parser = argparse.ArgumentParser(prog='Validate model', description='') - _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = validate_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: train-model-trname - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Validate model", - "outputs": [], "version": "Validate model@sha256=53d18ff94fc8f164e7d8455f2c87fa7fdac17e7502502aaa52012e4247d089ee"}' - 
workspaces: - - name: validate-model + metadata: + labels: + pipelines.kubeflow.org/cache_enabled: "true" + annotations: + pipelines.kubeflow.org/component_spec_digest: '{"name": "Validate model", + "outputs": [], "version": "Validate model@sha256=53d18ff94fc8f164e7d8455f2c87fa7fdac17e7502502aaa52012e4247d089ee"}' workspaces: - name: validate-model - workspace: iris-pipeline - runAfter: - - train-model workspaces: - - name: iris-pipeline + - name: validate-model + workspace: iris-pipeline + runAfter: + - train-model workspaces: - name: iris-pipeline - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi + workspaces: + - name: iris-pipeline + volumeClaimTemplate: + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 2Gi diff --git a/controllers/testdata/declarative/case_6/config.yaml b/controllers/testdata/declarative/case_6/config.yaml new file mode 100644 index 000000000..a03247a37 --- /dev/null +++ b/controllers/testdata/declarative/case_6/config.yaml @@ -0,0 +1,10 @@ +# When a complete DSPA is deployed with (defaults specified) +Images: + ApiServer: api-server:test6 + Artifact: artifact-manager:test6 + PersistentAgent: persistenceagent:test6 + ScheduledWorkflow: scheduledworkflow:test6 + Cache: ubi-minimal:test6 + MoveResultsImage: busybox:test6 + MariaDB: mariadb:test6 + OAuthProxy: oauth-proxy:test6 diff --git a/controllers/testdata/declarative/case_6/deploy/cr.yaml b/controllers/testdata/declarative/case_6/deploy/cr.yaml new file mode 100644 index 000000000..a04a407a1 --- /dev/null +++ b/controllers/testdata/declarative/case_6/deploy/cr.yaml @@ -0,0 +1,90 @@ +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: testdsp6 +spec: + dspVersion: v2 + apiServer: + deploy: true + image: api-server:test6 + applyTektonCustomResource: true + archiveLogs: false + artifactImage: artifact-manager:test6 + cacheImage: 
ubi-minimal:test6 + moveResultsImage: busybox:test6 + injectDefaultScript: true + stripEOF: true + enableOauth: true + enableSamplePipeline: true + terminateStatus: Cancelled + trackArtifacts: true + dbConfigConMaxLifetimeSec: 125 + collectMetrics: true + autoUpdatePipelineDefaultVersion: true + resources: + requests: + cpu: "1231m" + memory: "1Gi" + limits: + cpu: "2522m" + memory: "5Gi" + persistenceAgent: + deploy: true + image: persistenceagent:test6 + numWorkers: 5 + resources: + requests: + cpu: "1233m" + memory: "1Gi" + limits: + cpu: "2524m" + memory: "5Gi" + scheduledWorkflow: + deploy: true + image: scheduledworkflow:test6 + cronScheduleTimezone: EST + resources: + requests: + cpu: "1235m" + memory: "1Gi" + limits: + cpu: "2526m" + memory: "5Gi" + mlpipelineUI: + deploy: true + image: frontend:test6 + configMap: some-test-configmap + resources: + requests: + cpu: "1239m" + memory: "1Gi" + limits: + cpu: "2530m" + memory: "5Gi" + database: + mariaDB: + deploy: true + image: mariadb:test6 + username: testuser + pipelineDBName: randomDBName + pvcSize: 32Gi + resources: + requests: + cpu: "1212m" + memory: "1Gi" + limits: + cpu: "2554m" + memory: "5Gi" + objectStorage: + minio: + deploy: true + image: minio:test6 + bucket: mlpipeline + pvcSize: 40Gi + resources: + requests: + cpu: "1334m" + memory: "1Gi" + limits: + cpu: "2535m" + memory: "5Gi" diff --git a/controllers/testdata/declarative/case_6/expected/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/apiserver_deployment.yaml new file mode 100644 index 000000000..bdf2f827e --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/apiserver_deployment.yaml @@ -0,0 +1,214 @@ +apiVersion: apps/v1ds-pipeline-testdsp6 +kind: Deployment +metadata: + name: ds-pipeline-testdsp6 + namespace: default + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp2 +spec: + selector: + matchLabels: + app: ds-pipeline-testdsp6 + component: 
data-science-pipelines + dspa: testdsp2 + template: + metadata: + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines + dspa: testdsp2 + spec: + containers: + - env: + - name: POD_NAMESPACE + value: "default" + - name: DBCONFIG_USER + value: "testuser" + - name: DBCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp2" + - name: DBCONFIG_DBNAME + value: "randomDBName" + - name: DBCONFIG_HOST + value: "mariadb-testdsp2.default.svc.cluster.local" + - name: DBCONFIG_PORT + value: "3306" + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp2.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp2" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test2" + - name: ARCHIVE_LOGS + value: "false" + - name: EXECUTIONTYPE + value: PipelineRun + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp2" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" + - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION + value: "true" + - name: DBCONFIG_CONMAXLIFETIMESEC + value: "125" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" + - name: OBJECTSTORECONFIG_BUCKETNAME + value: "mlpipeline" + - name: OBJECTSTORECONFIG_ACCESSKEY + valueFrom: + secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECRETACCESSKEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + - name: OBJECTSTORECONFIG_SECURE + value: "false" + - name: 
MINIO_SERVICE_SERVICE_HOST + value: "minio-testdsp2.default.svc.cluster.local" + - name: MINIO_SERVICE_SERVICE_PORT + value: "9000" + - name: CACHE_IMAGE + value: "ubi-minimal:test2" + - name: MOVERESULTS_IMAGE + value: "busybox:test2" + - name: METADATA_GRPC_SERVICE_SERVICE_HOST + value: "ds-pipeline-metadata-grpc-testdsp2.default.svc.cluster.local" + - name: METADATA_GRPC_SERVICE_SERVICE_PORT + value: "8080" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp6.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" + image: api-server:test2 + imagePullPolicy: Always + name: ds-pipeline-api-server + ports: + - containerPort: 8888 + name: http + protocol: TCP + - containerPort: 8887 + name: grpc + protocol: TCP + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:8888/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 1231m + memory: 1Gi + limits: + cpu: 2522m + memory: 5Gi + volumeMounts: + - mountPath: /config/sample_config.json + name: sample-config + subPath: sample_config.json + - mountPath: /samples/ + name: sample-pipeline + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-testdsp6 + - --upstream=http://localhost:8888 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp6","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp6","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - 
--skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test2 + ports: + - containerPort: 8443 + name: oauth + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: oauth + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + volumes: + - name: proxy-tls + secret: + secretName: ds-pipelines-proxy-tls-testdsp2 + defaultMode: 420 + - configMap: + defaultMode: 420 + name: sample-config-testdsp2 + name: sample-config + - configMap: + defaultMode: 420 + name: sample-pipeline-testdsp2 + name: sample-pipeline + serviceAccountName: ds-pipeline-testdsp6 diff --git a/controllers/testdata/declarative/case_6/expected/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_6/expected/configmap_artifact_script.yaml new file mode 100644 index 000000000..9294a70e1 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/configmap_artifact_script.yaml @@ -0,0 +1,35 @@ +apiVersion: v1 +data: + artifact_script: |- + #!/usr/bin/env sh + push_artifact() { + workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") + workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) + artifact_name=$(basename $2) + if [ -f "$workspace_dest/$artifact_name" ]; then + echo sending to: ${workspace_dest}/${artifact_name} + tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} + aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + elif [ -f "$2" ]; then + tar 
-cvzf $1.tgz -C $(dirname $2) ${artifact_name} + aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + else + echo "$2 file does not exist. Skip artifact tracking for $1" + fi + } + push_log() { + cat /var/log/containers/$PODNAME*$NAMESPACE*step-main*.log > step-main.log + push_artifact main-log step-main.log + } + strip_eof() { + if [ -f "$2" ]; then + awk 'NF' $2 | head -c -1 > $1_temp_save && cp $1_temp_save $2 + fi + } +kind: ConfigMap +metadata: + name: ds-pipeline-artifact-script-testdsp2 + namespace: default + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines diff --git a/controllers/testdata/declarative/case_6/expected/mariadb_deployment.yaml b/controllers/testdata/declarative/case_6/expected/mariadb_deployment.yaml new file mode 100644 index 000000000..e1a326516 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/mariadb_deployment.yaml @@ -0,0 +1,79 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: mariadb-testdsp6 + namespace: default + labels: + app: mariadb-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + strategy: + type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default Rolling strategy + selector: + matchLabels: + app: mariadb-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + template: + metadata: + labels: + app: mariadb-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - name: mariadb + image: mariadb:test2 + ports: + - containerPort: 3306 + protocol: TCP + readinessProbe: + exec: + command: + - /bin/sh + - "-i" + - "-c" + - >- + MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D + $MYSQL_DATABASE -e 'SELECT 1' + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + livenessProbe: + failureThreshold: 3 + 
initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + tcpSocket: + port: 3306 + timeoutSeconds: 1 + env: + - name: MYSQL_USER + value: "testuser" + - name: MYSQL_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp6" + - name: MYSQL_DATABASE + value: "randomDBName" + - name: MYSQL_ALLOW_EMPTY_PASSWORD + value: "true" + resources: + requests: + cpu: 1212m + memory: 1Gi + limits: + cpu: 2554m + memory: 5Gi + volumeMounts: + - name: mariadb-persistent-storage + mountPath: /var/lib/mysql + volumes: + - name: mariadb-persistent-storage + persistentVolumeClaim: + claimName: mariadb-testdsp6 diff --git a/controllers/testdata/declarative/case_6/expected/minio_deployment.yaml b/controllers/testdata/declarative/case_6/expected/minio_deployment.yaml new file mode 100644 index 000000000..8f8b3b930 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/minio_deployment.yaml @@ -0,0 +1,75 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: minio-testdsp6 + namespace: default + labels: + app: minio-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + selector: + matchLabels: + app: minio-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + strategy: + type: Recreate + template: + metadata: + labels: + app: minio-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - args: + - server + - /data + env: + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + image: minio:test2 + name: minio + ports: + - containerPort: 9000 + protocol: TCP + livenessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + tcpSocket: + port: 9000 + initialDelaySeconds: 5 + timeoutSeconds: 1 + 
periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + requests: + cpu: 1334m + memory: 1Gi + limits: + cpu: 2535m + memory: 5Gi + volumeMounts: + - mountPath: /data + name: data + subPath: minio + volumes: + - name: data + persistentVolumeClaim: + claimName: minio-testdsp6 diff --git a/controllers/testdata/declarative/case_6/expected/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_6/expected/mlpipelines-ui_deployment.yaml new file mode 100644 index 000000000..3faec65f6 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/mlpipelines-ui_deployment.yaml @@ -0,0 +1,153 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-ui-testdsp6 + namespace: default + labels: + app: ds-pipeline-ui-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + selector: + matchLabels: + app: ds-pipeline-ui-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-ui-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - env: + - name: VIEWER_TENSORBOARD_POD_TEMPLATE_SPEC_PATH + value: /etc/config/viewer-pod-template.json + - name: MINIO_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + key: "accesskey" + name: "mlpipeline-minio-artifact" + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + key: "secretkey" + name: "mlpipeline-minio-artifact" + - name: ALLOW_CUSTOM_VISUALIZATIONS + value: "true" + - name: ARGO_ARCHIVE_LOGS + value: "true" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp6 + - name: ML_PIPELINE_SERVICE_PORT + value: '8888' + - name: METADATA_ENVOY_SERVICE_SERVICE_HOST + value: ds-pipeline-metadata-envoy-testdsp6 + - name: METADATA_ENVOY_SERVICE_SERVICE_PORT + value: "9090" + image: frontend:test2 + 
imagePullPolicy: IfNotPresent + livenessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:3000/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + name: ds-pipeline-ui + ports: + - containerPort: 3000 + protocol: TCP + readinessProbe: + exec: + command: + - wget + - -q + - -S + - -O + - '-' + - http://localhost:3000/apis/v1beta1/healthz + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 1239m + memory: 1Gi + limits: + cpu: 2530m + memory: 5Gi + volumeMounts: + - mountPath: /etc/config + name: config-volume + readOnly: true + - name: oauth-proxy + args: + - --https-address=:8443 + - --provider=openshift + - --openshift-service-account=ds-pipeline-ui-testdsp6 + - --upstream=http://localhost:3000 + - --tls-cert=/etc/tls/private/tls.crt + - --tls-key=/etc/tls/private/tls.key + - --cookie-secret=SECRET + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp6","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp6","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' + image: oauth-proxy:test2 + ports: + - containerPort: 8443 + name: https + protocol: TCP + livenessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 30 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /oauth/healthz + port: 8443 + scheme: HTTPS + initialDelaySeconds: 5 + timeoutSeconds: 1 + periodSeconds: 5 + successThreshold: 1 + failureThreshold: 3 + resources: + limits: + cpu: 100m + memory: 256Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - mountPath: /etc/tls/private + name: proxy-tls + serviceAccountName: ds-pipeline-ui-testdsp6 + volumes: + - configMap: + 
name: some-test-configmap + defaultMode: 420 + name: config-volume + - name: proxy-tls + secret: + secretName: ds-pipelines-ui-proxy-tls-testdsp6 + defaultMode: 420 diff --git a/controllers/testdata/declarative/case_6/expected/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_6/expected/persistence-agent_deployment.yaml new file mode 100644 index 000000000..afed69995 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/persistence-agent_deployment.yaml @@ -0,0 +1,76 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-persistenceagent-testdsp6 + namespace: default + labels: + app: ds-pipeline-persistenceagent-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + selector: + matchLabels: + app: ds-pipeline-persistenceagent-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + template: + metadata: + annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-persistenceagent-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - env: + - name: NAMESPACE + value: "default" + - name: TTL_SECONDS_AFTER_WORKFLOW_FINISH + value: "86400" + - name: NUM_WORKERS + value: "2" + - name: KUBEFLOW_USERID_HEADER + value: kubeflow-userid + - name: KUBEFLOW_USERID_PREFIX + value: "" + - name: EXECUTIONTYPE + value: PipelineRun + image: persistenceagent:test2 + imagePullPolicy: IfNotPresent + name: ds-pipeline-persistenceagent + command: + - persistence_agent + - "--logtostderr=true" + - "--ttlSecondsAfterWorkflowFinish=86400" + - "--numWorker=5" + - "--mlPipelineAPIServerName=ds-pipeline-testdsp6" + - "--namespace=testdsp6" + - "--mlPipelineServiceHttpPort=8888" + - "--mlPipelineServiceGRPCPort=8887" + livenessProbe: + exec: + command: + - test + - -x + - persistence_agent + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - test + - -x + - persistence_agent + 
initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 1233m + memory: 1Gi + limits: + cpu: 2524m + memory: 5Gi + serviceAccountName: ds-pipeline-persistenceagent-testdsp6 diff --git a/controllers/testdata/declarative/case_6/expected/sample-config.yaml.tmpl b/controllers/testdata/declarative/case_6/expected/sample-config.yaml.tmpl new file mode 100644 index 000000000..f5ca8011c --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/sample-config.yaml.tmpl @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-config-testdsp6 + namespace: default + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines +data: + sample_config.json: |- + [ + { + "name": "[Demo] iris-training", + "description": "[source code](https://github.com/opendatahub-io/data-science-pipelines/tree/master/samples/iris-sklearn) A simple pipeline to demonstrate a basic ML Training workflow", + "file": "/samples/iris-pipeline-compiled.yaml" + } + ] diff --git a/controllers/testdata/declarative/case_6/expected/sample-pipeline.yaml.tmpl b/controllers/testdata/declarative/case_6/expected/sample-pipeline.yaml.tmpl new file mode 100644 index 000000000..92cb390fb --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/sample-pipeline.yaml.tmpl @@ -0,0 +1,252 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-pipeline-testdsp6 + namespace: default + labels: + app: ds-pipeline-testdsp6 + component: data-science-pipelines +data: + iris-pipeline-compiled.yaml: |- + # PIPELINE DEFINITION + # Name: iris-training-pipeline + # Inputs: + # min_max_scaler: bool + # neighbors: int + # standard_scaler: bool + # Outputs: + # train-model-metrics: system.ClassificationMetrics + components: + comp-create-dataset: + executorLabel: exec-create-dataset + outputDefinitions: + artifacts: + iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + comp-normalize-dataset: + executorLabel: 
exec-normalize-dataset + inputDefinitions: + artifacts: + input_iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + parameters: + min_max_scaler: + parameterType: BOOLEAN + standard_scaler: + parameterType: BOOLEAN + outputDefinitions: + artifacts: + normalized_iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + comp-train-model: + executorLabel: exec-train-model + inputDefinitions: + artifacts: + normalized_iris_dataset: + artifactType: + schemaTitle: system.Dataset + schemaVersion: 0.0.1 + parameters: + n_neighbors: + parameterType: NUMBER_INTEGER + outputDefinitions: + artifacts: + metrics: + artifactType: + schemaTitle: system.ClassificationMetrics + schemaVersion: 0.0.1 + model: + artifactType: + schemaTitle: system.Model + schemaVersion: 0.0.1 + deploymentSpec: + executors: + exec-create-dataset: + container: + args: + - --executor_input + - '{{$}}' + - --function_to_execute + - create_dataset + command: + - sh + - -c + - "\nif ! 
[ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\ + \ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\ + \ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.1'\ + \ && \"$0\" \"$@\"\n" + - sh + - -ec + - 'program_path=$(mktemp -d) + + printf "%s" "$0" > "$program_path/ephemeral_component.py" + + python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@" + + ' + - "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\ + \ *\n\ndef create_dataset(iris_dataset: Output[Dataset]):\n import pandas\ + \ as pd\n\n csv_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'\n\ + \ col_names = [\n 'Sepal_Length', 'Sepal_Width', 'Petal_Length',\ + \ 'Petal_Width', 'Labels'\n ]\n df = pd.read_csv(csv_url, names=col_names)\n\ + \n with open(iris_dataset.path, 'w') as f:\n df.to_csv(f)\n\n" + image: quay.io/rmartine/data-science:test9 + exec-normalize-dataset: + container: + args: + - --executor_input + - '{{$}}' + - --function_to_execute + - normalize_dataset + command: + - sh + - -c + - "\nif ! 
[ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\ + \ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\ + \ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.1'\ + \ && \"$0\" \"$@\"\n" + - sh + - -ec + - 'program_path=$(mktemp -d) + + printf "%s" "$0" > "$program_path/ephemeral_component.py" + + python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@" + + ' + - "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\ + \ *\n\ndef normalize_dataset(\n input_iris_dataset: Input[Dataset],\n\ + \ normalized_iris_dataset: Output[Dataset],\n standard_scaler: bool,\n\ + \ min_max_scaler: bool,\n):\n if standard_scaler is min_max_scaler:\n\ + \ raise ValueError(\n 'Exactly one of standard_scaler\ + \ or min_max_scaler must be True.')\n\n import pandas as pd\n from\ + \ sklearn.preprocessing import MinMaxScaler\n from sklearn.preprocessing\ + \ import StandardScaler\n\n with open(input_iris_dataset.path) as f:\n\ + \ df = pd.read_csv(f)\n labels = df.pop('Labels')\n\n if standard_scaler:\n\ + \ scaler = StandardScaler()\n if min_max_scaler:\n scaler\ + \ = MinMaxScaler()\n\n df = pd.DataFrame(scaler.fit_transform(df))\n\ + \ df['Labels'] = labels\n normalized_iris_dataset.metadata['state']\ + \ = \"Normalized\"\n with open(normalized_iris_dataset.path, 'w') as\ + \ f:\n df.to_csv(f)\n\n" + image: quay.io/rmartine/data-science:test9 + exec-train-model: + container: + args: + - --executor_input + - '{{$}}' + - --function_to_execute + - train_model + command: + - sh + - -c + - "\nif ! 
[ -x \"$(command -v pip)\" ]; then\n python3 -m ensurepip ||\ + \ python3 -m ensurepip --user || apt-get install python3-pip\nfi\n\nPIP_DISABLE_PIP_VERSION_CHECK=1\ + \ python3 -m pip install --quiet --no-warn-script-location 'kfp==2.0.1'\ + \ && \"$0\" \"$@\"\n" + - sh + - -ec + - 'program_path=$(mktemp -d) + + printf "%s" "$0" > "$program_path/ephemeral_component.py" + + python3 -m kfp.components.executor_main --component_module_path "$program_path/ephemeral_component.py" "$@" + + ' + - "\nimport kfp\nfrom kfp import dsl\nfrom kfp.dsl import *\nfrom typing import\ + \ *\n\ndef train_model(\n normalized_iris_dataset: Input[Dataset],\n\ + \ model: Output[Model],\n metrics: Output[ClassificationMetrics],\n\ + \ n_neighbors: int,\n):\n import pickle\n\n import pandas as pd\n\ + \ from sklearn.neighbors import KNeighborsClassifier\n\n from sklearn.metrics\ + \ import roc_curve\n from sklearn.model_selection import train_test_split,\ + \ cross_val_predict\n from sklearn.metrics import confusion_matrix\n\n\ + \n with open(normalized_iris_dataset.path) as f:\n df = pd.read_csv(f)\n\ + \n y = df.pop('Labels')\n X = df\n\n X_train, X_test, y_train,\ + \ y_test = train_test_split(X, y, random_state=0)\n\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\n\ + \ clf.fit(X_train, y_train)\n\n predictions = cross_val_predict(\n\ + \ clf, X_train, y_train, cv=3)\n metrics.log_confusion_matrix(\n\ + \ ['Iris-Setosa', 'Iris-Versicolour', 'Iris-Virginica'],\n \ + \ confusion_matrix(\n y_train,\n predictions).tolist()\ + \ # .tolist() to convert np array to list.\n )\n\n model.metadata['framework']\ + \ = 'scikit-learn'\n with open(model.path, 'wb') as f:\n pickle.dump(clf,\ + \ f)\n\n" + image: quay.io/rmartine/data-science:test9 + pipelineInfo: + name: iris-training-pipeline + root: + dag: + outputs: + artifacts: + train-model-metrics: + artifactSelectors: + - outputArtifactKey: metrics + producerSubtask: train-model + tasks: + create-dataset: + cachingOptions: + enableCache: 
true + componentRef: + name: comp-create-dataset + taskInfo: + name: create-dataset + normalize-dataset: + cachingOptions: + enableCache: true + componentRef: + name: comp-normalize-dataset + dependentTasks: + - create-dataset + inputs: + artifacts: + input_iris_dataset: + taskOutputArtifact: + outputArtifactKey: iris_dataset + producerTask: create-dataset + parameters: + min_max_scaler: + runtimeValue: + constant: false + standard_scaler: + runtimeValue: + constant: true + taskInfo: + name: normalize-dataset + train-model: + cachingOptions: + enableCache: true + componentRef: + name: comp-train-model + dependentTasks: + - normalize-dataset + inputs: + artifacts: + normalized_iris_dataset: + taskOutputArtifact: + outputArtifactKey: normalized_iris_dataset + producerTask: normalize-dataset + parameters: + n_neighbors: + componentInputParameter: neighbors + taskInfo: + name: train-model + inputDefinitions: + parameters: + min_max_scaler: + parameterType: BOOLEAN + neighbors: + parameterType: NUMBER_INTEGER + standard_scaler: + parameterType: BOOLEAN + outputDefinitions: + artifacts: + train-model-metrics: + artifactType: + schemaTitle: system.ClassificationMetrics + schemaVersion: 0.0.1 + schemaVersion: 2.1.0 + sdkVersion: kfp-2.0.1 diff --git a/controllers/testdata/declarative/case_6/expected/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_6/expected/scheduled-workflow_deployment.yaml new file mode 100644 index 000000000..e0037fb35 --- /dev/null +++ b/controllers/testdata/declarative/case_6/expected/scheduled-workflow_deployment.yaml @@ -0,0 +1,65 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ds-pipeline-scheduledworkflow-testdsp6 + namespace: default + labels: + app: ds-pipeline-scheduledworkflow-testdsp6 + component: data-science-pipelines + dspa: testdsp6 +spec: + selector: + matchLabels: + app: ds-pipeline-scheduledworkflow-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + template: + metadata: + 
annotations: + cluster-autoscaler.kubernetes.io/safe-to-evict: "true" + labels: + app: ds-pipeline-scheduledworkflow-testdsp6 + component: data-science-pipelines + dspa: testdsp6 + spec: + containers: + - env: + - name: NAMESPACE + value: "default" + - name: CRON_SCHEDULE_TIMEZONE + value: "EST" + - name: EXECUTIONTYPE + value: PipelineRun + image: scheduledworkflow:test2 + imagePullPolicy: IfNotPresent + name: ds-pipeline-scheduledworkflow + command: + - controller + - "--logtostderr=true" + - "--namespace=default" + livenessProbe: + exec: + command: + - test + - -x + - controller + initialDelaySeconds: 30 + periodSeconds: 5 + timeoutSeconds: 2 + readinessProbe: + exec: + command: + - test + - -x + - controller + initialDelaySeconds: 3 + periodSeconds: 5 + timeoutSeconds: 2 + resources: + requests: + cpu: 1235m + memory: 1Gi + limits: + cpu: 2526m + memory: 5Gi + serviceAccountName: ds-pipeline-scheduledworkflow-testdsp6 From 027e8abfc98c41acfc1f284a2c3a1caa396baa44 Mon Sep 17 00:00:00 2001 From: ddalvi Date: Fri, 3 Nov 2023 12:38:51 -0400 Subject: [PATCH 41/85] Slim down v2 ClusterRoles --- config/v2/cache/clusterrole.yaml | 1 - config/v2/driver/clusterrole.yaml | 7 ------- .../clusterrole.leaderelection.yaml | 1 - .../controller/clusterrole.clusteraccess.yaml | 18 ------------------ .../controller/clusterrole.tenantaccess.yaml | 1 - .../v2/kfptask/clusterrole.leaderelection.yaml | 1 - .../webhook/clusterrole.clusteraccess.yaml | 6 ------ .../clusterrole.leaderelection.yaml | 1 - .../controller/clusterrole.tenantaccess.yaml | 1 - 9 files changed, 37 deletions(-) diff --git a/config/v2/cache/clusterrole.yaml b/config/v2/cache/clusterrole.yaml index 73f9ab43b..cf977bcc0 100644 --- a/config/v2/cache/clusterrole.yaml +++ b/config/v2/cache/clusterrole.yaml @@ -9,7 +9,6 @@ rules: - certificates.k8s.io resources: - certificatesigningrequests - - certificatesigningrequests/approval verbs: - create - delete diff --git a/config/v2/driver/clusterrole.yaml 
b/config/v2/driver/clusterrole.yaml index 3468389c2..29a24c982 100644 --- a/config/v2/driver/clusterrole.yaml +++ b/config/v2/driver/clusterrole.yaml @@ -13,14 +13,7 @@ rules: resources: - runs - customruns - - runs/finalizers - - customruns/finalizers - - runs/status - - customruns/status - pipelineruns - - task - - taskruns - - conditions verbs: - get - list diff --git a/config/v2/exithandler/clusterrole.leaderelection.yaml b/config/v2/exithandler/clusterrole.leaderelection.yaml index 77a121b48..b4fc8822c 100644 --- a/config/v2/exithandler/clusterrole.leaderelection.yaml +++ b/config/v2/exithandler/clusterrole.leaderelection.yaml @@ -16,5 +16,4 @@ rules: - create - update - delete - - patch - watch diff --git a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml index e6b5e8a9e..a5ca4f9b1 100644 --- a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml +++ b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml @@ -22,23 +22,6 @@ rules: - delete - patch - watch -- apiGroups: - - tekton.dev - resources: - - runs/status - - customruns/status - - taskruns/status - - pipelineruns/status - - runs/finalizers - - customruns/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch - apiGroups: - custom.tekton.dev resources: @@ -55,7 +38,6 @@ rules: - apps resources: - deployments - - deployments/finalizers verbs: - get - list diff --git a/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml index d666f375b..7cbe237f2 100644 --- a/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml +++ b/config/v2/exithandler/controller/clusterrole.tenantaccess.yaml @@ -17,5 +17,4 @@ rules: - create - update - delete - - patch - watch diff --git a/config/v2/kfptask/clusterrole.leaderelection.yaml b/config/v2/kfptask/clusterrole.leaderelection.yaml index f3a2752f9..55a6a866e 
100644 --- a/config/v2/kfptask/clusterrole.leaderelection.yaml +++ b/config/v2/kfptask/clusterrole.leaderelection.yaml @@ -16,5 +16,4 @@ rules: - create - update - delete - - patch - watch diff --git a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml index fc0a84851..8555624bc 100644 --- a/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml +++ b/config/v2/kfptask/webhook/clusterrole.clusteraccess.yaml @@ -16,8 +16,6 @@ rules: - get - list - update - - patch - - watch - apiGroups: - "" resources: @@ -26,8 +24,6 @@ rules: - get - list - update - - patch - - watch - apiGroups: - admissionregistration.k8s.io resources: @@ -57,8 +53,6 @@ rules: - create - update - delete - - patch - - watch - apiGroups: - "" resources: diff --git a/config/v2/pipelineloop/clusterrole.leaderelection.yaml b/config/v2/pipelineloop/clusterrole.leaderelection.yaml index a57b544d5..7412e4c78 100644 --- a/config/v2/pipelineloop/clusterrole.leaderelection.yaml +++ b/config/v2/pipelineloop/clusterrole.leaderelection.yaml @@ -16,5 +16,4 @@ rules: - create - update - delete - - patch - watch diff --git a/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml index 855eab049..f194db425 100644 --- a/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml +++ b/config/v2/pipelineloop/controller/clusterrole.tenantaccess.yaml @@ -17,5 +17,4 @@ rules: - create - update - delete - - patch - watch From bea760e2f63a08a4a39e8db9162ef83b17c8eda6 Mon Sep 17 00:00:00 2001 From: Achyut Madhusudan Date: Mon, 6 Nov 2023 21:44:24 +0530 Subject: [PATCH 42/85] Added missing Tekton ConfigMaps and Secrets. 
Signed-off-by: Achyut Madhusudan --- config/overlays/make-v2deploy/kustomization.yaml | 2 ++ config/v2/configmaps/configartifactbucket.yaml | 8 ++++++++ config/v2/configmaps/configartifactpvc.yaml | 8 ++++++++ config/v2/configmaps/configdefaults.yaml | 8 ++++++++ config/v2/configmaps/configobservability.yaml | 9 +++++++++ config/v2/configmaps/configspire.yaml | 8 ++++++++ config/v2/configmaps/configtrustedsources.yaml | 8 ++++++++ config/v2/configmaps/featureflags.yaml | 8 ++++++++ config/v2/configmaps/kustomization.yaml | 8 ++++++++ config/v2/exithandler/webhook/kustomization.yaml | 1 - config/v2/kfptask/webhook/kustomization.yaml | 1 - .../kfpexithandlerwebhookcertssecret.yaml} | 2 +- .../kfptaskwebhookcertssecret.yaml} | 2 +- config/v2/secrets/kustomization.yaml | 4 ++++ .../v2/secrets/tektonpipelineloopwebhookcertssecret.yaml | 9 +++++++++ 15 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 config/v2/configmaps/configartifactbucket.yaml create mode 100644 config/v2/configmaps/configartifactpvc.yaml create mode 100644 config/v2/configmaps/configdefaults.yaml create mode 100644 config/v2/configmaps/configobservability.yaml create mode 100644 config/v2/configmaps/configspire.yaml create mode 100644 config/v2/configmaps/configtrustedsources.yaml create mode 100644 config/v2/configmaps/featureflags.yaml create mode 100644 config/v2/configmaps/kustomization.yaml rename config/v2/{exithandler/webhook/secret.yaml => secrets/kfpexithandlerwebhookcertssecret.yaml} (84%) rename config/v2/{kfptask/webhook/secret.yaml => secrets/kfptaskwebhookcertssecret.yaml} (87%) create mode 100644 config/v2/secrets/kustomization.yaml create mode 100644 config/v2/secrets/tektonpipelineloopwebhookcertssecret.yaml diff --git a/config/overlays/make-v2deploy/kustomization.yaml b/config/overlays/make-v2deploy/kustomization.yaml index 6d2e0a3ca..b34e8a370 100644 --- a/config/overlays/make-v2deploy/kustomization.yaml +++ b/config/overlays/make-v2deploy/kustomization.yaml @@ 
-3,3 +3,5 @@ kind: Kustomization namespace: openshift-pipelines resources: - ../../v2 +- ../../v2/configmaps +- ../../v2/secrets diff --git a/config/v2/configmaps/configartifactbucket.yaml b/config/v2/configmaps/configartifactbucket.yaml new file mode 100644 index 000000000..2df1c0bad --- /dev/null +++ b/config/v2/configmaps/configartifactbucket.yaml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-artifact-bucket + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git a/config/v2/configmaps/configartifactpvc.yaml b/config/v2/configmaps/configartifactpvc.yaml new file mode 100644 index 000000000..a5d869bbb --- /dev/null +++ b/config/v2/configmaps/configartifactpvc.yaml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-artifact-pvc + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git a/config/v2/configmaps/configdefaults.yaml b/config/v2/configmaps/configdefaults.yaml new file mode 100644 index 000000000..dc48532e7 --- /dev/null +++ b/config/v2/configmaps/configdefaults.yaml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-defaults + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git a/config/v2/configmaps/configobservability.yaml b/config/v2/configmaps/configobservability.yaml new file mode 100644 index 000000000..6a12cdb76 --- /dev/null +++ b/config/v2/configmaps/configobservability.yaml @@ -0,0 +1,9 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-observability + labels: + app.kubernetes.io/component: resolvers + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git 
a/config/v2/configmaps/configspire.yaml b/config/v2/configmaps/configspire.yaml new file mode 100644 index 000000000..c4dc80b44 --- /dev/null +++ b/config/v2/configmaps/configspire.yaml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-spire + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git a/config/v2/configmaps/configtrustedsources.yaml b/config/v2/configmaps/configtrustedsources.yaml new file mode 100644 index 000000000..9c1cd485c --- /dev/null +++ b/config/v2/configmaps/configtrustedsources.yaml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: config-trusted-resources + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git a/config/v2/configmaps/featureflags.yaml b/config/v2/configmaps/featureflags.yaml new file mode 100644 index 000000000..9218692c7 --- /dev/null +++ b/config/v2/configmaps/featureflags.yaml @@ -0,0 +1,8 @@ +kind: ConfigMap +apiVersion: v1 +metadata: + name: feature-flags + labels: + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: tekton-pipelines + operator.tekton.dev/operand-name: tektoncd-pipelines diff --git a/config/v2/configmaps/kustomization.yaml b/config/v2/configmaps/kustomization.yaml new file mode 100644 index 000000000..df5f2f957 --- /dev/null +++ b/config/v2/configmaps/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- configdefaults.yaml +- configobservability.yaml +- configspire.yaml +- featureflags.yaml +- configartifactbucket.yaml +- configartifactpvc.yaml +- configtrustedsources.yaml diff --git a/config/v2/exithandler/webhook/kustomization.yaml b/config/v2/exithandler/webhook/kustomization.yaml index 2a58a8aa8..8202e77ff 100644 --- a/config/v2/exithandler/webhook/kustomization.yaml +++ b/config/v2/exithandler/webhook/kustomization.yaml @@ -5,7 +5,6 @@ 
resources: - mutatingwebhookconfig.yaml - role.yaml - rolebinding.yaml -- secret.yaml - service.yaml - serviceaccount.yaml - validatingwebhookconfig.yaml diff --git a/config/v2/kfptask/webhook/kustomization.yaml b/config/v2/kfptask/webhook/kustomization.yaml index 6692ef450..df691ded5 100644 --- a/config/v2/kfptask/webhook/kustomization.yaml +++ b/config/v2/kfptask/webhook/kustomization.yaml @@ -6,7 +6,6 @@ resources: - mutatingwebhookconfig.yaml - role.yaml - rolebinding.yaml -- secret.yaml - service.yaml - serviceaccount.yaml - validatingwebhookconfig.yaml diff --git a/config/v2/exithandler/webhook/secret.yaml b/config/v2/secrets/kfpexithandlerwebhookcertssecret.yaml similarity index 84% rename from config/v2/exithandler/webhook/secret.yaml rename to config/v2/secrets/kfpexithandlerwebhookcertssecret.yaml index b9546c694..ae60d20fa 100644 --- a/config/v2/exithandler/webhook/secret.yaml +++ b/config/v2/secrets/kfpexithandlerwebhookcertssecret.yaml @@ -6,4 +6,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton pipeline.tekton.dev/release: devel - name: exithandler-webhook-certs + name: kfp-exithandler-webhook-certs diff --git a/config/v2/kfptask/webhook/secret.yaml b/config/v2/secrets/kfptaskwebhookcertssecret.yaml similarity index 87% rename from config/v2/kfptask/webhook/secret.yaml rename to config/v2/secrets/kfptaskwebhookcertssecret.yaml index 8e4b6bb5c..6387033ce 100644 --- a/config/v2/kfptask/webhook/secret.yaml +++ b/config/v2/secrets/kfptaskwebhookcertssecret.yaml @@ -6,4 +6,4 @@ metadata: app.kubernetes.io/instance: default app.kubernetes.io/part-of: kfp-tekton pipeline.tekton.dev/release: devel - name: webhook-certs + name: kfptask-webhook-certs diff --git a/config/v2/secrets/kustomization.yaml b/config/v2/secrets/kustomization.yaml new file mode 100644 index 000000000..2907d843f --- /dev/null +++ b/config/v2/secrets/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- kfpexithandlerwebhookcertssecret.yaml +- 
kfptaskwebhookcertssecret.yaml +- tektonpipelineloopwebhookcertssecret.yaml diff --git a/config/v2/secrets/tektonpipelineloopwebhookcertssecret.yaml b/config/v2/secrets/tektonpipelineloopwebhookcertssecret.yaml new file mode 100644 index 000000000..262a53f52 --- /dev/null +++ b/config/v2/secrets/tektonpipelineloopwebhookcertssecret.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: webhook + app.kubernetes.io/instance: default + app.kubernetes.io/part-of: kfp-tekton + pipeline.tekton.dev/release: devel + name: tektonpipelineloop-webhook-certs From eb73db1c2fd317e46e4c9623d5526bf7a5c6ff71 Mon Sep 17 00:00:00 2001 From: ddalvi Date: Mon, 13 Nov 2023 16:30:57 -0500 Subject: [PATCH 43/85] Adding back run and task related permissions --- config/v2/driver/clusterrole.yaml | 7 +++++++ .../exithandler/controller/clusterrole.clusteraccess.yaml | 6 ++++++ 2 files changed, 13 insertions(+) diff --git a/config/v2/driver/clusterrole.yaml b/config/v2/driver/clusterrole.yaml index 29a24c982..3468389c2 100644 --- a/config/v2/driver/clusterrole.yaml +++ b/config/v2/driver/clusterrole.yaml @@ -13,7 +13,14 @@ rules: resources: - runs - customruns + - runs/finalizers + - customruns/finalizers + - runs/status + - customruns/status - pipelineruns + - task + - taskruns + - conditions verbs: - get - list diff --git a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml index a5ca4f9b1..e8c8a9eae 100644 --- a/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml +++ b/config/v2/exithandler/controller/clusterrole.clusteraccess.yaml @@ -14,6 +14,12 @@ rules: - customruns - taskruns - pipelineruns + - runs/status + - customruns/status + - taskruns/status + - pipelineruns/status + - runs/finalizers + - customruns/finalizers verbs: - get - list From 07a78e2d40a350b00fe7fe003211e2fc23ccf754 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 21 Nov 2023 
18:56:22 -0500 Subject: [PATCH 44/85] Separate name-collided functest cases - Two separate test cases accidentally were merged together due to both being named 'case_6'. Therefore, renamed one to 'case_7' --- .../expected/created/apiserver_deployment.yaml | 6 ++++++ controllers/testdata/declarative/case_7/config.yaml | 12 ++++++++++++ .../declarative/{case_6 => case_7}/deploy/cr.yaml | 0 .../expected/apiserver_deployment.yaml | 0 .../expected/configmap_artifact_script.yaml | 0 .../expected/mariadb_deployment.yaml | 0 .../expected/minio_deployment.yaml | 0 .../expected/mlpipelines-ui_deployment.yaml | 0 .../expected/persistence-agent_deployment.yaml | 0 .../expected/sample-config.yaml.tmpl | 0 .../expected/sample-pipeline.yaml.tmpl | 0 .../expected/scheduled-workflow_deployment.yaml | 0 12 files changed, 18 insertions(+) create mode 100644 controllers/testdata/declarative/case_7/config.yaml rename controllers/testdata/declarative/{case_6 => case_7}/deploy/cr.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/apiserver_deployment.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/configmap_artifact_script.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/mariadb_deployment.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/minio_deployment.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/mlpipelines-ui_deployment.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/persistence-agent_deployment.yaml (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/sample-config.yaml.tmpl (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/sample-pipeline.yaml.tmpl (100%) rename controllers/testdata/declarative/{case_6 => case_7}/expected/scheduled-workflow_deployment.yaml (100%) diff --git
a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml index 897a3a3b8..2cbeda30d 100644 --- a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml @@ -50,6 +50,8 @@ spec: value: "artifact-manager:test6" - name: ARCHIVE_LOGS value: "false" + - name: EXECUTIONTYPE + value: PipelineRun - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME value: testcabundleconfigmap6 - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY @@ -106,6 +108,10 @@ spec: value: "ubi-minimal:test6" - name: MOVERESULTS_IMAGE value: "busybox:test6" + - name: ML_PIPELINE_SERVICE_HOST + value: ds-pipeline-testdsp6.default.svc.cluster.local + - name: ML_PIPELINE_SERVICE_PORT_GRPC + value: "8887" image: api-server:test6 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_7/config.yaml b/controllers/testdata/declarative/case_7/config.yaml new file mode 100644 index 000000000..828f72e62 --- /dev/null +++ b/controllers/testdata/declarative/case_7/config.yaml @@ -0,0 +1,12 @@ +# When a minimal DSPA is deployed +Images: + ApiServer: api-server:test7 + Artifact: artifact-manager:test7 + PersistentAgent: persistenceagent:test7 + ScheduledWorkflow: scheduledworkflow:test7 + Cache: ubi-minimal:test7 + MoveResultsImage: busybox:test7 + MlPipelineUI: frontend:test7 + MariaDB: mariadb:test7 + Minio: minio:test7 + OAuthProxy: oauth-proxy:test7 diff --git a/controllers/testdata/declarative/case_6/deploy/cr.yaml b/controllers/testdata/declarative/case_7/deploy/cr.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/deploy/cr.yaml rename to controllers/testdata/declarative/case_7/deploy/cr.yaml diff --git a/controllers/testdata/declarative/case_6/expected/apiserver_deployment.yaml 
b/controllers/testdata/declarative/case_7/expected/apiserver_deployment.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/expected/apiserver_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/apiserver_deployment.yaml diff --git a/controllers/testdata/declarative/case_6/expected/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_7/expected/configmap_artifact_script.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/expected/configmap_artifact_script.yaml rename to controllers/testdata/declarative/case_7/expected/configmap_artifact_script.yaml diff --git a/controllers/testdata/declarative/case_6/expected/mariadb_deployment.yaml b/controllers/testdata/declarative/case_7/expected/mariadb_deployment.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/expected/mariadb_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/mariadb_deployment.yaml diff --git a/controllers/testdata/declarative/case_6/expected/minio_deployment.yaml b/controllers/testdata/declarative/case_7/expected/minio_deployment.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/expected/minio_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/minio_deployment.yaml diff --git a/controllers/testdata/declarative/case_6/expected/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_7/expected/mlpipelines-ui_deployment.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/expected/mlpipelines-ui_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/mlpipelines-ui_deployment.yaml diff --git a/controllers/testdata/declarative/case_6/expected/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_7/expected/persistence-agent_deployment.yaml similarity index 100% rename from 
controllers/testdata/declarative/case_6/expected/persistence-agent_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/persistence-agent_deployment.yaml diff --git a/controllers/testdata/declarative/case_6/expected/sample-config.yaml.tmpl b/controllers/testdata/declarative/case_7/expected/sample-config.yaml.tmpl similarity index 100% rename from controllers/testdata/declarative/case_6/expected/sample-config.yaml.tmpl rename to controllers/testdata/declarative/case_7/expected/sample-config.yaml.tmpl diff --git a/controllers/testdata/declarative/case_6/expected/sample-pipeline.yaml.tmpl b/controllers/testdata/declarative/case_7/expected/sample-pipeline.yaml.tmpl similarity index 100% rename from controllers/testdata/declarative/case_6/expected/sample-pipeline.yaml.tmpl rename to controllers/testdata/declarative/case_7/expected/sample-pipeline.yaml.tmpl diff --git a/controllers/testdata/declarative/case_6/expected/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_7/expected/scheduled-workflow_deployment.yaml similarity index 100% rename from controllers/testdata/declarative/case_6/expected/scheduled-workflow_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/scheduled-workflow_deployment.yaml From 72c031592e1c408188a454b3f37461e2ea6674a1 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 21 Nov 2023 19:25:58 -0500 Subject: [PATCH 45/85] Fix functest case_7 - Functest expected results not in proper location, correct - Update expected manifests to validate properly --- .../declarative/case_7/deploy/cr.yaml | 20 +++--- .../{ => created}/apiserver_deployment.yaml | 66 ++++++++++--------- .../configmap_artifact_script.yaml | 15 +++-- .../{ => created}/mariadb_deployment.yaml | 20 +++--- .../{ => created}/minio_deployment.yaml | 22 +++---- .../mlpipelines-ui_deployment.yaml | 36 +++++----- .../persistence-agent_deployment.yaml | 22 +++---- .../{ => created}/sample-config.yaml.tmpl | 4 +- .../{ => 
created}/sample-pipeline.yaml.tmpl | 4 +- .../scheduled-workflow_deployment.yaml | 18 ++--- 10 files changed, 120 insertions(+), 107 deletions(-) rename controllers/testdata/declarative/case_7/expected/{ => created}/apiserver_deployment.yaml (78%) rename controllers/testdata/declarative/case_7/expected/{ => created}/configmap_artifact_script.yaml (74%) rename controllers/testdata/declarative/case_7/expected/{ => created}/mariadb_deployment.yaml (86%) rename controllers/testdata/declarative/case_7/expected/{ => created}/minio_deployment.yaml (82%) rename controllers/testdata/declarative/case_7/expected/{ => created}/mlpipelines-ui_deployment.yaml (86%) rename controllers/testdata/declarative/case_7/expected/{ => created}/persistence-agent_deployment.yaml (79%) rename controllers/testdata/declarative/case_7/expected/{ => created}/sample-config.yaml.tmpl (88%) rename controllers/testdata/declarative/case_7/expected/{ => created}/sample-pipeline.yaml.tmpl (99%) rename controllers/testdata/declarative/case_7/expected/{ => created}/scheduled-workflow_deployment.yaml (79%) diff --git a/controllers/testdata/declarative/case_7/deploy/cr.yaml b/controllers/testdata/declarative/case_7/deploy/cr.yaml index a04a407a1..787109894 100644 --- a/controllers/testdata/declarative/case_7/deploy/cr.yaml +++ b/controllers/testdata/declarative/case_7/deploy/cr.yaml @@ -1,17 +1,17 @@ apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 kind: DataSciencePipelinesApplication metadata: - name: testdsp6 + name: testdsp7 spec: dspVersion: v2 apiServer: deploy: true - image: api-server:test6 + image: api-server:test7 applyTektonCustomResource: true archiveLogs: false - artifactImage: artifact-manager:test6 - cacheImage: ubi-minimal:test6 - moveResultsImage: busybox:test6 + artifactImage: artifact-manager:test7 + cacheImage: ubi-minimal:test7 + moveResultsImage: busybox:test7 injectDefaultScript: true stripEOF: true enableOauth: true @@ -30,7 +30,7 @@ spec: memory: "5Gi" 
persistenceAgent: deploy: true - image: persistenceagent:test6 + image: persistenceagent:test7 numWorkers: 5 resources: requests: @@ -41,7 +41,7 @@ spec: memory: "5Gi" scheduledWorkflow: deploy: true - image: scheduledworkflow:test6 + image: scheduledworkflow:test7 cronScheduleTimezone: EST resources: requests: @@ -52,7 +52,7 @@ spec: memory: "5Gi" mlpipelineUI: deploy: true - image: frontend:test6 + image: frontend:test7 configMap: some-test-configmap resources: requests: @@ -64,7 +64,7 @@ spec: database: mariaDB: deploy: true - image: mariadb:test6 + image: mariadb:test7 username: testuser pipelineDBName: randomDBName pvcSize: 32Gi @@ -78,7 +78,7 @@ spec: objectStorage: minio: deploy: true - image: minio:test6 + image: minio:test7 bucket: mlpipeline pvcSize: 40Gi resources: diff --git a/controllers/testdata/declarative/case_7/expected/apiserver_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml similarity index 78% rename from controllers/testdata/declarative/case_7/expected/apiserver_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml index bdf2f827e..567e79bbf 100644 --- a/controllers/testdata/declarative/case_7/expected/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml @@ -1,24 +1,24 @@ -apiVersion: apps/v1ds-pipeline-testdsp6 +apiVersion: apps/v1 kind: Deployment metadata: - name: ds-pipeline-testdsp6 + name: ds-pipeline-testdsp7 namespace: default labels: - app: ds-pipeline-testdsp6 + app: ds-pipeline-testdsp7 component: data-science-pipelines - dspa: testdsp2 + dspa: testdsp7 spec: selector: matchLabels: - app: ds-pipeline-testdsp6 + app: ds-pipeline-testdsp7 component: data-science-pipelines - dspa: testdsp2 + dspa: testdsp7 template: metadata: labels: - app: ds-pipeline-testdsp6 + app: ds-pipeline-testdsp7 component: data-science-pipelines - dspa: testdsp2 + dspa: testdsp7 spec: 
containers: - env: @@ -30,24 +30,24 @@ spec: valueFrom: secretKeyRef: key: "password" - name: "ds-pipeline-db-testdsp2" + name: "ds-pipeline-db-testdsp7" - name: DBCONFIG_DBNAME value: "randomDBName" - name: DBCONFIG_HOST - value: "mariadb-testdsp2.default.svc.cluster.local" + value: "mariadb-testdsp7.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - name: ARTIFACT_BUCKET value: "mlpipeline" - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp2.default.svc.cluster.local:9000" + value: "http://minio-testdsp7.default.svc.cluster.local:9000" - name: ARTIFACT_SCRIPT valueFrom: configMapKeyRef: key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp2" + name: "ds-pipeline-artifact-script-testdsp7" - name: ARTIFACT_IMAGE - value: "artifact-manager:test2" + value: "artifact-manager:test7" - name: ARCHIVE_LOGS value: "false" - name: EXECUTIONTYPE @@ -59,7 +59,7 @@ spec: - name: PIPELINE_RUNTIME value: "tekton" - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp2" + value: "pipeline-runner-testdsp7" - name: INJECT_DEFAULT_SCRIPT value: "true" - name: APPLY_TEKTON_CUSTOM_RESOURCE @@ -74,37 +74,43 @@ spec: value: "ds-pipeline-visualizationserver" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT value: "8888" + - name: OBJECTSTORECONFIG_CREDENTIALSSECRET + value: "ds-pipeline-s3-testdsp7" + - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY + value: "accesskey" + - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY + value: "secretkey" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY valueFrom: secretKeyRef: key: "accesskey" - name: "mlpipeline-minio-artifact" + name: "ds-pipeline-s3-testdsp7" - name: OBJECTSTORECONFIG_SECRETACCESSKEY valueFrom: secretKeyRef: key: "secretkey" - name: "mlpipeline-minio-artifact" + name: "ds-pipeline-s3-testdsp7" - name: OBJECTSTORECONFIG_SECURE value: "false" - name: MINIO_SERVICE_SERVICE_HOST - value: "minio-testdsp2.default.svc.cluster.local" 
+ value: "minio-testdsp7.default.svc.cluster.local" - name: MINIO_SERVICE_SERVICE_PORT value: "9000" - name: CACHE_IMAGE - value: "ubi-minimal:test2" + value: "ubi-minimal:test7" - name: MOVERESULTS_IMAGE - value: "busybox:test2" + value: "busybox:test7" - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: "ds-pipeline-metadata-grpc-testdsp2.default.svc.cluster.local" + value: "ds-pipeline-metadata-grpc-testdsp7.default.svc.cluster.local" - name: METADATA_GRPC_SERVICE_SERVICE_PORT value: "8080" - name: ML_PIPELINE_SERVICE_HOST - value: ds-pipeline-testdsp6.default.svc.cluster.local + value: ds-pipeline-testdsp7.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" - image: api-server:test2 + image: api-server:test7 imagePullPolicy: Always name: ds-pipeline-api-server ports: @@ -155,15 +161,15 @@ spec: args: - --https-address=:8443 - --provider=openshift - - --openshift-service-account=ds-pipeline-testdsp6 + - --openshift-service-account=ds-pipeline-testdsp7 - --upstream=http://localhost:8888 - --tls-cert=/etc/tls/private/tls.crt - --tls-key=/etc/tls/private/tls.key - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp6","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp6","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-testdsp7","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-testdsp7","verb":"get","resourceAPIGroup":"route.openshift.io"}' - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test2 + image: oauth-proxy:test7 ports: - containerPort: 8443 name: oauth @@ -201,14 +207,14 @@ spec: volumes: - name: proxy-tls secret: - secretName: 
ds-pipelines-proxy-tls-testdsp2 + secretName: ds-pipelines-proxy-tls-testdsp7 defaultMode: 420 - configMap: defaultMode: 420 - name: sample-config-testdsp2 + name: sample-config-testdsp7 name: sample-config - configMap: defaultMode: 420 - name: sample-pipeline-testdsp2 + name: sample-pipeline-testdsp7 name: sample-pipeline - serviceAccountName: ds-pipeline-testdsp6 + serviceAccountName: ds-pipeline-testdsp7 diff --git a/controllers/testdata/declarative/case_7/expected/configmap_artifact_script.yaml b/controllers/testdata/declarative/case_7/expected/created/configmap_artifact_script.yaml similarity index 74% rename from controllers/testdata/declarative/case_7/expected/configmap_artifact_script.yaml rename to controllers/testdata/declarative/case_7/expected/created/configmap_artifact_script.yaml index 9294a70e1..a1550c013 100644 --- a/controllers/testdata/declarative/case_7/expected/configmap_artifact_script.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/configmap_artifact_script.yaml @@ -6,13 +6,20 @@ data: workspace_dir=$(echo $(context.taskRun.name) | sed -e "s/$(context.pipeline.name)-//g") workspace_dest=/workspace/${workspace_dir}/artifacts/$(context.pipelineRun.name)/$(context.taskRun.name) artifact_name=$(basename $2) + + aws_cp() { + + aws s3 --endpoint http://minio-testdsp7.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + + } + if [ -f "$workspace_dest/$artifact_name" ]; then echo sending to: ${workspace_dest}/${artifact_name} tar -cvzf $1.tgz -C ${workspace_dest} ${artifact_name} - aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 elif [ -f "$2" ]; then tar -cvzf $1.tgz -C $(dirname $2) ${artifact_name} - aws s3 --endpoint http://minio-testdsp2.default.svc.cluster.local:9000 cp $1.tgz s3://mlpipeline/artifacts/$PIPELINERUN/$PIPELINETASK/$1.tgz + aws_cp $1 else echo "$2 file does 
not exist. Skip artifact tracking for $1" fi @@ -28,8 +35,8 @@ data: } kind: ConfigMap metadata: - name: ds-pipeline-artifact-script-testdsp2 + name: ds-pipeline-artifact-script-testdsp7 namespace: default labels: - app: ds-pipeline-testdsp6 + app: ds-pipeline-testdsp7 component: data-science-pipelines diff --git a/controllers/testdata/declarative/case_7/expected/mariadb_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/mariadb_deployment.yaml similarity index 86% rename from controllers/testdata/declarative/case_7/expected/mariadb_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/created/mariadb_deployment.yaml index e1a326516..e982d3b31 100644 --- a/controllers/testdata/declarative/case_7/expected/mariadb_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/mariadb_deployment.yaml @@ -2,30 +2,30 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: mariadb-testdsp6 + name: mariadb-testdsp7 namespace: default labels: - app: mariadb-testdsp6 + app: mariadb-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: strategy: type: Recreate # Need this since backing PVC is ReadWriteOnce, which creates resource lock condition in default Rolling strategy selector: matchLabels: - app: mariadb-testdsp6 + app: mariadb-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 template: metadata: labels: - app: mariadb-testdsp6 + app: mariadb-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: containers: - name: mariadb - image: mariadb:test2 + image: mariadb:test7 ports: - containerPort: 3306 protocol: TCP @@ -58,7 +58,7 @@ spec: valueFrom: secretKeyRef: key: "password" - name: "ds-pipeline-db-testdsp6" + name: "ds-pipeline-db-testdsp7" - name: MYSQL_DATABASE value: "randomDBName" - name: MYSQL_ALLOW_EMPTY_PASSWORD @@ -76,4 +76,4 @@ spec: volumes: - name: mariadb-persistent-storage persistentVolumeClaim: - claimName: 
mariadb-testdsp6 + claimName: mariadb-testdsp7 diff --git a/controllers/testdata/declarative/case_7/expected/minio_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml similarity index 82% rename from controllers/testdata/declarative/case_7/expected/minio_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml index 8f8b3b930..da4a1627b 100644 --- a/controllers/testdata/declarative/case_7/expected/minio_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml @@ -1,26 +1,26 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: minio-testdsp6 + name: minio-testdsp7 namespace: default labels: - app: minio-testdsp6 + app: minio-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: selector: matchLabels: - app: minio-testdsp6 + app: minio-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 strategy: type: Recreate template: metadata: labels: - app: minio-testdsp6 + app: minio-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: containers: - args: @@ -31,13 +31,13 @@ spec: valueFrom: secretKeyRef: key: "accesskey" - name: "mlpipeline-minio-artifact" + name: "ds-pipeline-s3-testdsp7" - name: MINIO_SECRET_KEY valueFrom: secretKeyRef: key: "secretkey" - name: "mlpipeline-minio-artifact" - image: minio:test2 + name: "ds-pipeline-s3-testdsp7" + image: minio:test7 name: minio ports: - containerPort: 9000 @@ -72,4 +72,4 @@ spec: volumes: - name: data persistentVolumeClaim: - claimName: minio-testdsp6 + claimName: minio-testdsp7 diff --git a/controllers/testdata/declarative/case_7/expected/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml similarity index 86% rename from controllers/testdata/declarative/case_7/expected/mlpipelines-ui_deployment.yaml rename to 
controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml index 3faec65f6..839521717 100644 --- a/controllers/testdata/declarative/case_7/expected/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml @@ -1,26 +1,26 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: ds-pipeline-ui-testdsp6 + name: ds-pipeline-ui-testdsp7 namespace: default labels: - app: ds-pipeline-ui-testdsp6 + app: ds-pipeline-ui-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: selector: matchLabels: - app: ds-pipeline-ui-testdsp6 + app: ds-pipeline-ui-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 template: metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "true" labels: - app: ds-pipeline-ui-testdsp6 + app: ds-pipeline-ui-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: containers: - env: @@ -35,25 +35,25 @@ spec: valueFrom: secretKeyRef: key: "accesskey" - name: "mlpipeline-minio-artifact" + name: "ds-pipeline-s3-testdsp7" - name: MINIO_SECRET_KEY valueFrom: secretKeyRef: key: "secretkey" - name: "mlpipeline-minio-artifact" + name: "ds-pipeline-s3-testdsp7" - name: ALLOW_CUSTOM_VISUALIZATIONS value: "true" - name: ARGO_ARCHIVE_LOGS value: "true" - name: ML_PIPELINE_SERVICE_HOST - value: ds-pipeline-testdsp6 + value: ds-pipeline-testdsp7 - name: ML_PIPELINE_SERVICE_PORT value: '8888' - name: METADATA_ENVOY_SERVICE_SERVICE_HOST - value: ds-pipeline-metadata-envoy-testdsp6 + value: ds-pipeline-metadata-envoy-testdsp7 - name: METADATA_ENVOY_SERVICE_SERVICE_PORT value: "9090" - image: frontend:test2 + image: frontend:test7 imagePullPolicy: IfNotPresent livenessProbe: exec: @@ -98,15 +98,15 @@ spec: args: - --https-address=:8443 - --provider=openshift - - --openshift-service-account=ds-pipeline-ui-testdsp6 + - --openshift-service-account=ds-pipeline-ui-testdsp7 - 
--upstream=http://localhost:3000 - --tls-cert=/etc/tls/private/tls.crt - --tls-key=/etc/tls/private/tls.key - --cookie-secret=SECRET - - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp6","namespace":"default"}}' - - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp6","verb":"get","resourceAPIGroup":"route.openshift.io"}' + - '--openshift-delegate-urls={"/": {"group":"route.openshift.io","resource":"routes","verb":"get","name":"ds-pipeline-ui-testdsp7","namespace":"default"}}' + - '--openshift-sar={"namespace":"default","resource":"routes","resourceName":"ds-pipeline-ui-testdsp7","verb":"get","resourceAPIGroup":"route.openshift.io"}' - --skip-auth-regex='(^/metrics|^/apis/v1beta1/healthz)' - image: oauth-proxy:test2 + image: oauth-proxy:test7 ports: - containerPort: 8443 name: https @@ -141,7 +141,7 @@ spec: volumeMounts: - mountPath: /etc/tls/private name: proxy-tls - serviceAccountName: ds-pipeline-ui-testdsp6 + serviceAccountName: ds-pipeline-ui-testdsp7 volumes: - configMap: name: some-test-configmap @@ -149,5 +149,5 @@ spec: name: config-volume - name: proxy-tls secret: - secretName: ds-pipelines-ui-proxy-tls-testdsp6 + secretName: ds-pipelines-ui-proxy-tls-testdsp7 defaultMode: 420 diff --git a/controllers/testdata/declarative/case_7/expected/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml similarity index 79% rename from controllers/testdata/declarative/case_7/expected/persistence-agent_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml index afed69995..f914a38e2 100644 --- a/controllers/testdata/declarative/case_7/expected/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml @@ -1,26 +1,26 @@ apiVersion: apps/v1 
kind: Deployment metadata: - name: ds-pipeline-persistenceagent-testdsp6 + name: ds-pipeline-persistenceagent-testdsp7 namespace: default labels: - app: ds-pipeline-persistenceagent-testdsp6 + app: ds-pipeline-persistenceagent-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: selector: matchLabels: - app: ds-pipeline-persistenceagent-testdsp6 + app: ds-pipeline-persistenceagent-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 template: metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "true" labels: - app: ds-pipeline-persistenceagent-testdsp6 + app: ds-pipeline-persistenceagent-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: containers: - env: @@ -36,7 +36,7 @@ spec: value: "" - name: EXECUTIONTYPE value: PipelineRun - image: persistenceagent:test2 + image: persistenceagent:test7 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent command: @@ -44,8 +44,8 @@ spec: - "--logtostderr=true" - "--ttlSecondsAfterWorkflowFinish=86400" - "--numWorker=5" - - "--mlPipelineAPIServerName=ds-pipeline-testdsp6" - - "--namespace=testdsp6" + - "--mlPipelineAPIServerName=ds-pipeline-testdsp7" + - "--namespace=testdsp7" - "--mlPipelineServiceHttpPort=8888" - "--mlPipelineServiceGRPCPort=8887" livenessProbe: @@ -73,4 +73,4 @@ spec: limits: cpu: 2524m memory: 5Gi - serviceAccountName: ds-pipeline-persistenceagent-testdsp6 + serviceAccountName: ds-pipeline-persistenceagent-testdsp7 diff --git a/controllers/testdata/declarative/case_7/expected/sample-config.yaml.tmpl b/controllers/testdata/declarative/case_7/expected/created/sample-config.yaml.tmpl similarity index 88% rename from controllers/testdata/declarative/case_7/expected/sample-config.yaml.tmpl rename to controllers/testdata/declarative/case_7/expected/created/sample-config.yaml.tmpl index f5ca8011c..c7bfcafe5 100644 --- a/controllers/testdata/declarative/case_7/expected/sample-config.yaml.tmpl +++ 
b/controllers/testdata/declarative/case_7/expected/created/sample-config.yaml.tmpl @@ -1,10 +1,10 @@ apiVersion: v1 kind: ConfigMap metadata: - name: sample-config-testdsp6 + name: sample-config-testdsp7 namespace: default labels: - app: ds-pipeline-testdsp6 + app: ds-pipeline-testdsp7 component: data-science-pipelines data: sample_config.json: |- diff --git a/controllers/testdata/declarative/case_7/expected/sample-pipeline.yaml.tmpl b/controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl similarity index 99% rename from controllers/testdata/declarative/case_7/expected/sample-pipeline.yaml.tmpl rename to controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl index 92cb390fb..f7c147db5 100644 --- a/controllers/testdata/declarative/case_7/expected/sample-pipeline.yaml.tmpl +++ b/controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl @@ -1,10 +1,10 @@ apiVersion: v1 kind: ConfigMap metadata: - name: sample-pipeline-testdsp6 + name: sample-pipeline-testdsp7 namespace: default labels: - app: ds-pipeline-testdsp6 + app: ds-pipeline-testdsp7 component: data-science-pipelines data: iris-pipeline-compiled.yaml: |- diff --git a/controllers/testdata/declarative/case_7/expected/scheduled-workflow_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/scheduled-workflow_deployment.yaml similarity index 79% rename from controllers/testdata/declarative/case_7/expected/scheduled-workflow_deployment.yaml rename to controllers/testdata/declarative/case_7/expected/created/scheduled-workflow_deployment.yaml index e0037fb35..03d14f33f 100644 --- a/controllers/testdata/declarative/case_7/expected/scheduled-workflow_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/scheduled-workflow_deployment.yaml @@ -1,26 +1,26 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: ds-pipeline-scheduledworkflow-testdsp6 + name: ds-pipeline-scheduledworkflow-testdsp7 
namespace: default labels: - app: ds-pipeline-scheduledworkflow-testdsp6 + app: ds-pipeline-scheduledworkflow-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: selector: matchLabels: - app: ds-pipeline-scheduledworkflow-testdsp6 + app: ds-pipeline-scheduledworkflow-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 template: metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "true" labels: - app: ds-pipeline-scheduledworkflow-testdsp6 + app: ds-pipeline-scheduledworkflow-testdsp7 component: data-science-pipelines - dspa: testdsp6 + dspa: testdsp7 spec: containers: - env: @@ -30,7 +30,7 @@ spec: value: "EST" - name: EXECUTIONTYPE value: PipelineRun - image: scheduledworkflow:test2 + image: scheduledworkflow:test7 imagePullPolicy: IfNotPresent name: ds-pipeline-scheduledworkflow command: @@ -62,4 +62,4 @@ spec: limits: cpu: 2526m memory: 5Gi - serviceAccountName: ds-pipeline-scheduledworkflow-testdsp6 + serviceAccountName: ds-pipeline-scheduledworkflow-testdsp7 From eca5c8b4432812d67bfc8ec79583c82ad77eab63 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 21 Nov 2023 19:30:22 -0500 Subject: [PATCH 46/85] Fix improperly formatted kustomization.yaml --- config/base/kustomization.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index b349a343c..a7c950c50 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -134,7 +134,7 @@ vars: apiVersion: v1 fieldref: fieldpath: data.DSPO_HEALTHCHECK_DATABASE_CONNECTIONTIMEOUT - - name: DSPO_HEALTHCHECK_OBJECTSTORE_CONNECTIONTIMEOUT + - name: DSPO_HEALTHCHECK_OBJECTSTORE_CONNECTIONTIMEOUT objref: kind: ConfigMap name: dspo-parameters @@ -190,7 +190,7 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_SCHEDULEDWORKFLOW - - name: IMAGESV2_MLMDENVOY + - name: IMAGESV2_MLMDENVOY objref: kind: ConfigMap name: dspo-parameters From 
e250647618bd36ccd1771ac0dfdec21c1bcefc1e Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 23 Oct 2023 16:44:02 -0400 Subject: [PATCH 47/85] Deploy Argo Infrastructure --- Makefile | 15 + .../clusterrole.argo-aggregate-to-admin.yaml | 34 ++ .../clusterrole.argo-aggregate-to-edit.yaml | 32 ++ .../clusterrole.argo-aggregate-to-view.yaml | 27 + .../argo/clusterrole.argo-cluster-role.yaml | 106 ++++ .../clusterrole.argo-server-cluster-role.yaml | 66 +++ .../argo/clusterrolebinding.argo-binding.yaml | 13 + ...lusterrolebinding.argo-server-binding.yaml | 13 + ...nfigmap.workflow-controller-configmap.yaml | 6 + config/argo/crd.applications.yaml | 531 ++++++++++++++++++ config/argo/crd.clusterworkflowtemplates.yaml | 38 ++ config/argo/crd.cronworkflows.yaml | 42 ++ config/argo/crd.scheduledworkflows.yaml | 41 ++ config/argo/crd.viewers.yaml | 36 ++ config/argo/crd.workfloweventbinding.yaml | 37 ++ config/argo/crd.workflows.yaml | 52 ++ config/argo/crd.workflowtaskresult.yaml | 427 ++++++++++++++ config/argo/crd.workflowtaskset.yaml | 43 ++ config/argo/crd.workflowtemplate.yaml | 37 ++ config/argo/deployment.argo-server.yaml | 49 ++ .../argo/deployment.workflow-controller.yaml | 58 ++ config/argo/kustomization.yaml | 39 ++ config/argo/priorityclass.yaml | 10 + config/argo/role.argo.yaml | 21 + config/argo/rolebinding.argo-binding.yaml | 14 + config/argo/service.argo-server.yaml | 13 + config/argo/serviceaccount.argo-server.yaml | 6 + config/argo/serviceaccount.argo.yaml | 6 + .../make-argodeploy/kustomization.yaml | 5 + 29 files changed, 1817 insertions(+) create mode 100644 config/argo/clusterrole.argo-aggregate-to-admin.yaml create mode 100644 config/argo/clusterrole.argo-aggregate-to-edit.yaml create mode 100644 config/argo/clusterrole.argo-aggregate-to-view.yaml create mode 100644 config/argo/clusterrole.argo-cluster-role.yaml create mode 100644 config/argo/clusterrole.argo-server-cluster-role.yaml create mode 100644 
config/argo/clusterrolebinding.argo-binding.yaml create mode 100644 config/argo/clusterrolebinding.argo-server-binding.yaml create mode 100644 config/argo/configmap.workflow-controller-configmap.yaml create mode 100644 config/argo/crd.applications.yaml create mode 100644 config/argo/crd.clusterworkflowtemplates.yaml create mode 100644 config/argo/crd.cronworkflows.yaml create mode 100644 config/argo/crd.scheduledworkflows.yaml create mode 100644 config/argo/crd.viewers.yaml create mode 100644 config/argo/crd.workfloweventbinding.yaml create mode 100644 config/argo/crd.workflows.yaml create mode 100644 config/argo/crd.workflowtaskresult.yaml create mode 100644 config/argo/crd.workflowtaskset.yaml create mode 100644 config/argo/crd.workflowtemplate.yaml create mode 100644 config/argo/deployment.argo-server.yaml create mode 100644 config/argo/deployment.workflow-controller.yaml create mode 100644 config/argo/kustomization.yaml create mode 100644 config/argo/priorityclass.yaml create mode 100644 config/argo/role.argo.yaml create mode 100644 config/argo/rolebinding.argo-binding.yaml create mode 100644 config/argo/service.argo-server.yaml create mode 100644 config/argo/serviceaccount.argo-server.yaml create mode 100644 config/argo/serviceaccount.argo.yaml create mode 100644 config/overlays/make-argodeploy/kustomization.yaml diff --git a/Makefile b/Makefile index c1497854a..4dcc3f02b 100644 --- a/Makefile +++ b/Makefile @@ -54,6 +54,8 @@ ENVTEST_K8S_VERSION = 1.25.0 OPERATOR_NS ?= odh-applications # Namespace to deploy v2 infrastructure V2INFRA_NS ?= openshift-pipelines +# Namespace to deploy argo infrastructure +ARGO_NS ?= argo # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -173,6 +175,19 @@ v2undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/con && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl delete 
--ignore-not-found=$(ignore-not-found) -f - +.PHONY: argodeploy +argodeploy: manifests kustomize + cd config/overlays/make-argodeploy \ + && $(KUSTOMIZE) edit set namespace ${ARGO_NS} + $(KUSTOMIZE) build config/overlays/make-argodeploy | kubectl apply -f - + +.PHONY: argoundeploy +argoundeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. + cd config/overlays/make-argodeploy \ + && $(KUSTOMIZE) edit set namespace ${ARGO_NS} + $(KUSTOMIZE) build config/overlays/make-argodeploy | kubectl delete --ignore-not-found=$(ignore-not-found) -f - + + ##@ Build Dependencies ## Location to install dependencies to diff --git a/config/argo/clusterrole.argo-aggregate-to-admin.yaml b/config/argo/clusterrole.argo-aggregate-to-admin.yaml new file mode 100644 index 000000000..f978dca0c --- /dev/null +++ b/config/argo/clusterrole.argo-aggregate-to-admin.yaml @@ -0,0 +1,34 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-admin: "true" + name: argo-aggregate-to-admin +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/config/argo/clusterrole.argo-aggregate-to-edit.yaml b/config/argo/clusterrole.argo-aggregate-to-edit.yaml new file mode 100644 index 000000000..4797d0a1b --- /dev/null +++ b/config/argo/clusterrole.argo-aggregate-to-edit.yaml @@ -0,0 +1,32 @@ +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-edit: "true" + name: argo-aggregate-to-edit +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch \ No newline at end of file diff --git a/config/argo/clusterrole.argo-aggregate-to-view.yaml b/config/argo/clusterrole.argo-aggregate-to-view.yaml new file mode 100644 index 000000000..318097cf0 --- /dev/null +++ b/config/argo/clusterrole.argo-aggregate-to-view.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: argo-aggregate-to-view +rules: +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workfloweventbindings + - workfloweventbindings/finalizers + - workflowtemplates + - workflowtemplates/finalizers + - cronworkflows + - cronworkflows/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + - workflowtaskresults + - workflowtaskresults/finalizers + verbs: + - get + - list + - watch \ No newline at end of file diff --git a/config/argo/clusterrole.argo-cluster-role.yaml b/config/argo/clusterrole.argo-cluster-role.yaml new file mode 100644 index 000000000..8e7410107 --- /dev/null +++ b/config/argo/clusterrole.argo-cluster-role.yaml @@ -0,0 +1,106 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-cluster-role +rules: +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + 
- update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/finalizers + verbs: + - create + - update + - delete + - get +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + - workflowartifactgctasks + verbs: + - get + - list + - watch + - update + - patch + - delete + - create +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + - clusterworkflowtemplates + - clusterworkflowtemplates/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - argoproj.io + resources: + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - get + - delete \ No newline at end of file diff --git a/config/argo/clusterrole.argo-server-cluster-role.yaml b/config/argo/clusterrole.argo-server-cluster-role.yaml new file mode 100644 index 000000000..699c36c41 --- /dev/null +++ b/config/argo/clusterrole.argo-server-cluster-role.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: argo-server-cluster-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +- apiGroups: + - "" + resources: + - pods + - pods/exec + - pods/log + verbs: + - get + - list + - watch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - watch + - create 
+ - patch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list + - watch +- apiGroups: + - argoproj.io + resources: + - eventsources + - sensors + - workflows + - workfloweventbindings + - workflowtemplates + - cronworkflows + - clusterworkflowtemplates + verbs: + - create + - get + - list + - watch + - update + - patch + - delete \ No newline at end of file diff --git a/config/argo/clusterrolebinding.argo-binding.yaml b/config/argo/clusterrolebinding.argo-binding.yaml new file mode 100644 index 000000000..05a9369b2 --- /dev/null +++ b/config/argo/clusterrolebinding.argo-binding.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-cluster-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo \ No newline at end of file diff --git a/config/argo/clusterrolebinding.argo-server-binding.yaml b/config/argo/clusterrolebinding.argo-server-binding.yaml new file mode 100644 index 000000000..81f664337 --- /dev/null +++ b/config/argo/clusterrolebinding.argo-server-binding.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: argo-server-binding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: argo-server-cluster-role +subjects: +- kind: ServiceAccount + name: argo-server + namespace: argo \ No newline at end of file diff --git a/config/argo/configmap.workflow-controller-configmap.yaml b/config/argo/configmap.workflow-controller-configmap.yaml new file mode 100644 index 000000000..86379fcbe --- /dev/null +++ b/config/argo/configmap.workflow-controller-configmap.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: workflow-controller-configmap + namespace: argo \ No newline at end of file diff --git a/config/argo/crd.applications.yaml b/config/argo/crd.applications.yaml new 
file mode 100644 index 000000000..b2a459f03 --- /dev/null +++ b/config/argo/crd.applications.yaml @@ -0,0 +1,531 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.kubernetes.io: https://github.com/kubernetes-sigs/application/pull/2 + controller-gen.kubebuilder.io/version: v0.4.0 + creationTimestamp: null + labels: + controller-tools.k8s.io: "1.0" + name: applications.app.k8s.io +spec: + group: app.k8s.io + names: + categories: + - all + kind: Application + listKind: ApplicationList + plural: applications + shortNames: + - app + singular: application + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: The type of the application + jsonPath: .spec.descriptor.type + name: Type + type: string + - description: The creation date + jsonPath: .spec.descriptor.version + name: Version + type: string + - description: The application object owns the matched resources + jsonPath: .spec.addOwnerRef + name: Owner + type: boolean + - description: Numbers of components ready + jsonPath: .status.componentsReady + name: Ready + type: string + - description: The creation date + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1beta1 + schema: + openAPIV3Schema: + description: Application is the Schema for the applications API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ApplicationSpec defines the specification for an Application. + properties: + addOwnerRef: + description: AddOwnerRef objects - flag to indicate if we need to + add OwnerRefs to matching objects Matching is done by using Selector + to query all ComponentGroupKinds + type: boolean + assemblyPhase: + description: AssemblyPhase represents the current phase of the application's + assembly. An empty value is equivalent to "Succeeded". + type: string + componentKinds: + description: ComponentGroupKinds is a list of Kinds for Application's + components (e.g. Deployments, Pods, Services, CRDs). It can be used + in conjunction with the Application's Selector to list or watch + the Applications components. + items: + description: GroupKind specifies a Group and a Kind, but does not + force a version. This is useful for identifying concepts during + lookup stages without having partially valid types + properties: + group: + type: string + kind: + type: string + required: + - group + - kind + type: object + type: array + descriptor: + description: Descriptor regroups information and metadata about an + application. + properties: + description: + description: Description is a brief string description of the + Application. + type: string + icons: + description: Icons is an optional list of icons for an application. + Icon information includes the source, size, and mime type. + items: + description: ImageSpec contains information about an image used + as an icon. + properties: + size: + description: (optional) The size of the image in pixels + (e.g., 25x25). + type: string + src: + description: The source for image represented as either + an absolute URL to the image or a Data URL containing + the image. Data URLs are defined in RFC 2397. 
+ type: string + type: + description: (optional) The mine type of the image (e.g., + "image/png"). + type: string + required: + - src + type: object + type: array + keywords: + description: Keywords is an optional list of key words associated + with the application (e.g. MySQL, RDBMS, database). + items: + type: string + type: array + links: + description: Links are a list of descriptive URLs intended to + be used to surface additional documentation, dashboards, etc. + items: + description: Link contains information about an URL to surface + documentation, dashboards, etc. + properties: + description: + description: Description is human readable content explaining + the purpose of the link. + type: string + url: + description: Url typically points at a website address. + type: string + type: object + type: array + maintainers: + description: Maintainers is an optional list of maintainers of + the application. The maintainers in this list maintain the the + source code, images, and package for the application. + items: + description: ContactData contains information about an individual + or organization. + properties: + email: + description: Email is the email address. + type: string + name: + description: Name is the descriptive name. + type: string + url: + description: Url could typically be a website address. + type: string + type: object + type: array + notes: + description: Notes contain a human readable snippets intended + as a quick start for the users of the Application. CommonMark + markdown syntax may be used for rich text representation. + type: string + owners: + description: Owners is an optional list of the owners of the installed + application. The owners of the application should be contacted + in the event of a planned or unplanned disruption affecting + the application. + items: + description: ContactData contains information about an individual + or organization. + properties: + email: + description: Email is the email address. 
+ type: string + name: + description: Name is the descriptive name. + type: string + url: + description: Url could typically be a website address. + type: string + type: object + type: array + type: + description: Type is the type of the application (e.g. WordPress, + MySQL, Cassandra). + type: string + version: + description: Version is an optional version indicator for the + Application. + type: string + type: object + info: + description: Info contains human readable key,value pairs for the + Application. + items: + description: InfoItem is a human readable key,value pair containing + important information about how to access the Application. + properties: + name: + description: Name is a human readable title for this piece of + information. + type: string + type: + description: Type of the value for this InfoItem. + type: string + value: + description: Value is human readable content. + type: string + valueFrom: + description: ValueFrom defines a reference to derive the value + from another source. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a + valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container + that triggered the event) or if no container name + is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to + have some well-defined way of referencing a part of + an object. TODO: this design is not final and this + field is subject to change in the future.' + type: string + key: + description: The key to select. 
+ type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this + reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + ingressRef: + description: Select an Ingress. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a + valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container + that triggered the event) or if no container name + is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to + have some well-defined way of referencing a part of + an object. TODO: this design is not final and this + field is subject to change in the future.' + type: string + host: + description: The optional host to select. + type: string + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + path: + description: The optional HTTP path. + type: string + protocol: + description: Protocol for the ingress + type: string + resourceVersion: + description: 'Specific resourceVersion to which this + reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + secretKeyRef: + description: Selects a key of a Secret. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a + valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container + that triggered the event) or if no container name + is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to + have some well-defined way of referencing a part of + an object. TODO: this design is not final and this + field is subject to change in the future.' + type: string + key: + description: The key to select. + type: string + kind: + description: 'Kind of the referent. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this + reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + serviceRef: + description: Select a Service. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead + of an entire object, this string should contain a + valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container + within a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container + that triggered the event) or if no container name + is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to + have some well-defined way of referencing a part of + an object. TODO: this design is not final and this + field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + path: + description: The optional HTTP path. + type: string + port: + description: The optional port to select. + format: int32 + type: integer + protocol: + description: Protocol for the service + type: string + resourceVersion: + description: 'Specific resourceVersion to which this + reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + type: + description: Type of source. + type: string + type: object + type: object + type: array + selector: + description: 'Selector is a label query over kinds that created by + the application. It must match the component objects'' labels. More + info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. 
+ If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object + type: object + status: + description: ApplicationStatus defines controller's the observed state + of Application + properties: + components: + description: Object status array for all matching objects + items: + description: ObjectStatus is a generic status holder for objects + properties: + group: + description: Object group + type: string + kind: + description: Kind of object + type: string + link: + description: Link to object + type: string + name: + description: Name of object + type: string + status: + description: 'Status. Values: InProgress, Ready, Unknown' + type: string + type: object + type: array + componentsReady: + description: 'ComponentsReady: status of the components in the format + ready/total' + type: string + conditions: + description: Conditions represents the latest state of the object + items: + description: Condition describes the state of an object at a certain + point. + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another. + format: date-time + type: string + lastUpdateTime: + description: Last time the condition was probed + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition. 
+ type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition. + type: string + required: + - status + - type + type: object + type: array + observedGeneration: + description: ObservedGeneration is the most recent generation observed. + It corresponds to the Object's generation, which is updated on mutation + by the API Server. + format: int64 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/config/argo/crd.clusterworkflowtemplates.yaml b/config/argo/crd.clusterworkflowtemplates.yaml new file mode 100644 index 000000000..cce533e63 --- /dev/null +++ b/config/argo/crd.clusterworkflowtemplates.yaml @@ -0,0 +1,38 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + internal.kpt.dev/upstream-identifier: apiextensions.k8s.io|CustomResourceDefinition|default|clusterworkflowtemplates.argoproj.io + name: clusterworkflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: ClusterWorkflowTemplate + listKind: ClusterWorkflowTemplateList + plural: clusterworkflowtemplates + shortNames: + - clusterwftmpl + - cwft + singular: clusterworkflowtemplate + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/config/argo/crd.cronworkflows.yaml b/config/argo/crd.cronworkflows.yaml new file mode 100644 index 000000000..7c2b6dc2d --- /dev/null +++ b/config/argo/crd.cronworkflows.yaml @@ -0,0 +1,42 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + 
annotations: + internal.kpt.dev/upstream-identifier: apiextensions.k8s.io|CustomResourceDefinition|default|cronworkflows.argoproj.io + name: cronworkflows.argoproj.io +spec: + group: argoproj.io + names: + kind: CronWorkflow + listKind: CronWorkflowList + plural: cronworkflows + shortNames: + - cwf + - cronwf + singular: cronworkflow + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/config/argo/crd.scheduledworkflows.yaml b/config/argo/crd.scheduledworkflows.yaml new file mode 100644 index 000000000..9bac8cef0 --- /dev/null +++ b/config/argo/crd.scheduledworkflows.yaml @@ -0,0 +1,41 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + kubeflow/crd-install: "true" + name: scheduledworkflows.kubeflow.org +spec: + group: kubeflow.org + names: + kind: ScheduledWorkflow + listKind: ScheduledWorkflowList + plural: scheduledworkflows + shortNames: + - swf + singular: scheduledworkflow + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - spec + - status + type: object + served: true + storage: true diff --git a/config/argo/crd.viewers.yaml b/config/argo/crd.viewers.yaml new file mode 100644 index 000000000..2e58965a1 --- /dev/null +++ b/config/argo/crd.viewers.yaml @@ -0,0 +1,36 @@ 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + labels: + kubeflow/crd-install: "true" + name: viewers.kubeflow.org +spec: + group: kubeflow.org + names: + kind: Viewer + listKind: ViewerList + plural: viewers + shortNames: + - vi + singular: viewer + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - spec + type: object + served: true + storage: true diff --git a/config/argo/crd.workfloweventbinding.yaml b/config/argo/crd.workfloweventbinding.yaml new file mode 100644 index 000000000..d73961473 --- /dev/null +++ b/config/argo/crd.workfloweventbinding.yaml @@ -0,0 +1,37 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + internal.kpt.dev/upstream-identifier: apiextensions.k8s.io|CustomResourceDefinition|default|workfloweventbindings.argoproj.io + name: workfloweventbindings.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowEventBinding + listKind: WorkflowEventBindingList + plural: workfloweventbindings + shortNames: + - wfeb + singular: workfloweventbinding + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/config/argo/crd.workflows.yaml b/config/argo/crd.workflows.yaml new file mode 100644 index 000000000..c955e45d4 --- /dev/null +++ b/config/argo/crd.workflows.yaml @@ -0,0 +1,52 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + internal.kpt.dev/upstream-identifier: 
apiextensions.k8s.io|CustomResourceDefinition|default|workflows.argoproj.io + name: workflows.argoproj.io +spec: + group: argoproj.io + names: + kind: Workflow + listKind: WorkflowList + plural: workflows + shortNames: + - wf + singular: workflow + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of the workflow + jsonPath: .status.phase + name: Status + type: string + - description: When the workflow was started + format: date-time + jsonPath: .status.startedAt + name: Age + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: {} diff --git a/config/argo/crd.workflowtaskresult.yaml b/config/argo/crd.workflowtaskresult.yaml new file mode 100644 index 000000000..8ca13dc1f --- /dev/null +++ b/config/argo/crd.workflowtaskresult.yaml @@ -0,0 +1,427 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + internal.kpt.dev/upstream-identifier: apiextensions.k8s.io|CustomResourceDefinition|default|workflowtaskresults.argoproj.io + name: workflowtaskresults.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskResult + listKind: WorkflowTaskResultList + plural: workflowtaskresults + singular: workflowtaskresult + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + message: + type: string + metadata: + type: object + outputs: + properties: + artifacts: + items: + properties: + archive: + properties: + none: + type: object + tar: + properties: + compressionLevel: + format: int32 + type: integer + type: 
object + zip: + type: object + type: object + archiveLogs: + type: boolean + artifactory: + properties: + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + url: + type: string + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - url + type: object + from: + type: string + fromExpression: + type: string + gcs: + properties: + bucket: + type: string + key: + type: string + serviceAccountKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - key + type: object + git: + properties: + depth: + format: int64 + type: integer + disableSubmodules: + type: boolean + fetch: + items: + type: string + type: array + insecureIgnoreHostKey: + type: boolean + passwordSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + repo: + type: string + revision: + type: string + sshPrivateKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + usernameSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + required: + - repo + type: object + globalName: + type: string + hdfs: + properties: + addresses: + items: + type: string + type: array + force: + type: boolean + hdfsUser: + type: string + krbCCacheSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbConfigConfigMap: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + krbKeytabSecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + 
required: + - key + type: object + krbRealm: + type: string + krbServicePrincipalName: + type: string + krbUsername: + type: string + path: + type: string + required: + - path + type: object + http: + properties: + headers: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + url: + type: string + required: + - url + type: object + mode: + format: int32 + type: integer + name: + type: string + optional: + type: boolean + oss: + properties: + accessKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + bucket: + type: string + createBucketIfNotPresent: + type: boolean + endpoint: + type: string + key: + type: string + lifecycleRule: + properties: + markDeletionAfterDays: + format: int32 + type: integer + markInfrequentAccessAfterDays: + format: int32 + type: integer + type: object + secretKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + securityToken: + type: string + required: + - key + type: object + path: + type: string + raw: + properties: + data: + type: string + required: + - data + type: object + recurseMode: + type: boolean + s3: + properties: + accessKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + bucket: + type: string + createBucketIfNotPresent: + properties: + objectLocking: + type: boolean + type: object + encryptionOptions: + properties: + enableEncryption: + type: boolean + kmsEncryptionContext: + type: string + kmsKeyId: + type: string + serverSideCustomerKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + type: object + endpoint: + type: string + insecure: + type: boolean + key: + type: string + region: + type: string + roleARN: + type: 
string + secretKeySecret: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + useSDKCreds: + type: boolean + type: object + subPath: + type: string + required: + - name + type: object + type: array + exitCode: + type: string + parameters: + items: + properties: + default: + type: string + description: + type: string + enum: + items: + type: string + type: array + globalName: + type: string + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + type: string + optional: + type: boolean + required: + - key + type: object + default: + type: string + event: + type: string + expression: + type: string + jqFilter: + type: string + jsonPath: + type: string + parameter: + type: string + path: + type: string + supplied: + type: object + type: object + required: + - name + type: object + type: array + result: + type: string + type: object + phase: + type: string + progress: + type: string + required: + - metadata + type: object + served: true + storage: true diff --git a/config/argo/crd.workflowtaskset.yaml b/config/argo/crd.workflowtaskset.yaml new file mode 100644 index 000000000..4d69da345 --- /dev/null +++ b/config/argo/crd.workflowtaskset.yaml @@ -0,0 +1,43 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + internal.kpt.dev/upstream-identifier: apiextensions.k8s.io|CustomResourceDefinition|default|workflowtasksets.argoproj.io + name: workflowtasksets.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTaskSet + listKind: WorkflowTaskSetList + plural: workflowtasksets + shortNames: + - wfts + singular: workflowtaskset + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + 
x-kubernetes-preserve-unknown-fields: true + status: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/argo/crd.workflowtemplate.yaml b/config/argo/crd.workflowtemplate.yaml new file mode 100644 index 000000000..0e60798a8 --- /dev/null +++ b/config/argo/crd.workflowtemplate.yaml @@ -0,0 +1,37 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + internal.kpt.dev/upstream-identifier: apiextensions.k8s.io|CustomResourceDefinition|default|workflowtemplates.argoproj.io + name: workflowtemplates.argoproj.io +spec: + group: argoproj.io + names: + kind: WorkflowTemplate + listKind: WorkflowTemplateList + plural: workflowtemplates + shortNames: + - wftmpl + singular: workflowtemplate + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + properties: + apiVersion: + type: string + kind: + type: string + metadata: + type: object + spec: + type: object + x-kubernetes-map-type: atomic + x-kubernetes-preserve-unknown-fields: true + required: + - metadata + - spec + type: object + served: true + storage: true diff --git a/config/argo/deployment.argo-server.yaml b/config/argo/deployment.argo-server.yaml new file mode 100644 index 000000000..b12e7c88b --- /dev/null +++ b/config/argo/deployment.argo-server.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: argo-server + namespace: argo +spec: + selector: + matchLabels: + app: argo-server + template: + metadata: + labels: + app: argo-server + spec: + containers: + - args: + - server + env: [] + image: quay.io/argoproj/argocli:v3.4.12 + name: argo-server + ports: + - containerPort: 2746 + name: web + readinessProbe: + httpGet: + path: / + port: 2746 + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 20 + securityContext: + allowPrivilegeEscalation: 
false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + volumeMounts: + - mountPath: /tmp + name: tmp + nodeSelector: + kubernetes.io/os: linux + securityContext: + runAsNonRoot: true + serviceAccountName: argo-server + volumes: + - emptyDir: {} + name: tmp \ No newline at end of file diff --git a/config/argo/deployment.workflow-controller.yaml b/config/argo/deployment.workflow-controller.yaml new file mode 100644 index 000000000..1d5a056bf --- /dev/null +++ b/config/argo/deployment.workflow-controller.yaml @@ -0,0 +1,58 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: workflow-controller + namespace: argo +spec: + selector: + matchLabels: + app: workflow-controller + template: + metadata: + labels: + app: workflow-controller + spec: + containers: + - args: + - --configmap + - workflow-controller-configmap + - --executor-image + - gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance + - --namespaced + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + # image: quay.io/argoproj/workflow-controller:v3.4.12 + image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: workflow-controller + securityContext: + runAsNonRoot: true + serviceAccountName: argo \ No newline at end of file diff --git a/config/argo/kustomization.yaml b/config/argo/kustomization.yaml new file mode 100644 index 000000000..6b09a4e74 --- /dev/null +++ b/config/argo/kustomization.yaml @@ -0,0 +1,39 
@@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: argo +resources: +# Deploy Argo Controller and Server +# TODO: Only deploy server (not WC?) +# - https://github.com/argoproj/argo-workflows/releases/download/v3.4.12/install.yaml + +# The following manifests are used in ALL Executors +- clusterrole.argo-aggregate-to-admin.yaml +- clusterrole.argo-aggregate-to-edit.yaml +- clusterrole.argo-aggregate-to-view.yaml +- clusterrole.argo-cluster-role.yaml +- clusterrole.argo-server-cluster-role.yaml +- clusterrolebinding.argo-binding.yaml +- clusterrolebinding.argo-server-binding.yaml +- configmap.workflow-controller-configmap.yaml +- deployment.argo-server.yaml +# - deployment.workflow-controller.yaml +- priorityclass.yaml +- role.argo.yaml +- rolebinding.argo-binding.yaml +- service.argo-server.yaml +- serviceaccount.argo-server.yaml +- serviceaccount.argo.yaml + +# CRDs only needed for PNS executors +- crd.applications.yaml +- crd.clusterworkflowtemplates.yaml +- crd.cronworkflows.yaml +- crd.scheduledworkflows.yaml +- crd.viewers.yaml +- crd.workfloweventbinding.yaml +- crd.workflows.yaml +- crd.workflowtaskresult.yaml +- crd.workflowtaskset.yaml +- crd.workflowtemplate.yaml + + diff --git a/config/argo/priorityclass.yaml b/config/argo/priorityclass.yaml new file mode 100644 index 000000000..e792f398d --- /dev/null +++ b/config/argo/priorityclass.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + annotations: + internal.kpt.dev/upstream-identifier: scheduling.k8s.io|PriorityClass|default|workflow-controller + labels: + application-crd-id: kubeflow-pipelines + name: workflow-controller +value: 1000000 diff --git a/config/argo/role.argo.yaml b/config/argo/role.argo.yaml new file mode 100644 index 000000000..d58c221b6 --- /dev/null +++ b/config/argo/role.argo.yaml @@ -0,0 +1,21 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-role + namespace: argo +rules: +- 
apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - secrets + verbs: + - get \ No newline at end of file diff --git a/config/argo/rolebinding.argo-binding.yaml b/config/argo/rolebinding.argo-binding.yaml new file mode 100644 index 000000000..fe64852d9 --- /dev/null +++ b/config/argo/rolebinding.argo-binding.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: argo-binding + namespace: argo +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: argo-role +subjects: +- kind: ServiceAccount + name: argo + namespace: argo \ No newline at end of file diff --git a/config/argo/service.argo-server.yaml b/config/argo/service.argo-server.yaml new file mode 100644 index 000000000..b9affd5da --- /dev/null +++ b/config/argo/service.argo-server.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: argo-server + namespace: argo +spec: + ports: + - name: web + port: 2746 + targetPort: 2746 + selector: + app: argo-server \ No newline at end of file diff --git a/config/argo/serviceaccount.argo-server.yaml b/config/argo/serviceaccount.argo-server.yaml new file mode 100644 index 000000000..27556a207 --- /dev/null +++ b/config/argo/serviceaccount.argo-server.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo-server + namespace: argo \ No newline at end of file diff --git a/config/argo/serviceaccount.argo.yaml b/config/argo/serviceaccount.argo.yaml new file mode 100644 index 000000000..2de7bc6d0 --- /dev/null +++ b/config/argo/serviceaccount.argo.yaml @@ -0,0 +1,6 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: argo + namespace: argo \ No newline at end of file diff --git a/config/overlays/make-argodeploy/kustomization.yaml b/config/overlays/make-argodeploy/kustomization.yaml new file mode 100644 index 000000000..526685e3c --- /dev/null +++ 
b/config/overlays/make-argodeploy/kustomization.yaml @@ -0,0 +1,5 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: argo +resources: +- ../../argo From f2e9e99a9f45f1a08bb59acb0934ba0ceda8abb8 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 23 Oct 2023 18:03:40 -0400 Subject: [PATCH 48/85] Add WorkflowController DSPA Item --- api/v1alpha1/dspipeline_types.go | 10 ++ api/v1alpha1/zz_generated.deepcopy.go | 20 +++ config/base/kustomization.yaml | 7 + config/base/params.env | 1 + config/configmaps/files/config.yaml | 1 + ...b.io_datasciencepipelinesapplications.yaml | 11 ++ .../workflow-controller/configmap.yaml.tmpl | 42 ++++++ .../workflow-controller/deployment.yaml.tmpl | 71 ++++++++++ .../workflow-controller/role.yaml.tmpl | 131 ++++++++++++++++++ .../workflow-controller/rolebinding.yaml.tmpl | 20 +++ .../internal/workflow-controller/sa.yaml.tmpl | 12 ++ .../workflow-controller/service.yaml.tmpl | 26 ++++ config/manager/manager.yaml | 2 + controllers/dspipeline_controller.go | 5 + controllers/dspipeline_params.go | 1 + controllers/workflow_controller.go | 44 ++++++ controllers/workflow_controller_test.go | 121 ++++++++++++++++ 17 files changed, 525 insertions(+) create mode 100644 config/internal/workflow-controller/configmap.yaml.tmpl create mode 100644 config/internal/workflow-controller/deployment.yaml.tmpl create mode 100644 config/internal/workflow-controller/role.yaml.tmpl create mode 100644 config/internal/workflow-controller/rolebinding.yaml.tmpl create mode 100644 config/internal/workflow-controller/sa.yaml.tmpl create mode 100644 config/internal/workflow-controller/service.yaml.tmpl create mode 100644 controllers/workflow_controller.go create mode 100644 controllers/workflow_controller_test.go diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index c4c6d7ac4..c162df06f 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -51,6 +51,9 @@ type DSPASpec 
struct { // +kubebuilder:validation:Optional // +kubebuilder:default:="v1" DSPVersion string `json:"dspVersion,omitempty"` + // DS Pipelines Argo Workflow Controller Configuration. + // +kubebuilder:default:={deploy: false} + *WorkflowController `json:"workflowController,omitempty"` } type APIServer struct { @@ -293,6 +296,13 @@ type VisualizationServer struct { Image string `json:"image,omitempty"` } +type WorkflowController struct { + // +kubebuilder:default:=true + // +kubebuilder:validation:Optional + Deploy bool `json:"deploy"` + Image string `json:"image,omitempty"` +} + // ResourceRequirements structures compute resource requirements. // Replaces ResourceRequirements from corev1 which also includes optional storage field. // We handle storage field separately, and should not include it as a subfield for Resources. diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 5239c76f2..db7974ddf 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -149,6 +149,11 @@ func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = new(VisualizationServer) **out = **in } + if in.WorkflowController != nil { + in, out := &in.WorkflowController, &out.WorkflowController + *out = new(WorkflowController) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DSPASpec. @@ -606,6 +611,21 @@ func (in *VisualizationServer) DeepCopy() *VisualizationServer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkflowController) DeepCopyInto(out *WorkflowController) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkflowController. 
+func (in *WorkflowController) DeepCopy() *WorkflowController { + if in == nil { + return nil + } + out := new(WorkflowController) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Writer) DeepCopyInto(out *Writer) { *out = *in diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index a7c950c50..5518e7580 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -211,5 +211,12 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_MLMDWRITER + - name: IMAGESV2_WORKFLOWCONTROLLER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_WORKFLOWCONTROLLER configurations: - params.yaml diff --git a/config/base/params.env b/config/base/params.env index 01485c299..3350afc9a 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -21,6 +21,7 @@ IMAGESV2_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 IMAGESV2_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 IMAGESV2_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 IMAGESV2_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 +IMAGESV2_WORKFLOWCONTROLLER=gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance ZAP_LOG_LEVEL=info MAX_CONCURRENT_RECONCILES=10 DSPO_HEALTHCHECK_DATABASE_CONNECTIONTIMEOUT=15s diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 13775132b..230bc0c0d 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -22,6 +22,7 @@ ImagesV2: MlmdEnvoy: $(IMAGESV2_MLMDENVOY) MlmdGRPC: $(IMAGESV2_MLMDGRPC) MlmdWriter: $(IMAGESV2_MLMDWRITER) + WorkflowController: $(IMAGESV2_WORKFLOWCONTROLLER) DSPO: HealthCheck: Database: diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml 
b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 4972a5c9c..66171d9d0 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -760,6 +760,17 @@ spec: image: type: string type: object + workflowController: + default: + deploy: false + description: DS Pipelines Argo Workflow Controller Configuration. + properties: + deploy: + default: true + type: boolean + image: + type: string + type: object required: - objectStorage type: object diff --git a/config/internal/workflow-controller/configmap.yaml.tmpl b/config/internal/workflow-controller/configmap.yaml.tmpl new file mode 100644 index 000000000..45fdb2d70 --- /dev/null +++ b/config/internal/workflow-controller/configmap.yaml.tmpl @@ -0,0 +1,42 @@ +--- +apiVersion: v1 +kind: ConfigMap +metadata: + annotations: + internal.kpt.dev/upstream-identifier: '|ConfigMap|default|workflow-controller-configmap' + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: ds-pipeline-workflow-controller-{{.Name}} + namespace: {{.Namespace}} +data: + artifactRepository: | + archiveLogs: {{.APIServer.ArchiveLogs}} + s3: + endpoint: "{{.ObjectStorageConnection.Endpoint}}" + bucket: "{{.ObjectStorageConnection.Bucket}}" + # keyFormat is a format pattern to define how artifacts will be organized in a bucket. + # It can reference workflow metadata variables such as workflow.namespace, workflow.name, + # pod.name. Can also use strftime formating of workflow.creationTimestamp so that workflow + # artifacts can be organized by date. If omitted, will use `\{\{workflow.name\}\}/\{\{pod.name\}\}`, + # which has potential for have collisions, because names do not guarantee they are unique + # over the lifetime of the cluster. 
+ # Refer to https://kubernetes.io/docs/concepts/overview/working-with-objects/names/. + # + # The following format looks like: + # artifacts/my-workflow-abc123/2018/08/23/my-workflow-abc123-1234567890 + # Adding date into the path greatly reduces the chance of \{\{pod.name\}\} collision. + # keyFormat: "artifacts/\{\{workflow.name\}\}/\{\{workflow.creationTimestamp.Y\}\}/\{\{workflow.creationTimestamp.m\}\}/\{\{workflow.creationTimestamp.d\}\}/\{\{pod.name\}\}" # TODO + # insecure will disable TLS. Primarily used for minio installs not configured with TLS + insecure: {{.ObjectStorageConnection.Secure}} + accessKeySecret: + name: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" + key: "{{.ObjectStorageConnection.CredentialsSecret.AccessKey}}" + secretKeySecret: + name: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" + key: "{{.ObjectStorageConnection.CredentialsSecret.SecretKey}}" + containerRuntimeExecutor: emissary # TODO + executor: | + imagePullPolicy: IfNotPresent # TODO + diff --git a/config/internal/workflow-controller/deployment.yaml.tmpl b/config/internal/workflow-controller/deployment.yaml.tmpl new file mode 100644 index 000000000..b921e46ac --- /dev/null +++ b/config/internal/workflow-controller/deployment.yaml.tmpl @@ -0,0 +1,71 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + annotations: + internal.kpt.dev/upstream-identifier: apps|Deployment|default|workflow-controller + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: ds-pipeline-workflow-controller-{{.Name}} + namespace: {{.Namespace}} +spec: + selector: + matchLabels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + template: + metadata: + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + spec: + containers: + - args: + - --configmap + - ds-pipeline-workflow-controller-{{.Name}} 
+ - --executor-image + - gcr.io/ml-pipeline/argoexec:v3.3.10-license-compliance + - --namespaced + command: + - workflow-controller + env: + - name: LEADER_ELECTION_IDENTITY + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 6060 + initialDelaySeconds: 90 + periodSeconds: 60 + timeoutSeconds: 30 + name: ds-pipeline-workflow-controller + ports: + - containerPort: 9090 + name: metrics + - containerPort: 6060 + resources: + requests: + cpu: 100m + memory: 500Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + runAsNonRoot: true + nodeSelector: + kubernetes.io/os: linux + priorityClassName: ds-pipelines-workflow-controller-priorityclass + securityContext: + runAsNonRoot: true + serviceAccountName: ds-pipeline-workflow-controller-{{.Name}} diff --git a/config/internal/workflow-controller/role.yaml.tmpl b/config/internal/workflow-controller/role.yaml.tmpl new file mode 100644 index 000000000..42b944ac8 --- /dev/null +++ b/config/internal/workflow-controller/role.yaml.tmpl @@ -0,0 +1,131 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + annotations: + internal.kpt.dev/upstream-identifier: rbac.authorization.k8s.io|Role|default|argo-role + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: ds-pipeline-workflow-controller-role-{{.Name}} + namespace: {{.Namespace}} +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + 
- persistentvolumeclaims/finalizers + verbs: + - create + - update + - delete + - get +- apiGroups: + - argoproj.io + resources: + - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete + - create +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - argoproj.io + resources: + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - get + - delete diff --git a/config/internal/workflow-controller/rolebinding.yaml.tmpl b/config/internal/workflow-controller/rolebinding.yaml.tmpl new file mode 100644 index 000000000..dbafe5730 --- /dev/null +++ b/config/internal/workflow-controller/rolebinding.yaml.tmpl @@ -0,0 +1,20 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + annotations: + internal.kpt.dev/upstream-identifier: rbac.authorization.k8s.io|RoleBinding|default|argo-binding + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: ds-pipeline-workflow-controller-rolebinding-{{.Name}} + namespace: {{.Namespace}} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ds-pipeline-workflow-controller-role-{{.Name}} +subjects: +- kind: ServiceAccount + name: ds-pipeline-workflow-controller-{{.Name}} + namespace: 
{{.Namespace}} diff --git a/config/internal/workflow-controller/sa.yaml.tmpl b/config/internal/workflow-controller/sa.yaml.tmpl new file mode 100644 index 000000000..4ec448294 --- /dev/null +++ b/config/internal/workflow-controller/sa.yaml.tmpl @@ -0,0 +1,12 @@ +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + annotations: + internal.kpt.dev/upstream-identifier: '|ServiceAccount|default|argo' + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: ds-pipeline-workflow-controller-{{.Name}} + namespace: {{.Namespace}} diff --git a/config/internal/workflow-controller/service.yaml.tmpl b/config/internal/workflow-controller/service.yaml.tmpl new file mode 100644 index 000000000..17ef787c0 --- /dev/null +++ b/config/internal/workflow-controller/service.yaml.tmpl @@ -0,0 +1,26 @@ +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + internal.kpt.dev/upstream-identifier: '|Service|default|workflow-controller-metrics' + workflows.argoproj.io/description: | + This service is deprecated. It will be removed in v3.4. 
+ + https://github.com/argoproj/argo-workflows/issues/8441 + labels: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} + name: ds-pipeline-workflow-controller-metrics-{{.Name}} + namespace: {{.Namespace}} +spec: + ports: + - name: metrics + port: 9090 + protocol: TCP + targetPort: 9090 + selector: + app: ds-pipeline-workflow-controller-{{.Name}} + component: data-science-pipelines + dspa: {{.Name}} diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 358588a2e..cfa8273e8 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -80,6 +80,8 @@ spec: value: $(IMAGESV2_MLMDGRPC) - name: IMAGESV2_MLMDWRITER value: $(IMAGESV2_MLMDWRITER) + - name: IMAGESV2_WORKFLOWCONTROLLER + value: $(IMAGESV2_WORKFLOWCONTROLLER) - name: ZAP_LOG_LEVEL value: $(ZAP_LOG_LEVEL) - name: MAX_CONCURRENT_RECONCILES diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index 4dea82896..f5e37c263 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -288,6 +288,11 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
return ctrl.Result{}, err } + err = r.ReconcileWorkflowController(dspa, params) + if err != nil { + return ctrl.Result{}, err + } + } log.Info("Updating CR status") diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 79b8bfb00..15f8a8685 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -57,6 +57,7 @@ type DSPAParams struct { MLMD *dspa.MLMD CRDViewer *dspa.CRDViewer VisualizationServer *dspa.VisualizationServer + WorkflowController *dspa.WorkflowController DBConnection ObjectStorageConnection } diff --git a/controllers/workflow_controller.go b/controllers/workflow_controller.go new file mode 100644 index 000000000..eccbe0b20 --- /dev/null +++ b/controllers/workflow_controller.go @@ -0,0 +1,44 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" +) + +var workflowControllerTemplatesDir = "workflow-controller" + +func (r *DSPAReconciler) ReconcileWorkflowController(dsp *dspav1alpha1.DataSciencePipelinesApplication, + params *DSPAParams) error { + + log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) + + if !dsp.Spec.WorkflowController.Deploy { + log.Info("Skipping Application of Workflow Controller Resources") + return nil + } + + log.Info("Applying Workflow Controller Resources") + + err := r.ApplyDir(dsp, params, workflowControllerTemplatesDir) + if err != nil { + return err + } + + log.Info("Finished applying Workflow Controller Resources") + return nil +} diff --git a/controllers/workflow_controller_test.go b/controllers/workflow_controller_test.go new file mode 100644 index 000000000..31a4df87d --- /dev/null +++ b/controllers/workflow_controller_test.go @@ -0,0 +1,121 @@ +//go:build test_all || test_unit + +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package controllers + +import ( + "testing" + + dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" +) + +func TestDeployWorkflowController(t *testing.T) { + testNamespace := "testnamespace" + testDSPAName := "testdspa" + expectedWorkflowControllerName := "ds-pipeline-workflow-controller-testdspa" + + // Construct DSPASpec with deployed Workflow Controller + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + APIServer: &dspav1alpha1.APIServer{ + ArchiveLogs: true, + }, + WorkflowController: &dspav1alpha1.WorkflowController{ + Deploy: true, + }, + Database: &dspav1alpha1.Database{ + DisableHealthCheck: false, + MariaDB: &dspav1alpha1.MariaDB{ + Deploy: true, + }, + }, + ObjectStorage: &dspav1alpha1.ObjectStorage{ + DisableHealthCheck: false, + Minio: &dspav1alpha1.Minio{ + Deploy: false, + Image: "someimage", + }, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Namespace = testNamespace + dspa.Name = testDSPAName + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) + assert.Nil(t, err) + + // Ensure Workflow Controller Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileWorkflowController(dspa, params) + assert.Nil(t, err) + + // Ensure Workflow Controller Deployment now exists + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) + assert.True(t, created) + assert.Nil(t, err) + +} + +func TestDontDeployWorkflowController(t *testing.T) { + testNamespace :=
"testnamespace" + testDSPAName := "testdspa" + expectedWorkflowControllerName := "ds-pipeline-workflow-controller-testdspa" + + // Construct DSPASpec with non-deployed Workflow Controller + dspa := &dspav1alpha1.DataSciencePipelinesApplication{ + Spec: dspav1alpha1.DSPASpec{ + WorkflowController: &dspav1alpha1.WorkflowController{ + Deploy: false, + }, + }, + } + + // Enrich DSPA with name+namespace + dspa.Name = testDSPAName + dspa.Namespace = testNamespace + + // Create Context, Fake Controller and Params + ctx, params, reconciler := CreateNewTestObjects() + + // Ensure Workflow Controller Deployment doesn't yet exist + deployment := &appsv1.Deployment{} + created, err := reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) + + // Run test reconciliation + err = reconciler.ReconcileWorkflowController(dspa, params) + assert.Nil(t, err) + + // Ensure Workflow Controller Deployment still doesn't exist + deployment = &appsv1.Deployment{} + created, err = reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) + assert.False(t, created) + assert.Nil(t, err) +} From f25076315932896a520b7be65f21d00842d78394 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Thu, 26 Oct 2023 15:09:03 -0400 Subject: [PATCH 49/85] Env and Manifest adjustments for ArgoWf backend engine --- config/argo/priorityclass.yaml | 2 +- .../apiserver/default/deployment.yaml.tmpl | 33 +++++ .../default/service.ml-pipeline.yaml.tmpl | 28 ++++ .../internal/common/default/policy.yaml.tmpl | 62 +-------- .../default/service.minioservice.yaml.tmpl | 17 +++ .../metadata-grpc.configmap.yaml.tmpl | 10 ++ ...etadata-grpc.ml-pipeline.service.yaml.tmpl | 17 +++ .../persistence-agent/deployment.yaml.tmpl | 19 ++- config/rbac/argo_role.yaml | 125 ++++++++++++++++++ config/rbac/argo_role_binding.yaml | 14 ++ config/rbac/kustomization.yaml | 3 + 11 files changed, 265 insertions(+), 65
deletions(-) create mode 100644 config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl create mode 100644 config/internal/minio/default/service.minioservice.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl create mode 100644 config/internal/ml-metadata/metadata-grpc.ml-pipeline.service.yaml.tmpl create mode 100644 config/rbac/argo_role.yaml create mode 100644 config/rbac/argo_role_binding.yaml diff --git a/config/argo/priorityclass.yaml b/config/argo/priorityclass.yaml index e792f398d..fd2d371f4 100644 --- a/config/argo/priorityclass.yaml +++ b/config/argo/priorityclass.yaml @@ -6,5 +6,5 @@ metadata: internal.kpt.dev/upstream-identifier: scheduling.k8s.io|PriorityClass|default|workflow-controller labels: application-crd-id: kubeflow-pipelines - name: workflow-controller + name: ds-pipelines-workflow-controller-priorityclass value: 1000000 diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index f6140b951..a8a6a8e59 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -22,8 +22,14 @@ spec: spec: containers: - env: + - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION + value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" - name: POD_NAMESPACE value: "{{.Namespace}}" + - name: OBJECTSTORECONFIG_SECURE + value: "false" + - name: OBJECTSTORECONFIG_BUCKETNAME + value: "{{.ObjectStorageConnection.Bucket}}" - name: DBCONFIG_USER value: "{{.DBConnection.Username}}" - name: DBCONFIG_PASSWORD @@ -78,6 +84,31 @@ spec: value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" - name: DBCONFIG_CONMAXLIFETIMESEC value: "{{.APIServer.DBConfigConMaxLifetimeSec}}" + - name: DB_DRIVER_NAME + value: mysql + - name: DBCONFIG_MYSQLCONFIG_USER + value: "{{.DBConnection.Username}}" + - name: DBCONFIG_MYSQLCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: 
"{{.DBConnection.CredentialsSecret.Key}}" + name: "{{.DBConnection.CredentialsSecret.Name}}" + - name: DBCONFIG_MYSQLCONFIG_DBNAME + value: "{{.DBConnection.DBName}}" + - name: DBCONFIG_MYSQLCONFIG_HOST + value: "{{.DBConnection.Host}}" + - name: DBCONFIG_MYSQLCONFIG_PORT + value: "{{.DBConnection.Port}}" + - name: OBJECTSTORECONFIG_ACCESSKEY + valueFrom: + secretKeyRef: + key: "{{.ObjectStorageConnection.CredentialsSecret.AccessKey}}" + name: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" + - name: OBJECTSTORECONFIG_SECRETACCESSKEY + valueFrom: + secretKeyRef: + key: "{{.ObjectStorageConnection.CredentialsSecret.SecretKey}}" + name: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST value: "ds-pipeline-visualizationserver" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT @@ -122,6 +153,8 @@ spec: value: ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: Workflow image: {{.APIServer.Image}} imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl b/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl new file mode 100644 index 000000000..919bf4cae --- /dev/null +++ b/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: ml-pipeline + namespace: {{.Namespace}} + annotations: + service.alpha.openshift.io/serving-cert-secret-name: ds-pipelines-proxy-tls-{{.Name}} + labels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines +spec: + ports: + - name: oauth + port: 8443 + protocol: TCP + targetPort: oauth + - name: http + port: 8888 + protocol: TCP + targetPort: http + - name: grpc + port: 8887 + protocol: TCP + targetPort: 8887 + selector: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines + diff --git 
a/config/internal/common/default/policy.yaml.tmpl b/config/internal/common/default/policy.yaml.tmpl index 8ec9aff82..c1bd56ffc 100644 --- a/config/internal/common/default/policy.yaml.tmpl +++ b/config/internal/common/default/policy.yaml.tmpl @@ -11,71 +11,11 @@ spec: policyTypes: - Ingress ingress: - # Match all sources for oauth endpoint - ports: - protocol: TCP port: 8443 - # We only allow DSPA components to communicate - # by bypassing oauth proxy, all external - # traffic should go through oauth proxy - - from: - - namespaceSelector: - matchLabels: - name: openshift-user-workload-monitoring - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: redhat-ods-monitoring - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: openshift-pipelines - - podSelector: - matchLabels: - app.kubernetes.io/managed-by: tekton-pipelines - pipelines.kubeflow.org/v2_component: 'true' - - podSelector: - matchLabels: - app: mariadb-{{.Name}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: minio-{{.Name}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: ds-pipeline-ui-{{.Name}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: {{.PersistentAgentDefaultResourceName}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: {{.ScheduledWorkflowDefaultResourceName}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: ds-pipeline-metadata-envoy-{{.Name}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: ds-pipeline-metadata-grpc-{{.Name}} - component: data-science-pipelines - - podSelector: - matchLabels: - app: ds-pipeline-metadata-writer-{{.Name}} - component: data-science-pipelines - ports: + - ports: - protocol: TCP port: 8888 - protocol: TCP port: 8887 - - ports: - - protocol: TCP - port: 8080 - from: - - podSelector: - matchLabels: - app.kubernetes.io/name: data-science-pipelines-operator-driver - 
namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: openshift-pipelines diff --git a/config/internal/minio/default/service.minioservice.yaml.tmpl b/config/internal/minio/default/service.minioservice.yaml.tmpl new file mode 100644 index 000000000..a4b905296 --- /dev/null +++ b/config/internal/minio/default/service.minioservice.yaml.tmpl @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: minio-service + namespace: {{.Namespace}} + labels: + app: minio-{{.Name}} + component: data-science-pipelines +spec: + ports: + - name: http + port: 9000 + protocol: TCP + targetPort: 9000 + selector: + app: minio-{{.Name}} + component: data-science-pipelines diff --git a/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl b/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl new file mode 100644 index 000000000..a92d8133b --- /dev/null +++ b/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: metadata-grpc-configmap + namespace: {{.Namespace}} + labels: + component: metadata-grpc-server +data: + METADATA_GRPC_SERVICE_HOST: "ds-pipeline-metadata-grpc-{{.Name}}.{{.Namespace}}.svc.cluster.local" + METADATA_GRPC_SERVICE_PORT: "8080" \ No newline at end of file diff --git a/config/internal/ml-metadata/metadata-grpc.ml-pipeline.service.yaml.tmpl b/config/internal/ml-metadata/metadata-grpc.ml-pipeline.service.yaml.tmpl new file mode 100644 index 000000000..aa1823fb6 --- /dev/null +++ b/config/internal/ml-metadata/metadata-grpc.ml-pipeline.service.yaml.tmpl @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Service +metadata: + name: metadata-grpc-service + namespace: {{.Namespace}} + labels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines +spec: + ports: + - name: grpc-api + port: {{.MLMD.GRPC.Port}} + protocol: TCP + selector: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines + type: ClusterIP diff --git 
a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index 281e544cd..f4f77d839 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -34,8 +34,8 @@ spec: value: kubeflow-userid - name: KUBEFLOW_USERID_PREFIX value: "" - - name: EXECUTIONTYPE - value: PipelineRun + #- name: EXECUTIONTYPE + # value: PipelineRun image: "{{.PersistenceAgent.Image}}" imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent @@ -85,4 +85,17 @@ spec: memory: {{.PersistenceAgent.Resources.Limits.Memory}} {{ end }} {{ end }} - serviceAccountName: {{.PersistentAgentDefaultResourceName}} + volumeMounts: + - mountPath: /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token + name: persistenceagent-sa-token + subPath: ds-pipeline-persistenceagent-{{.Name}}-token + + serviceAccountName: ds-pipeline-persistenceagent-{{.Name}} + volumes: + - name: persistenceagent-sa-token + projected: + sources: + - serviceAccountToken: + audience: pipelines.kubeflow.org + expirationSeconds: 3600 + path: ds-pipeline-persistenceagent-{{.Name}}-token diff --git a/config/rbac/argo_role.yaml b/config/rbac/argo_role.yaml new file mode 100644 index 000000000..cdabe2b46 --- /dev/null +++ b/config/rbac/argo_role.yaml @@ -0,0 +1,125 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-argo-role +rules: +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update +- apiGroups: + - "" + resources: + - pods + - pods/exec + verbs: + - create + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - watch + - list +- apiGroups: + - "" + resources: + - persistentvolumeclaims + - persistentvolumeclaims/finalizers + verbs: + - create + - update + - delete + - get +- apiGroups: + - argoproj.io + resources: 
+ - workflows + - workflows/finalizers + - workflowtasksets + - workflowtasksets/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete + - create +- apiGroups: + - argoproj.io + resources: + - workflowtemplates + - workflowtemplates/finalizers + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - list + - watch + - deletecollection +- apiGroups: + - "" + resources: + - serviceaccounts + verbs: + - get + - list +- apiGroups: + - "" + resources: + - secrets + verbs: + - get +- apiGroups: + - argoproj.io + resources: + - cronworkflows + - cronworkflows/finalizers + verbs: + - get + - list + - watch + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - get + - delete diff --git a/config/rbac/argo_role_binding.yaml b/config/rbac/argo_role_binding.yaml new file mode 100644 index 000000000..3de651627 --- /dev/null +++ b/config/rbac/argo_role_binding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: data-science-pipelines-operator + name: manager-argo-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-argo-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: datasciencepipelinesapplications-controller diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index ec0220afb..fe94772a8 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -9,3 +9,6 @@ resources: - role_binding.yaml - role.yaml - service_account.yaml +- aggregate_dspa_role.yaml +- argo_role.yaml +- argo_role_binding.yaml From 91c6968b242c61d6292d24312e0c2ab9cc4e8159 Mon Sep 17 00:00:00 2001 From: Giulio 
Frasca Date: Wed, 1 Nov 2023 15:13:47 -0400 Subject: [PATCH 50/85] Add EngineDriver field for DSPA --- api/v1alpha1/dspipeline_types.go | 3 +++ ...ations.opendatahub.io_datasciencepipelinesapplications.yaml | 3 +++ 2 files changed, 6 insertions(+) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index c162df06f..9e5cefcba 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -51,6 +51,9 @@ type DSPASpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default:="v1" DSPVersion string `json:"dspVersion,omitempty"` + // +kubebuilder:validation:Optional + // +kubebuilder:default:="tekton" + EngineDriver string `json:"engineDriver,omitempty"` // DS Pipelines Argo Workflow Controller Configuration. // +kubebuilder:default:={deploy: false} *WorkflowController `json:"workflowController,omitempty"` diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 66171d9d0..73defd443 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -300,6 +300,9 @@ spec: dspVersion: default: v1 type: string + engineDriver: + default: tekton + type: string mlmd: default: deploy: true From 407ecddef0acbdf8e3a9cd07d8338033776cf863 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 1 Nov 2023 15:52:25 -0400 Subject: [PATCH 51/85] Add V2 Argo Images --- config/base/kustomization.yaml | 134 +++++++++++++++--- config/base/params.env | 31 ++-- config/configmaps/files/config.yaml | 36 +++-- .../visualizationserver/deployment.yaml.tmpl | 2 +- .../workflow-controller/deployment.yaml.tmpl | 2 +- config/manager/manager.yaml | 64 ++++++--- controllers/config/defaults.go | 33 +++-- 
controllers/dspipeline_params.go | 20 +-- kfdef/kfdef.yaml | 46 ++++-- 9 files changed, 280 insertions(+), 88 deletions(-) diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index 5518e7580..cdab43931 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -148,75 +148,173 @@ vars: apiVersion: v1 fieldref: fieldpath: data.DSPO_REQUEUE_TIME - - name: IMAGESV2_APISERVER + - name: IMAGESV2_ARGO_APISERVER objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_APISERVER - - name: IMAGESV2_ARTIFACT + fieldpath: data.IMAGESV2_ARGO_APISERVER + - name: IMAGESV2_ARGO_ARTIFACT objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_ARTIFACT - - name: IMAGESV2_CACHE + fieldpath: data.IMAGESV2_ARGO_ARTIFACT + - name: IMAGESV2_ARGO_PERSISTENTAGENT objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_CACHE - - name: IMAGESV2_MOVERESULTSIMAGE + fieldpath: data.IMAGESV2_ARGO_PERSISTENTAGENT + - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_MOVERESULTSIMAGE - - name: IMAGESV2_PERSISTENTAGENT + fieldpath: data.IMAGESV2_ARGO_SCHEDULEDWORKFLOW + - name: IMAGESV2_ARGO_CACHE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_CACHE + - name: IMAGESV2_ARGO_MOVERESULTSIMAGE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_MOVERESULTSIMAGE + - name: IMAGESV2_ARGO_PERSISTENTAGENT objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_PERSISTENTAGENT - - name: IMAGESV2_SCHEDULEDWORKFLOW + - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_SCHEDULEDWORKFLOW - - name: 
IMAGESV2_MLMDENVOY + - name: IMAGESV2_ARGO_MLMDENVOY + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_MLMDENVOY + - name: IMAGESV2_ARGO_MLMDGRPC + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_MLMDGRPC + - name: IMAGESV2_ARGO_MLMDWRITER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_MLMDWRITER + - name: IMAGESV2_ARGO_VISUALIZATIONSERVER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_VISUALIZATIONSERVER + - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_ARGO_WORKFLOWCONTROLLER + - name: IMAGESV2_TEKTON_APISERVER + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_TEKTON_APISERVER + - name: IMAGESV2_TEKTON_ARTIFACT + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_TEKTON_ARTIFACT + - name: IMAGESV2_TEKTON_PERSISTENTAGENT + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_TEKTON_PERSISTENTAGENT + - name: IMAGESV2_TEKTON_SCHEDULEDWORKFLOW + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_TEKTON_SCHEDULEDWORKFLOW + - name: IMAGESV2_TEKTON_CACHE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_TEKTON_CACHE + - name: IMAGESV2_TEKTON_MOVERESULTSIMAGE + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: data.IMAGESV2_TEKTON_MOVERESULTSIMAGE + - name: IMAGESV2_TEKTON_MLMDENVOY + objref: + kind: ConfigMap + name: dspo-parameters + apiVersion: v1 + fieldref: + fieldpath: 
data.IMAGESV2_TEKTON_MLMDENVOY + - name: IMAGESV2_TEKTON_MLMDGRPC objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_MLMDENVOY - - name: IMAGESV2_MLMDGRPC + fieldpath: data.IMAGESV2_TEKTON_MLMDGRPC + - name: IMAGESV2_TEKTON_MLMDWRITER objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_MLMDGRPC - - name: IMAGESV2_MLMDWRITER + fieldpath: data.IMAGESV2_TEKTON_MLMDWRITER + - name: IMAGESV2_TEKTON_VISUALIZATIONSERVER objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_MLMDWRITER - - name: IMAGESV2_WORKFLOWCONTROLLER + fieldpath: data.IMAGESV2_TEKTON_VISUALIZATIONSERVER + - name: IMAGESV2_TEKTON_WORKFLOWCONTROLLER objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_WORKFLOWCONTROLLER + fieldpath: data.IMAGESV2_TEKTON_WORKFLOWCONTROLLER configurations: - params.yaml diff --git a/config/base/params.env b/config/base/params.env index 3350afc9a..56d190d3d 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -12,16 +12,27 @@ IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 -IMAGESV2_APISERVER=quay.io/rmartine/apiserver:v2 -IMAGESV2_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main -IMAGESV2_PERSISTENTAGENT=quay.io/rmartine/persistenceagent-dev:6b8723529 -IMAGESV2_SCHEDULEDWORKFLOW=quay.io/rmartine/swf-dev:6b8723529 -IMAGESV2_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 -IMAGESV2_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 -IMAGESV2_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 -IMAGESV2_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 
-IMAGESV2_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 -IMAGESV2_WORKFLOWCONTROLLER=gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance +IMAGESV2_ARGO_APISERVER=gcr.io/ml-pipeline/api-server:2.0.2 +IMAGESV2_ARGO_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main +IMAGESV2_ARGO_PERSISTENTAGENT=gcr.io/ml-pipeline/persistenceagent:2.0.2 +IMAGESV2_ARGO_SCHEDULEDWORKFLOW=gcr.io/ml-pipeline/scheduledworkflow:2.0.2 +IMAGESV2_ARGO_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.2 +IMAGESV2_ARGO_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 +IMAGESV2_ARGO_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.2 +IMAGESV2_ARGO_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 +IMAGESV2_ARGO_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 +IMAGESV2_ARGO_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.2 +IMAGESV2_ARGO_WORKFLOWCONTROLLER=gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance +IMAGESV2_TEKTON_APISERVER=quay.io/rmartine/apiserver:v2 +IMAGESV2_TEKTON_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main +IMAGESV2_TEKTON_PERSISTENTAGENT=quay.io/rmartine/persistenceagent-dev:6b8723529 +IMAGESV2_TEKTON_SCHEDULEDWORKFLOW=quay.io/rmartine/swf-dev:6b8723529 +IMAGESV2_TEKTON_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 +IMAGESV2_TEKTON_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 +IMAGESV2_TEKTON_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 +IMAGESV2_TEKTON_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 +IMAGESV2_TEKTON_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 +IMAGESV2_TEKTON_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 ZAP_LOG_LEVEL=info MAX_CONCURRENT_RECONCILES=10 DSPO_HEALTHCHECK_DATABASE_CONNECTIONTIMEOUT=15s diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 230bc0c0d..dc5d0ef6f 100644 --- 
a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -13,16 +13,32 @@ Images: CRDViewer: $(IMAGES_CRDVIEWER) VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) ImagesV2: - ApiServer: $(IMAGESV2_APISERVER) - Artifact: $(IMAGESV2_ARTIFACT) - Cache: $(IMAGESV2_CACHE) - MoveResultsImage: $(IMAGESV2_MOVERESULTSIMAGE) - PersistentAgent: $(IMAGESV2_PERSISTENTAGENT) - ScheduledWorkflow: $(IMAGESV2_SCHEDULEDWORKFLOW) - MlmdEnvoy: $(IMAGESV2_MLMDENVOY) - MlmdGRPC: $(IMAGESV2_MLMDGRPC) - MlmdWriter: $(IMAGESV2_MLMDWRITER) - WorkflowController: $(IMAGESV2_WORKFLOWCONTROLLER) + Argo: + ApiServer: $(IMAGESV2_ARGO_APISERVER) + Artifact: $(IMAGESV2_ARGO_ARTIFACT) + Cache: $(IMAGESV2_ARGO_CACHE) + MoveResultsImage: $(IMAGESV2_ARGO_MOVERESULTSIMAGE) + PersistentAgent: $(IMAGESV2_ARGO_PERSISTENTAGENT) + ScheduledWorkflow: $(IMAGESV2_ARGO_SCHEDULEDWORKFLOW) + MlmdEnvoy: $(IMAGESV2_ARGO_MLMDENVOY) + MlmdGRPC: $(IMAGESV2_ARGO_MLMDGRPC) + MlmdWriter: $(IMAGESV2_ARGO_MLMDWRITER) + VisualizationServer: $(IMAGESV2_ARGO_VISUALIZATIONSERVER) + WorkflowController: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) + Tekton: + ApiServer: $(IMAGESV2_TEKTON_APISERVER) + Artifact: $(IMAGESV2_TEKTON_ARTIFACT) + Cache: $(IMAGESV2_TEKTON_CACHE) + MoveResultsImage: $(IMAGESV2_TEKTON_MOVERESULTSIMAGE) + PersistentAgent: $(IMAGESV2_TEKTON_PERSISTENTAGENT) + ScheduledWorkflow: $(IMAGESV2_TEKTON_SCHEDULEDWORKFLOW) + MlmdEnvoy: $(IMAGESV2_TEKTON_MLMDENVOY) + MlmdGRPC: $(IMAGESV2_TEKTON_MLMDGRPC) + MlmdWriter: $(IMAGESV2_TEKTON_MLMDWRITER) + VisualizationServer: $(IMAGESV2_TEKTON_VISUALIZATIONSERVER) + # WorkflowController is an argo-only component + # Using argo image here only for fault tolerance, but should handle this in code.
+ WorkflowController: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) DSPO: HealthCheck: Database: diff --git a/config/internal/visualizationserver/deployment.yaml.tmpl b/config/internal/visualizationserver/deployment.yaml.tmpl index 26abdeedc..46524439b 100644 --- a/config/internal/visualizationserver/deployment.yaml.tmpl +++ b/config/internal/visualizationserver/deployment.yaml.tmpl @@ -23,7 +23,7 @@ spec: dspa: {{.Name}} spec: containers: - - image: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 + - image: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 # TODO imagePullPolicy: IfNotPresent livenessProbe: exec: diff --git a/config/internal/workflow-controller/deployment.yaml.tmpl b/config/internal/workflow-controller/deployment.yaml.tmpl index b921e46ac..1ae3a1c7f 100644 --- a/config/internal/workflow-controller/deployment.yaml.tmpl +++ b/config/internal/workflow-controller/deployment.yaml.tmpl @@ -38,7 +38,7 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name - image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance + image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance # TODO livenessProbe: failureThreshold: 3 httpGet: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index cfa8273e8..7e2dcca65 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -62,26 +62,50 @@ spec: value: $(IMAGES_CRDVIEWER) - name: IMAGES_VISUALIZATIONSERVER value: $(IMAGES_VISUALIZATIONSERVER) - - name: IMAGESV2_APISERVER - value: $(IMAGESV2_APISERVER) - - name: IMAGESV2_ARTIFACT - value: $(IMAGESV2_ARTIFACT) - - name: IMAGESV2_PERSISTENTAGENT - value: $(IMAGESV2_PERSISTENTAGENT) - - name: IMAGESV2_SCHEDULEDWORKFLOW - value: $(IMAGESV2_SCHEDULEDWORKFLOW) - - name: IMAGESV2_CACHE - value: $(IMAGESV2_CACHE) - - name: IMAGESV2_MOVERESULTSIMAGE - value: $(IMAGESV2_MOVERESULTSIMAGE) - - name: IMAGESV2_MLMDENVOY - value: $(IMAGESV2_MLMDENVOY) - - name: IMAGESV2_MLMDGRPC - value: $(IMAGESV2_MLMDGRPC) - - name: 
IMAGESV2_MLMDWRITER - value: $(IMAGESV2_MLMDWRITER) - - name: IMAGESV2_WORKFLOWCONTROLLER - value: $(IMAGESV2_WORKFLOWCONTROLLER) + - name: IMAGESV2_ARGO_APISERVER + value: $(IMAGESV2_ARGO_APISERVER) + - name: IMAGESV2_ARGO_ARTIFACT + value: $(IMAGESV2_ARGO_ARTIFACT) + - name: IMAGESV2_ARGO_PERSISTENTAGENT + value: $(IMAGESV2_ARGO_PERSISTENTAGENT) + - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW + value: $(IMAGESV2_ARGO_SCHEDULEDWORKFLOW) + - name: IMAGESV2_ARGO_CACHE + value: $(IMAGESV2_ARGO_CACHE) + - name: IMAGESV2_ARGO_MOVERESULTSIMAGE + value: $(IMAGESV2_ARGO_MOVERESULTSIMAGE) + - name: IMAGESV2_ARGO_MLMDENVOY + value: $(IMAGESV2_ARGO_MLMDENVOY) + - name: IMAGESV2_ARGO_MLMDGRPC + value: $(IMAGESV2_ARGO_MLMDGRPC) + - name: IMAGESV2_ARGO_MLMDWRITER + value: $(IMAGESV2_ARGO_MLMDWRITER) + - name: IMAGESV2_ARGO_VISUALIZATIONSERVER + value: $(IMAGESV2_ARGO_VISUALIZATIONSERVER) + - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER + value: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) + - name: IMAGESV2_TEKTON_APISERVER + value: $(IMAGESV2_TEKTON_APISERVER) + - name: IMAGESV2_TEKTON_ARTIFACT + value: $(IMAGESV2_TEKTON_ARTIFACT) + - name: IMAGESV2_TEKTON_PERSISTENTAGENT + value: $(IMAGESV2_TEKTON_PERSISTENTAGENT) + - name: IMAGESV2_TEKTON_SCHEDULEDWORKFLOW + value: $(IMAGESV2_TEKTON_SCHEDULEDWORKFLOW) + - name: IMAGESV2_TEKTON_CACHE + value: $(IMAGESV2_TEKTON_CACHE) + - name: IMAGESV2_TEKTON_MOVERESULTSIMAGE + value: $(IMAGESV2_TEKTON_MOVERESULTSIMAGE) + - name: IMAGESV2_TEKTON_MLMDENVOY + value: $(IMAGESV2_TEKTON_MLMDENVOY) + - name: IMAGESV2_TEKTON_MLMDGRPC + value: $(IMAGESV2_TEKTON_MLMDGRPC) + - name: IMAGESV2_TEKTON_MLMDWRITER + value: $(IMAGESV2_TEKTON_MLMDWRITER) + - name: IMAGESV2_TEKTON_VISUALIZATIONSERVER + value: $(IMAGESV2_TEKTON_VISUALIZATIONSERVER) + - name: IMAGESV2_TEKTON_WORKFLOWCONTROLLER + value: $(IMAGESV2_TEKTON_WORKFLOWCONTROLLER) - name: ZAP_LOG_LEVEL value: $(ZAP_LOG_LEVEL) - name: MAX_CONCURRENT_RECONCILES diff --git a/controllers/config/defaults.go 
b/controllers/config/defaults.go index 6d6f0d827..5c5bf238b 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -76,17 +76,30 @@ const ( RequeueTimeConfigName = "DSPO.RequeueTime" ) -// DSPV2 Image Paths +// DSPV2-Argo Image Paths const ( - APIServerImagePathV2 = "ImagesV2.ApiServer" - APIServerArtifactImagePathV2 = "ImagesV2.Artifact" - APIServerCacheImagePathV2 = "ImagesV2.Cache" - APIServerMoveResultsImagePathV2 = "ImagesV2.MoveResultsImage" - PersistenceAgentImagePathV2 = "ImagesV2.PersistentAgent" - ScheduledWorkflowImagePathV2 = "ImagesV2.ScheduledWorkflow" - MlmdEnvoyImagePathV2 = "ImagesV2.MlmdEnvoy" - MlmdGRPCImagePathV2 = "ImagesV2.MlmdGRPC" - MlmdWriterImagePathV2 = "ImagesV2.MlmdWriter" + APIServerImagePathV2Argo = "ImagesV2.Argo.ApiServer" + APIServerArtifactImagePathV2Argo = "ImagesV2.Argo.Artifact" + APIServerCacheImagePathV2Argo = "ImagesV2.Argo.Cache" + APIServerMoveResultsImagePathV2Argo = "ImagesV2.Argo.MoveResultsImage" + PersistenceAgentImagePathV2Argo = "ImagesV2.Argo.PersistentAgent" + ScheduledWorkflowImagePathV2Argo = "ImagesV2.Argo.ScheduledWorkflow" + MlmdEnvoyImagePathV2Argo = "ImagesV2.Argo.MlmdEnvoy" + MlmdGRPCImagePathV2Argo = "ImagesV2.Argo.MlmdGRPC" + MlmdWriterImagePathV2Argo = "ImagesV2.Argo.MlmdWriter" +) + +// DSPV2-Tekton Image Paths +const ( + APIServerImagePathV2Tekton = "ImagesV2.Tekton.ApiServer" + APIServerArtifactImagePathV2Tekton = "ImagesV2.Tekton.Artifact" + APIServerCacheImagePathV2Tekton = "ImagesV2.Tekton.Cache" + APIServerMoveResultsImagePathV2Tekton = "ImagesV2.Tekton.MoveResultsImage" + PersistenceAgentImagePathV2Tekton = "ImagesV2.Tekton.PersistentAgent" + ScheduledWorkflowImagePathV2Tekton = "ImagesV2.Tekton.ScheduledWorkflow" + MlmdEnvoyImagePathV2Tekton = "ImagesV2.Tekton.MlmdEnvoy" + MlmdGRPCImagePathV2Tekton = "ImagesV2.Tekton.MlmdGRPC" + MlmdWriterImagePathV2Tekton = "ImagesV2.Tekton.MlmdWriter" ) // DSPA Status Condition Types diff --git a/controllers/dspipeline_params.go 
b/controllers/dspipeline_params.go index 15f8a8685..366641299 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -368,9 +368,9 @@ func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelin MlmdGRPCImagePath := config.MlmdGRPCImagePath MlmdWriterImagePath := config.MlmdWriterImagePath if p.UsingV2Pipelines(dsp) { - MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2 - MlmdGRPCImagePath = config.MlmdGRPCImagePathV2 - MlmdWriterImagePath = config.MlmdWriterImagePathV2 + MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Tekton + MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Tekton + MlmdWriterImagePath = config.MlmdWriterImagePathV2Tekton } if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ @@ -445,10 +445,10 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip APIServerCacheImagePath := config.APIServerCacheImagePath APIServerMoveResultsImagePath := config.APIServerMoveResultsImagePath if pipelinesV2Images { - APIServerImagePath = config.APIServerImagePathV2 - APIServerArtifactImagePath = config.APIServerArtifactImagePathV2 - APIServerCacheImagePath = config.APIServerCacheImagePathV2 - APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2 + APIServerImagePath = config.APIServerImagePathV2Tekton + APIServerArtifactImagePath = config.APIServerArtifactImagePathV2Tekton + APIServerCacheImagePath = config.APIServerCacheImagePathV2Tekton + APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2Tekton } serverImageFromConfig := config.GetStringConfigWithDefault(APIServerImagePath, config.DefaultImageValue) @@ -486,7 +486,7 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip if p.PersistenceAgent != nil { PersistenceAgentImagePath := config.PersistenceAgentImagePath if pipelinesV2Images { - PersistenceAgentImagePath = config.PersistenceAgentImagePathV2 + PersistenceAgentImagePath = config.PersistenceAgentImagePathV2Tekton } 
persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(PersistenceAgentImagePath, config.DefaultImageValue) setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image) @@ -495,7 +495,7 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip if p.ScheduledWorkflow != nil { ScheduledWorkflowImagePath := config.ScheduledWorkflowImagePath if pipelinesV2Images { - ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2 + ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2Tekton } scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(ScheduledWorkflowImagePath, config.DefaultImageValue) setStringDefault(scheduledWorkflowImageFromConfig, &p.ScheduledWorkflow.Image) @@ -510,6 +510,8 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip setResourcesDefault(config.MlPipelineUIResourceRequirements, &p.MlPipelineUI.Resources) } + // TODO (gfrasca): believe we need to set default VisualizationServer and WorkflowController Images here + err := p.SetupMLMD(ctx, dsp, client, log) if err != nil { return err diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 22157c3b9..289afee13 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -34,24 +34,52 @@ spec: value: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 - name: IMAGES_VISUALIZATIONSERVER value: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 - - name: IMAGESV2_APISERVER + - name: IMAGESV2_ARGO_APISERVER + value: gcr.io/ml-pipeline/api-server:2.0.2 + - name: IMAGESV2_ARGO_ARTIFACT + value: quay.io/opendatahub/ds-pipelines-artifact-manager:main + - name: IMAGESV2_ARGO_PERSISTENTAGENT + value: gcr.io/ml-pipeline/persistenceagent:2.0.2 + - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW + value: gcr.io/ml-pipeline/scheduledworkflow:2.0.2 + - name: IMAGESV2_ARGO_CACHE + value: registry.access.redhat.com/ubi8/ubi-minimal:8.7 + - name: IMAGESV2_ARGO_CRDVIEWER + value: 
gcr.io/ml-pipeline/viewer-crd-controller:2.0.2 + - name: IMAGESV2_ARGO_MOVERESULTSIMAGE + value: registry.access.redhat.com/ubi8/ubi-micro:8.7 + - name: IMAGESV2_ARGO_MLMDENVOY + value: gcr.io/ml-pipeline/metadata-envoy:2.0.2 + - name: IMAGESV2_ARGO_MLMDGRPC + value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 + - name: IMAGESV2_ARGO_MLMDWRITER + value: gcr.io/ml-pipeline/metadata-writer:2.0.2 + - name: IMAGESV2_ARGO_VISUALIZATIONSERVER + value: gcr.io/ml-pipeline/visualization-server:2.0.2 + - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER + value: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance + - name: IMAGESV2_TEKTON_APISERVER value: quay.io/rmartine/apiserver:v2 - - name: IMAGESV2_ARTIFACT + - name: IMAGESV2_TEKTON_ARTIFACT value: quay.io/opendatahub/ds-pipelines-artifact-manager:main - - name: IMAGESV2_PERSISTENTAGENT + - name: IMAGESV2_TEKTON_PERSISTENTAGENT value: quay.io/rmartine/persistenceagent-dev:6b8723529 - - name: IMAGESV2_SCHEDULEDWORKFLOW + - name: IMAGESV2_TEKTON_SCHEDULEDWORKFLOW value: quay.io/rmartine/swf-dev:6b8723529 - - name: IMAGESV2_CACHE + - name: IMAGESV2_TEKTON_CACHE value: registry.access.redhat.com/ubi8/ubi-minimal:8.7 - - name: IMAGESV2_MOVERESULTSIMAGE + - name: IMAGESV2_TEKTON_MOVERESULTSIMAGE value: registry.access.redhat.com/ubi8/ubi-micro:8.7 - - name: IMAGESV2_MLMDENVOY + - name: IMAGESV2_TEKTON_MLMDENVOY value: gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 - - name: IMAGESV2_MLMDGRPC + - name: IMAGESV2_TEKTON_MLMDGRPC value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 - - name: IMAGESV2_MLMDWRITER + - name: IMAGESV2_TEKTON_MLMDWRITER value: gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 + - name: IMAGESV2_TEKTON_VISUALIZATIONSERVER + value: gcr.io/ml-pipeline/visualization-server:2.0.2 + - name: IMAGESV2_TEKTON_WORKFLOWCONTROLLER + value: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance repoRef: name: manifests From 5d37266dd564c6ca58d63703450616302108a0ef Mon Sep 17 00:00:00 2001 
From: Giulio Frasca Date: Wed, 1 Nov 2023 16:07:37 -0400 Subject: [PATCH 52/85] Update Images based on EngineDriver specified in DSPA --- controllers/dspipeline_params.go | 55 ++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 9 deletions(-) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 366641299..fa8511e0e 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -87,6 +87,14 @@ func (p *DSPAParams) UsingV2Pipelines(dsp *dspa.DataSciencePipelinesApplication) return dsp.Spec.DSPVersion == "v2" } +func (p *DSPAParams) UsingArgoEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { + return dsp.Spec.EngineDriver == "argo" +} + +func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { + return dsp.Spec.DSPVersion == "tekton" +} + // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. func (p *DSPAParams) UsingExternalDB(dsp *dspa.DataSciencePipelinesApplication) bool { if dsp.Spec.Database != nil && dsp.Spec.Database.ExternalDB != nil { @@ -368,9 +376,17 @@ func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelin MlmdGRPCImagePath := config.MlmdGRPCImagePath MlmdWriterImagePath := config.MlmdWriterImagePath if p.UsingV2Pipelines(dsp) { - MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Tekton - MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Tekton - MlmdWriterImagePath = config.MlmdWriterImagePathV2Tekton + if p.UsingArgoEngineDriver(dsp) { + MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Argo + MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Argo + MlmdWriterImagePath = config.MlmdWriterImagePathV2Argo + } else if p.UsingTektonEngineDriver(dsp) { + MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Tekton + MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Tekton + MlmdWriterImagePath = config.MlmdWriterImagePathV2Tekton + } else { + return fmt.Errorf(fmt.Sprintf("Illegal 
Engine Driver (%s) specified, cannot continue.", dsp.Spec.EngineDriver)) + } } if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ @@ -438,6 +454,11 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip p.PiplinesCABundleMountPath = config.PiplinesCABundleMountPath pipelinesV2Images := p.UsingV2Pipelines(dsp) + usingArgoEngine := p.UsingArgoEngineDriver(dsp) + usingTektonEngine := p.UsingTektonEngineDriver(dsp) + // if !usingArgoEngine && !usingTektonEngine { + // return fmt.Errorf(fmt.Sprintf("Illegal Engine Driver (%s) specified, cannot continue.", dsp.Spec.EngineDriver)) + // } if p.APIServer != nil { APIServerImagePath := config.APIServerImagePath @@ -445,10 +466,17 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip APIServerCacheImagePath := config.APIServerCacheImagePath APIServerMoveResultsImagePath := config.APIServerMoveResultsImagePath if pipelinesV2Images { - APIServerImagePath = config.APIServerImagePathV2Tekton - APIServerArtifactImagePath = config.APIServerArtifactImagePathV2Tekton - APIServerCacheImagePath = config.APIServerCacheImagePathV2Tekton - APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2Tekton + if usingArgoEngine { + APIServerImagePath = config.APIServerImagePathV2Argo + APIServerArtifactImagePath = config.APIServerArtifactImagePathV2Argo + APIServerCacheImagePath = config.APIServerCacheImagePathV2Argo + APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2Argo + } else if usingTektonEngine { + APIServerImagePath = config.APIServerImagePathV2Tekton + APIServerArtifactImagePath = config.APIServerArtifactImagePathV2Tekton + APIServerCacheImagePath = config.APIServerCacheImagePathV2Tekton + APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2Tekton + } } serverImageFromConfig := config.GetStringConfigWithDefault(APIServerImagePath, config.DefaultImageValue) @@ -486,7 +514,11 @@ func (p *DSPAParams) ExtractParams(ctx 
context.Context, dsp *dspa.DataSciencePip if p.PersistenceAgent != nil { PersistenceAgentImagePath := config.PersistenceAgentImagePath if pipelinesV2Images { - PersistenceAgentImagePath = config.PersistenceAgentImagePathV2Tekton + if usingArgoEngine { + PersistenceAgentImagePath = config.PersistenceAgentImagePathV2Argo + } else if usingTektonEngine { + PersistenceAgentImagePath = config.PersistenceAgentImagePathV2Tekton + } } persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(PersistenceAgentImagePath, config.DefaultImageValue) setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image) @@ -495,7 +527,12 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip if p.ScheduledWorkflow != nil { ScheduledWorkflowImagePath := config.ScheduledWorkflowImagePath if pipelinesV2Images { - ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2Tekton + if usingArgoEngine { + ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2Argo + + } else if usingTektonEngine { + ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2Tekton + } } scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(ScheduledWorkflowImagePath, config.DefaultImageValue) setStringDefault(scheduledWorkflowImageFromConfig, &p.ScheduledWorkflow.Image) From 5bc68fdade357bc62375b793169956c8d6165727 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 1 Nov 2023 16:20:43 -0400 Subject: [PATCH 53/85] Handle dynamically deploying NetworkPolicy based on EngineDriver --- .../common/{default => argo}/policy.yaml.tmpl | 0 .../internal/common/tekton/policy.yaml.tmpl | 81 +++++++++++++++++++ controllers/common.go | 12 +++ controllers/common_test.go | 1 + controllers/dspipeline_params.go | 4 +- 5 files changed, 95 insertions(+), 3 deletions(-) rename config/internal/common/{default => argo}/policy.yaml.tmpl (100%) create mode 100644 config/internal/common/tekton/policy.yaml.tmpl diff --git 
a/config/internal/common/default/policy.yaml.tmpl b/config/internal/common/argo/policy.yaml.tmpl similarity index 100% rename from config/internal/common/default/policy.yaml.tmpl rename to config/internal/common/argo/policy.yaml.tmpl diff --git a/config/internal/common/tekton/policy.yaml.tmpl b/config/internal/common/tekton/policy.yaml.tmpl new file mode 100644 index 000000000..892868cba --- /dev/null +++ b/config/internal/common/tekton/policy.yaml.tmpl @@ -0,0 +1,81 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: ds-pipelines-{{.Name}} + namespace: {{.Namespace}} +spec: + podSelector: + matchLabels: + app: ds-pipeline-{{.Name}} + component: data-science-pipelines + policyTypes: + - Ingress + ingress: + # Match all sources for oauth endpoint + - ports: + - protocol: TCP + port: 8443 + # We only allow DSPA components to communicate + # by bypassing oauth proxy, all external + # traffic should go through oauth proxy + - from: + - namespaceSelector: + matchLabels: + name: openshift-user-workload-monitoring + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: redhat-ods-monitoring + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: openshift-pipelines + - podSelector: + matchLabels: + app.kubernetes.io/managed-by: tekton-pipelines + pipelines.kubeflow.org/v2_component: 'true' + - podSelector: + matchLabels: + app: mariadb-{{.Name}} + component: data-science-pipelines + - podSelector: + matchLabels: + app: minio-{{.Name}} + component: data-science-pipelines + - podSelector: + matchLabels: + app: ds-pipeline-ui-{{.Name}} + component: data-science-pipelines + - podSelector: + matchLabels: + app: ds-pipeline-persistenceagent-{{.Name}} + component: data-science-pipelines + - podSelector: + matchLabels: + app: ds-pipeline-scheduledworkflow-{{.Name}} + component: data-science-pipelines + - podSelector: + matchLabels: + app: ds-pipeline-metadata-envoy-{{.Name}} + component: data-science-pipelines + - podSelector: + 
matchLabels: + app: ds-pipeline-metadata-grpc-{{.Name}} + component: data-science-pipelines + - podSelector: + matchLabels: + app: ds-pipeline-metadata-writer-{{.Name}} + component: data-science-pipelines + ports: + - protocol: TCP + port: 8888 + - protocol: TCP + port: 8887 + - ports: + - protocol: TCP + port: 8080 + from: + - podSelector: + matchLabels: + app.kubernetes.io/name: data-science-pipelines-operator-driver + namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: openshift-pipelines diff --git a/controllers/common.go b/controllers/common.go index b68787b6f..fa55f2c00 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -20,6 +20,8 @@ import ( ) var commonTemplatesDir = "common/default" +var argoOnlyCommonTemplatesDir = "common/argo" +var tektonOnlyCommonTemplatesDir = "common/argo" const commonCusterRolebindingTemplate = "common/no-owner/clusterrolebinding.yaml.tmpl" @@ -32,6 +34,16 @@ func (r *DSPAReconciler) ReconcileCommon(dsp *dspav1alpha1.DataSciencePipelinesA return err } + log.Info("Applying Engine-Specific Common Resources") + if params.UsingArgoEngineDriver(dsp) { + err = r.ApplyDir(dsp, params, argoOnlyCommonTemplatesDir) + } else if params.UsingTektonEngineDriver(dsp) { + err = r.ApplyDir(dsp, params, tektonOnlyCommonTemplatesDir) + } + if err != nil { + return err + } + err = r.ApplyWithoutOwner(params, commonCusterRolebindingTemplate) if err != nil { return err diff --git a/controllers/common_test.go b/controllers/common_test.go index c0b411668..f9e2b3273 100644 --- a/controllers/common_test.go +++ b/controllers/common_test.go @@ -34,6 +34,7 @@ func TestDeployCommonPolicies(t *testing.T) { // Construct Basic DSPA Spec dspa := &dspav1alpha1.DataSciencePipelinesApplication{ Spec: dspav1alpha1.DSPASpec{ + EngineDriver: "tekton", Database: &dspav1alpha1.Database{ DisableHealthCheck: false, MariaDB: &dspav1alpha1.MariaDB{ diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 
fa8511e0e..f06e1cfec 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -92,7 +92,7 @@ func (p *DSPAParams) UsingArgoEngineDriver(dsp *dspa.DataSciencePipelinesApplica } func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { - return dsp.Spec.DSPVersion == "tekton" + return dsp.Spec.EngineDriver == "tekton" } // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. @@ -384,8 +384,6 @@ func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelin MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Tekton MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Tekton MlmdWriterImagePath = config.MlmdWriterImagePathV2Tekton - } else { - return fmt.Errorf(fmt.Sprintf("Illegal Engine Driver (%s) specified, cannot continue.", dsp.Spec.EngineDriver)) } } if p.MLMD.Envoy == nil { From 776e669366af458384882ce120a889e5ee495a7d Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 1 Nov 2023 16:26:44 -0400 Subject: [PATCH 54/85] Engine-Agnostic Manifests: PersistenceAgent --- .../internal/persistence-agent/deployment.yaml.tmpl | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index f4f77d839..b9e6debde 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -34,8 +34,12 @@ spec: value: kubeflow-userid - name: KUBEFLOW_USERID_PREFIX value: "" - #- name: EXECUTIONTYPE - # value: PipelineRun + - name: EXECUTIONTYPE + {{ if (eq .EngineDriver "tekton") }} + value: PipelineRun + {{ else }} + value: Workflow + {{ end }} image: "{{.PersistenceAgent.Image}}" imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent @@ -85,12 +89,14 @@ spec: memory: {{.PersistenceAgent.Resources.Limits.Memory}} {{ end }} {{ end }} + {{ if (eq 
.EngineDriver "argo") }} volumeMounts: - mountPath: /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token name: persistenceagent-sa-token subPath: ds-pipeline-persistenceagent-{{.Name}}-token - + {{ end }} serviceAccountName: ds-pipeline-persistenceagent-{{.Name}} + {{ if (eq .EngineDriver "argo") }} volumes: - name: persistenceagent-sa-token projected: @@ -99,3 +105,4 @@ spec: audience: pipelines.kubeflow.org expirationSeconds: 3600 path: ds-pipeline-persistenceagent-{{.Name}}-token + {{ end }} \ No newline at end of file From ae80f250fb4d336eba7dae2e2a1a772930a39a9c Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 1 Nov 2023 16:41:30 -0400 Subject: [PATCH 55/85] Engine-Agnostic Manifests: APIServer --- .../apiserver/default/deployment.yaml.tmpl | 97 +++++++++++++------ .../persistence-agent/deployment.yaml.tmpl | 6 +- controllers/common.go | 2 +- controllers/dspipeline_params.go | 3 + 4 files changed, 72 insertions(+), 36 deletions(-) diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index a8a6a8e59..4e5e498ad 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -22,14 +22,8 @@ spec: spec: containers: - env: - - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION - value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" - name: POD_NAMESPACE value: "{{.Namespace}}" - - name: OBJECTSTORECONFIG_SECURE - value: "false" - - name: OBJECTSTORECONFIG_BUCKETNAME - value: "{{.ObjectStorageConnection.Bucket}}" - name: DBCONFIG_USER value: "{{.DBConnection.Username}}" - name: DBCONFIG_PASSWORD @@ -43,6 +37,7 @@ spec: value: "{{.DBConnection.Host}}" - name: DBCONFIG_PORT value: "{{.DBConnection.Port}}" +<<<<<<< HEAD - name: ARTIFACT_BUCKET value: "{{.ObjectStorageConnection.Bucket}}" - name: ARTIFACT_ENDPOINT @@ -82,43 +77,25 @@ spec: value: "{{.APIServer.TerminateStatus}}" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION 
value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" +======= +>>>>>>> 22b6380 (Engine-Agnostic Manifests: APIServer) - name: DBCONFIG_CONMAXLIFETIMESEC value: "{{.APIServer.DBConfigConMaxLifetimeSec}}" - - name: DB_DRIVER_NAME - value: mysql - - name: DBCONFIG_MYSQLCONFIG_USER - value: "{{.DBConnection.Username}}" - - name: DBCONFIG_MYSQLCONFIG_PASSWORD - valueFrom: - secretKeyRef: - key: "{{.DBConnection.CredentialsSecret.Key}}" - name: "{{.DBConnection.CredentialsSecret.Name}}" - - name: DBCONFIG_MYSQLCONFIG_DBNAME - value: "{{.DBConnection.DBName}}" - - name: DBCONFIG_MYSQLCONFIG_HOST - value: "{{.DBConnection.Host}}" - - name: DBCONFIG_MYSQLCONFIG_PORT - value: "{{.DBConnection.Port}}" - - name: OBJECTSTORECONFIG_ACCESSKEY - valueFrom: - secretKeyRef: - key: "{{.ObjectStorageConnection.CredentialsSecret.AccessKey}}" - name: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - - name: OBJECTSTORECONFIG_SECRETACCESSKEY - valueFrom: - secretKeyRef: - key: "{{.ObjectStorageConnection.CredentialsSecret.SecretKey}}" - name: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST value: "ds-pipeline-visualizationserver" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT value: "8888" +<<<<<<< HEAD - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY value: "{{.ObjectStorageConnection.CredentialsSecret.AccessKey}}" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "{{.ObjectStorageConnection.CredentialsSecret.SecretKey}}" +======= + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-{{.Name}}" +>>>>>>> 22b6380 (Engine-Agnostic Manifests: APIServer) - name: OBJECTSTORECONFIG_BUCKETNAME value: "{{.ObjectStorageConnection.Bucket}}" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -141,6 +118,7 @@ spec: value: "{{.APIServer.CacheImage}}" - name: MOVERESULTS_IMAGE value: 
"{{.APIServer.MoveResultsImage}}" + ## Env Vars to only include if MLMD Deployed ## {{ if .MLMD.Deploy }} - name: METADATA_GRPC_SERVICE_SERVICE_HOST value: "ds-pipeline-metadata-grpc-{{.Name}}.{{.Namespace}}.svc.cluster.local" @@ -150,11 +128,66 @@ spec: {{ end }} {{ end }} - name: ML_PIPELINE_SERVICE_HOST - value: ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local + value: "ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local" - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + ## Values change based on Engine Driver ## - name: EXECUTIONTYPE + {{ if (eq .EngineDriver "tekton") }} + value: PipelineRun + {{ else }} value: Workflow + {{ end }} + ## Argo-Specific Env Vars ## + - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION + value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" + - name: OBJECTSTORECONFIG_SECURE + value: "false" + - name: OBJECTSTORECONFIG_BUCKETNAME + value: "{{.ObjectStorageConnection.Bucket}}" + - name: DB_DRIVER_NAME + value: mysql + - name: DBCONFIG_MYSQLCONFIG_USER + value: "{{.DBConnection.Username}}" + - name: DBCONFIG_MYSQLCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "{{.DBConnection.CredentialsSecret.Key}}" + name: "{{.DBConnection.CredentialsSecret.Name}}" + - name: DBCONFIG_MYSQLCONFIG_DBNAME + value: "{{.DBConnection.DBName}}" + - name: DBCONFIG_MYSQLCONFIG_HOST + value: "{{.DBConnection.Host}}" + - name: DBCONFIG_MYSQLCONFIG_PORT + value: "{{.DBConnection.Port}}" + ## Tekton-Specific Env Vars ## + {{ if (eq .EngineDriver "tekton") }} + - name: ARTIFACT_BUCKET + value: "{{.ObjectStorageConnection.Bucket}}" + - name: ARTIFACT_ENDPOINT + value: "{{.ObjectStorageConnection.Endpoint}}" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "{{ .APIServer.ArtifactScriptConfigMap.Key }}" + name: "{{ .APIServer.ArtifactScriptConfigMap.Name }}" + - name: ARTIFACT_IMAGE + value: "{{.APIServer.ArtifactImage}}" + - name: ARCHIVE_LOGS + value: "{{.APIServer.ArchiveLogs}}" + - name: TRACK_ARTIFACTS + value: 
"{{.APIServer.TrackArtifacts}}" + - name: STRIP_EOF + value: "{{.APIServer.StripEOF}}" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "{{.APIServer.InjectDefaultScript}}" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "{{.APIServer.ApplyTektonCustomResource}}" + - name: TERMINATE_STATUS + value: "{{.APIServer.TerminateStatus}}" + {{ end }} image: {{.APIServer.Image}} imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index b9e6debde..02d56835b 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -35,7 +35,7 @@ spec: - name: KUBEFLOW_USERID_PREFIX value: "" - name: EXECUTIONTYPE - {{ if (eq .EngineDriver "tekton") }} + {{ if eq .EngineDriver "tekton" }} value: PipelineRun {{ else }} value: Workflow @@ -89,14 +89,14 @@ spec: memory: {{.PersistenceAgent.Resources.Limits.Memory}} {{ end }} {{ end }} - {{ if (eq .EngineDriver "argo") }} + {{ if eq .EngineDriver "argo" }} volumeMounts: - mountPath: /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token name: persistenceagent-sa-token subPath: ds-pipeline-persistenceagent-{{.Name}}-token {{ end }} serviceAccountName: ds-pipeline-persistenceagent-{{.Name}} - {{ if (eq .EngineDriver "argo") }} + {{ if eq .EngineDriver "argo" }} volumes: - name: persistenceagent-sa-token projected: diff --git a/controllers/common.go b/controllers/common.go index fa55f2c00..1e9ea6753 100644 --- a/controllers/common.go +++ b/controllers/common.go @@ -21,7 +21,7 @@ import ( var commonTemplatesDir = "common/default" var argoOnlyCommonTemplatesDir = "common/argo" -var tektonOnlyCommonTemplatesDir = "common/argo" +var tektonOnlyCommonTemplatesDir = "common/tekton" const commonCusterRolebindingTemplate = "common/no-owner/clusterrolebinding.yaml.tmpl" diff --git a/controllers/dspipeline_params.go 
b/controllers/dspipeline_params.go index f06e1cfec..2c296e0b5 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -36,10 +36,12 @@ import ( ) type DSPAParams struct { +<<<<<<< HEAD Name string Namespace string Owner mf.Owner DSPVersion string + EngineDriver string APIServer *dspa.APIServer APIServerPiplinesCABundleMountPath string PiplinesCABundleMountPath string @@ -435,6 +437,7 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip p.Name = dsp.Name p.Namespace = dsp.Namespace p.DSPVersion = dsp.Spec.DSPVersion + p.EngineDriver = dsp.Spec.EngineDriver p.Owner = dsp p.APIServer = dsp.Spec.APIServer.DeepCopy() p.APIServerDefaultResourceName = apiServerDefaultResourceNamePrefix + dsp.Name From 85d2e0f37ea39bb2488392600483ad71f4f1548d Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 21 Nov 2023 11:29:11 -0500 Subject: [PATCH 56/85] Remove EngineDriver field --- Makefile | 12 --- api/v1alpha1/dspipeline_types.go | 3 - config/base/kustomization.yaml | 77 ---------------- config/base/params.env | 10 --- .../apiserver/default/deployment.yaml.tmpl | 43 +-------- .../persistence-agent/deployment.yaml.tmpl | 10 +-- config/manager/manager.yaml | 22 ----- .../overlays/make-deploy/kustomization.yaml | 2 +- config/v2/cache/clusterrolebinding.yaml | 12 --- config/v2/cache/kustomization.yaml | 4 - config/v2/cache/serviceaccount.yaml | 10 --- config/v2/driver/clusterrole.yaml | 63 ------------- config/v2/driver/clusterrolebinding.yaml | 17 ---- config/v2/driver/deployment.yaml | 69 --------------- config/v2/driver/kustomization.yaml | 8 -- config/v2/driver/role.yaml | 77 ---------------- config/v2/driver/rolebinding.yaml | 17 ---- config/v2/driver/service.yaml | 24 ----- config/v2/driver/serviceaccount.yaml | 10 --- .../clusterrolebinding.clusteraccess.yaml | 16 ---- .../clusterrolebinding.leaderelection.yaml | 16 ---- .../clusterrolebinding.tenantaccess.yaml | 16 ---- 
.../v2/exithandler/controller/deployment.yaml | 60 ------------- .../exithandler/controller/kustomization.yaml | 10 --- config/v2/exithandler/controller/role.yaml | 37 -------- .../exithandler/controller/rolebinding.yaml | 16 ---- .../controller/serviceaccount.yaml | 10 --- config/v2/exithandler/crd.yaml | 29 ------ config/v2/exithandler/kustomization.yaml | 5 -- .../webhook/clusterrole.clusteraccess.yaml | 88 ------------------- .../clusterrolebinding.clusteraccess.yaml | 16 ---- config/v2/exithandler/webhook/deployment.yaml | 71 --------------- .../webhook/mutatingwebhookconfig.yaml | 19 ---- config/v2/exithandler/webhook/role.yaml | 53 ----------- .../v2/exithandler/webhook/rolebinding.yaml | 16 ---- config/v2/exithandler/webhook/service.yaml | 30 ------- .../exithandler/webhook/serviceaccount.yaml | 10 --- .../webhook/validatingwebhookconfig.yaml | 19 ---- .../controller/clusterrole.clusteraccess.yaml | 66 -------------- .../controller/clusterrole.tenantaccess.yaml | 21 ----- .../clusterrolebinding.clusteraccess.yaml | 16 ---- .../clusterrolebinding.leaderelection.yaml | 16 ---- .../clusterrolebinding.tenantaccess.yaml | 16 ---- config/v2/kfptask/controller/deployment.yaml | 60 ------------- .../v2/kfptask/controller/kustomization.yaml | 10 --- config/v2/kfptask/controller/role.yaml | 38 -------- config/v2/kfptask/controller/rolebinding.yaml | 17 ---- .../v2/kfptask/controller/serviceaccount.yaml | 10 --- config/v2/kfptask/crd.yaml | 29 ------ config/v2/kfptask/kustomization.yaml | 5 -- .../clusterrolebinding.clusteraccess.yaml | 16 ---- .../clusterrolebinding.leaderelection.yaml | 16 ---- config/v2/kfptask/webhook/deployment.yaml | 71 --------------- .../webhook/mutatingwebhookconfig.yaml | 19 ---- config/v2/kfptask/webhook/role.yaml | 53 ----------- config/v2/kfptask/webhook/rolebinding.yaml | 16 ---- config/v2/kfptask/webhook/service.yaml | 30 ------- config/v2/kfptask/webhook/serviceaccount.yaml | 10 --- .../webhook/validatingwebhookconfig.yaml | 19 ---- 
config/v2/kustomization.yaml | 12 --- .../controller/clusterrole.clusteraccess.yaml | 66 -------------- .../clusterrolebinding.clusteraccess.yaml | 16 ---- .../clusterrolebinding.leaderelection.yaml | 16 ---- .../clusterrolebinding.tenantaccess.yaml | 16 ---- .../pipelineloop/controller/deployment.yaml | 60 ------------- .../controller/kustomization.yaml | 10 --- config/v2/pipelineloop/controller/role.yaml | 36 -------- .../pipelineloop/controller/rolebinding.yaml | 16 ---- .../controller/serviceaccount.yaml | 10 --- config/v2/pipelineloop/crd.yaml | 29 ------ config/v2/pipelineloop/kustomization.yaml | 5 -- .../webhook/clusterrole.clusteraccess.yaml | 88 ------------------- .../clusterrolebinding.clusteraccess.yaml | 16 ---- .../clusterrolebinding.leaderelection.yaml | 16 ---- .../v2/pipelineloop/webhook/deployment.yaml | 71 --------------- .../pipelineloop/webhook/kustomization.yaml | 11 --- .../webhook/mutatingwebhookconfig.yaml | 19 ---- config/v2/pipelineloop/webhook/role.yaml | 52 ----------- .../v2/pipelineloop/webhook/rolebinding.yaml | 16 ---- config/v2/pipelineloop/webhook/service.yaml | 30 ------- .../pipelineloop/webhook/serviceaccount.yaml | 10 --- .../webhook/validatingwebhookconfig.yaml | 19 ---- config/v2/tektoncrds/crd.yaml | 28 ------ config/v2/tektoncrds/kustomization.yaml | 2 - controllers/common_test.go | 1 - controllers/dspipeline_params.go | 11 +-- kfdef/kfdef.yaml | 23 ----- 87 files changed, 13 insertions(+), 2273 deletions(-) delete mode 100644 config/v2/cache/clusterrolebinding.yaml delete mode 100644 config/v2/cache/kustomization.yaml delete mode 100644 config/v2/cache/serviceaccount.yaml delete mode 100644 config/v2/driver/clusterrole.yaml delete mode 100644 config/v2/driver/clusterrolebinding.yaml delete mode 100644 config/v2/driver/deployment.yaml delete mode 100644 config/v2/driver/kustomization.yaml delete mode 100644 config/v2/driver/role.yaml delete mode 100644 config/v2/driver/rolebinding.yaml delete mode 100644 
config/v2/driver/service.yaml delete mode 100644 config/v2/driver/serviceaccount.yaml delete mode 100644 config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml delete mode 100644 config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml delete mode 100644 config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml delete mode 100644 config/v2/exithandler/controller/deployment.yaml delete mode 100644 config/v2/exithandler/controller/kustomization.yaml delete mode 100644 config/v2/exithandler/controller/role.yaml delete mode 100644 config/v2/exithandler/controller/rolebinding.yaml delete mode 100644 config/v2/exithandler/controller/serviceaccount.yaml delete mode 100644 config/v2/exithandler/crd.yaml delete mode 100644 config/v2/exithandler/kustomization.yaml delete mode 100644 config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml delete mode 100644 config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml delete mode 100644 config/v2/exithandler/webhook/deployment.yaml delete mode 100644 config/v2/exithandler/webhook/mutatingwebhookconfig.yaml delete mode 100644 config/v2/exithandler/webhook/role.yaml delete mode 100644 config/v2/exithandler/webhook/rolebinding.yaml delete mode 100644 config/v2/exithandler/webhook/service.yaml delete mode 100644 config/v2/exithandler/webhook/serviceaccount.yaml delete mode 100644 config/v2/exithandler/webhook/validatingwebhookconfig.yaml delete mode 100644 config/v2/kfptask/controller/clusterrole.clusteraccess.yaml delete mode 100644 config/v2/kfptask/controller/clusterrole.tenantaccess.yaml delete mode 100644 config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml delete mode 100644 config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml delete mode 100644 config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml delete mode 100644 config/v2/kfptask/controller/deployment.yaml delete mode 100644 config/v2/kfptask/controller/kustomization.yaml 
delete mode 100644 config/v2/kfptask/controller/role.yaml delete mode 100644 config/v2/kfptask/controller/rolebinding.yaml delete mode 100644 config/v2/kfptask/controller/serviceaccount.yaml delete mode 100644 config/v2/kfptask/crd.yaml delete mode 100644 config/v2/kfptask/kustomization.yaml delete mode 100644 config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml delete mode 100644 config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml delete mode 100644 config/v2/kfptask/webhook/deployment.yaml delete mode 100644 config/v2/kfptask/webhook/mutatingwebhookconfig.yaml delete mode 100644 config/v2/kfptask/webhook/role.yaml delete mode 100644 config/v2/kfptask/webhook/rolebinding.yaml delete mode 100644 config/v2/kfptask/webhook/service.yaml delete mode 100644 config/v2/kfptask/webhook/serviceaccount.yaml delete mode 100644 config/v2/kfptask/webhook/validatingwebhookconfig.yaml delete mode 100644 config/v2/kustomization.yaml delete mode 100644 config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml delete mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml delete mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml delete mode 100644 config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml delete mode 100644 config/v2/pipelineloop/controller/deployment.yaml delete mode 100644 config/v2/pipelineloop/controller/kustomization.yaml delete mode 100644 config/v2/pipelineloop/controller/role.yaml delete mode 100644 config/v2/pipelineloop/controller/rolebinding.yaml delete mode 100644 config/v2/pipelineloop/controller/serviceaccount.yaml delete mode 100644 config/v2/pipelineloop/crd.yaml delete mode 100644 config/v2/pipelineloop/kustomization.yaml delete mode 100644 config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml delete mode 100644 config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml delete mode 100644 
config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml delete mode 100644 config/v2/pipelineloop/webhook/deployment.yaml delete mode 100644 config/v2/pipelineloop/webhook/kustomization.yaml delete mode 100644 config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml delete mode 100644 config/v2/pipelineloop/webhook/role.yaml delete mode 100644 config/v2/pipelineloop/webhook/rolebinding.yaml delete mode 100644 config/v2/pipelineloop/webhook/service.yaml delete mode 100644 config/v2/pipelineloop/webhook/serviceaccount.yaml delete mode 100644 config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml delete mode 100644 config/v2/tektoncrds/crd.yaml delete mode 100644 config/v2/tektoncrds/kustomization.yaml diff --git a/Makefile b/Makefile index 4dcc3f02b..b10268b02 100644 --- a/Makefile +++ b/Makefile @@ -163,18 +163,6 @@ undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/confi cd config/overlays/make-deploy && $(KUSTOMIZE) edit set namespace ${OPERATOR_NS} $(KUSTOMIZE) build config/overlays/make-deploy | kubectl delete --ignore-not-found=$(ignore-not-found) -f - -.PHONY: v2deploy -v2deploy: manifests kustomize - cd config/overlays/make-v2deploy \ - && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} - $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl apply -f - - -.PHONY: v2undeploy -v2undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 
- cd config/overlays/make-v2deploy \ - && $(KUSTOMIZE) edit set namespace ${V2INFRA_NS} - $(KUSTOMIZE) build config/overlays/make-v2deploy | kubectl delete --ignore-not-found=$(ignore-not-found) -f - - .PHONY: argodeploy argodeploy: manifests kustomize cd config/overlays/make-argodeploy \ diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 9e5cefcba..32d94b287 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -53,9 +53,6 @@ type DSPASpec struct { DSPVersion string `json:"dspVersion,omitempty"` // +kubebuilder:validation:Optional // +kubebuilder:default:="tekton" - EngineDriver string `json:"engineDriver,omitempty"` - // DS Pipelines Argo Workflow Controller Configuration. - // +kubebuilder:default:={deploy: false} *WorkflowController `json:"workflowController,omitempty"` } diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index cdab43931..157c11d90 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -239,82 +239,5 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_ARGO_WORKFLOWCONTROLLER - - name: IMAGESV2_TEKTON_APISERVER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_APISERVER - - name: IMAGESV2_TEKTON_ARTIFACT - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_ARTIFACT - - name: IMAGESV2_TEKTON_PERSISTENTAGENT - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_PERSISTENTAGENT - - name: IMAGESV2_TEKTON_SCHEDULEDWORKFLOW - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_SCHEDULEDWORKFLOW - - name: IMAGESV2_TEKTON_CACHE - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_CACHE - - name: 
IMAGESV2_TEKTON_MOVERESULTSIMAGE - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_MOVERESULTSIMAGE - - name: IMAGESV2_TEKTON_MLMDENVOY - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_MLMDENVOY - - name: IMAGESV2_TEKTON_MLMDGRPC - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_MLMDGRPC - - name: IMAGESV2_TEKTON_MLMDWRITER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_MLMDWRITER - - name: IMAGESV2_TEKTON_VISUALIZATIONSERVER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_VISUALIZATIONSERVER - - name: IMAGESV2_TEKTON_WORKFLOWCONTROLLER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_TEKTON_WORKFLOWCONTROLLER configurations: - params.yaml diff --git a/config/base/params.env b/config/base/params.env index 56d190d3d..856b2e0b3 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -23,16 +23,6 @@ IMAGESV2_ARGO_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 IMAGESV2_ARGO_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 IMAGESV2_ARGO_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.2 IMAGESV2_ARGO_WORKFLOWCONTROLLER=gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance -IMAGESV2_TEKTON_APISERVER=quay.io/rmartine/apiserver:v2 -IMAGESV2_TEKTON_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main -IMAGESV2_TEKTON_PERSISTENTAGENT=quay.io/rmartine/persistenceagent-dev:6b8723529 -IMAGESV2_TEKTON_SCHEDULEDWORKFLOW=quay.io/rmartine/swf-dev:6b8723529 -IMAGESV2_TEKTON_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 -IMAGESV2_TEKTON_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 
-IMAGESV2_TEKTON_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 -IMAGESV2_TEKTON_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 -IMAGESV2_TEKTON_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 -IMAGESV2_TEKTON_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 ZAP_LOG_LEVEL=info MAX_CONCURRENT_RECONCILES=10 DSPO_HEALTHCHECK_DATABASE_CONNECTIONTIMEOUT=15s diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index 4e5e498ad..60c5a5ed9 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -37,22 +37,6 @@ spec: value: "{{.DBConnection.Host}}" - name: DBCONFIG_PORT value: "{{.DBConnection.Port}}" -<<<<<<< HEAD - - name: ARTIFACT_BUCKET - value: "{{.ObjectStorageConnection.Bucket}}" - - name: ARTIFACT_ENDPOINT - value: "{{.ObjectStorageConnection.Endpoint}}" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "{{ .APIServer.ArtifactScriptConfigMap.Key }}" - name: "{{ .APIServer.ArtifactScriptConfigMap.Name }}" - - name: ARTIFACT_IMAGE - value: "{{.APIServer.ArtifactImage}}" - - name: ARCHIVE_LOGS - value: "{{.APIServer.ArchiveLogs}}" - - name: EXECUTIONTYPE - value: PipelineRun {{ if .APIServer.CABundle }} - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME value: "{{.APIServer.CABundle.ConfigMapName}}" @@ -61,41 +45,22 @@ spec: - name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH value: {{ .PiplinesCABundleMountPath }} {{ end }} - - name: TRACK_ARTIFACTS - value: "{{.APIServer.TrackArtifacts}}" - - name: STRIP_EOF - value: "{{.APIServer.StripEOF}}" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-{{.Name}}" - - name: INJECT_DEFAULT_SCRIPT - value: "{{.APIServer.InjectDefaultScript}}" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "{{.APIServer.ApplyTektonCustomResource}}" - - name: TERMINATE_STATUS 
- value: "{{.APIServer.TerminateStatus}}" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" -======= ->>>>>>> 22b6380 (Engine-Agnostic Manifests: APIServer) - name: DBCONFIG_CONMAXLIFETIMESEC value: "{{.APIServer.DBConfigConMaxLifetimeSec}}" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST value: "ds-pipeline-visualizationserver" - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT value: "8888" -<<<<<<< HEAD - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY value: "{{.ObjectStorageConnection.CredentialsSecret.AccessKey}}" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "{{.ObjectStorageConnection.CredentialsSecret.SecretKey}}" -======= - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT value: "pipeline-runner-{{.Name}}" ->>>>>>> 22b6380 (Engine-Agnostic Manifests: APIServer) - name: OBJECTSTORECONFIG_BUCKETNAME value: "{{.ObjectStorageConnection.Bucket}}" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -133,10 +98,10 @@ spec: value: "8887" ## Values change based on Engine Driver ## - name: EXECUTIONTYPE - {{ if (eq .EngineDriver "tekton") }} - value: PipelineRun - {{ else }} + {{ if (eq .DSPVersion "v2") }} value: Workflow + {{ else }} + value: PipelineRun {{ end }} ## Argo-Specific Env Vars ## - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION @@ -161,7 +126,7 @@ spec: - name: DBCONFIG_MYSQLCONFIG_PORT value: "{{.DBConnection.Port}}" ## Tekton-Specific Env Vars ## - {{ if (eq .EngineDriver "tekton") }} + {{ if (eq .DSPVersion "v1") }} - name: ARTIFACT_BUCKET value: "{{.ObjectStorageConnection.Bucket}}" - name: ARTIFACT_ENDPOINT diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index 02d56835b..b2ca5f507 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ 
-35,10 +35,10 @@ spec: - name: KUBEFLOW_USERID_PREFIX value: "" - name: EXECUTIONTYPE - {{ if eq .EngineDriver "tekton" }} - value: PipelineRun - {{ else }} + {{ if eq .DSPVersion "v2" }} value: Workflow + {{ else }} + value: PipelineRun {{ end }} image: "{{.PersistenceAgent.Image}}" imagePullPolicy: IfNotPresent @@ -89,14 +89,14 @@ spec: memory: {{.PersistenceAgent.Resources.Limits.Memory}} {{ end }} {{ end }} - {{ if eq .EngineDriver "argo" }} + {{ if eq .DSPVersion "v2" }} volumeMounts: - mountPath: /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token name: persistenceagent-sa-token subPath: ds-pipeline-persistenceagent-{{.Name}}-token {{ end }} serviceAccountName: ds-pipeline-persistenceagent-{{.Name}} - {{ if eq .EngineDriver "argo" }} + {{ if eq .DSPVersion "v2" }} volumes: - name: persistenceagent-sa-token projected: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 7e2dcca65..cb0896126 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -84,28 +84,6 @@ spec: value: $(IMAGESV2_ARGO_VISUALIZATIONSERVER) - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER value: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) - - name: IMAGESV2_TEKTON_APISERVER - value: $(IMAGESV2_TEKTON_APISERVER) - - name: IMAGESV2_TEKTON_ARTIFACT - value: $(IMAGESV2_TEKTON_ARTIFACT) - - name: IMAGESV2_TEKTON_PERSISTENTAGENT - value: $(IMAGESV2_TEKTON_PERSISTENTAGENT) - - name: IMAGESV2_TEKTON_SCHEDULEDWORKFLOW - value: $(IMAGESV2_TEKTON_SCHEDULEDWORKFLOW) - - name: IMAGESV2_TEKTON_CACHE - value: $(IMAGESV2_TEKTON_CACHE) - - name: IMAGESV2_TEKTON_MOVERESULTSIMAGE - value: $(IMAGESV2_TEKTON_MOVERESULTSIMAGE) - - name: IMAGESV2_TEKTON_MLMDENVOY - value: $(IMAGESV2_TEKTON_MLMDENVOY) - - name: IMAGESV2_TEKTON_MLMDGRPC - value: $(IMAGESV2_TEKTON_MLMDGRPC) - - name: IMAGESV2_TEKTON_MLMDWRITER - value: $(IMAGESV2_TEKTON_MLMDWRITER) - - name: IMAGESV2_TEKTON_VISUALIZATIONSERVER - value: $(IMAGESV2_TEKTON_VISUALIZATIONSERVER) - - name: 
IMAGESV2_TEKTON_WORKFLOWCONTROLLER - value: $(IMAGESV2_TEKTON_WORKFLOWCONTROLLER) - name: ZAP_LOG_LEVEL value: $(ZAP_LOG_LEVEL) - name: MAX_CONCURRENT_RECONCILES diff --git a/config/overlays/make-deploy/kustomization.yaml b/config/overlays/make-deploy/kustomization.yaml index 7814f52a5..1ec5fdeb4 100644 --- a/config/overlays/make-deploy/kustomization.yaml +++ b/config/overlays/make-deploy/kustomization.yaml @@ -8,4 +8,4 @@ patchesStrategicMerge: images: - name: controller newName: quay.io/opendatahub/data-science-pipelines-operator - newTag: main + newTag: pr-479 diff --git a/config/v2/cache/clusterrolebinding.yaml b/config/v2/cache/clusterrolebinding.yaml deleted file mode 100644 index 8c1e030fd..000000000 --- a/config/v2/cache/clusterrolebinding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: cache-deployer-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: cache-deployer-clusterrole -subjects: -- kind: ServiceAccount - name: cache-deployer-sa - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/cache/kustomization.yaml b/config/v2/cache/kustomization.yaml deleted file mode 100644 index 51229db72..000000000 --- a/config/v2/cache/kustomization.yaml +++ /dev/null @@ -1,4 +0,0 @@ -resources: -- clusterrole.yaml -- clusterrolebinding.yaml -- serviceaccount.yaml diff --git a/config/v2/cache/serviceaccount.yaml b/config/v2/cache/serviceaccount.yaml deleted file mode 100644 index f1702bc71..000000000 --- a/config/v2/cache/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - namespace: datasciencepipelinesapplications-controller - name: cache-deployer-sa diff --git 
a/config/v2/driver/clusterrole.yaml b/config/v2/driver/clusterrole.yaml deleted file mode 100644 index 3468389c2..000000000 --- a/config/v2/driver/clusterrole.yaml +++ /dev/null @@ -1,63 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kubeflow-pipeline - name: driver-cluster-access-clusterrole -rules: -- apiGroups: - - tekton.dev - resources: - - runs - - customruns - - runs/finalizers - - customruns/finalizers - - runs/status - - customruns/status - - pipelineruns - - task - - taskruns - - conditions - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - watch - - list diff --git a/config/v2/driver/clusterrolebinding.yaml b/config/v2/driver/clusterrolebinding.yaml deleted file mode 100644 index c2bf01c40..000000000 --- a/config/v2/driver/clusterrolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kubeflow-pipeline - name: driver-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: driver-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: driver - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/deployment.yaml b/config/v2/driver/deployment.yaml 
deleted file mode 100644 index 2ea4f2bf9..000000000 --- a/config/v2/driver/deployment.yaml +++ /dev/null @@ -1,69 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: ckfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/name: driver - app.kubernetes.io/part-of: kubeflow-pipeline - app.kubernetes.io/version: devel - name: driver -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/name: driver - app.kubernetes.io/part-of: kubeflow-pipeline - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: kfp-driver - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/name: driver - app.kubernetes.io/part-of: kubeflow-pipeline - app.kubernetes.io/version: devel - spec: - containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - - name: ML_PIPELINE_SERVICE_HOST - value: ds-pipeline-sample.data-science-pipelines-application-v2.svc.cluster.local - - name: ML_PIPELINE_SERVICE_PORT_GRPC - value: '8887' - - name: MINIO_SERVICE_SERVICE_HOST - value: minio-sample.data-science-pipelines-application-v2.svc.cluster.local - - name: MINIO_SERVICE_SERVICE_PORT - value: '9000' - - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: ds-pipeline-metadata-grpc-sample.data-science-pipelines-application-v2.svc.cluster.local - - name: METADATA_GRPC_SERVICE_SERVICE_PORT - value: '8080' - image: quay.io/internaldatahub/tekton-driver:2.0.0 - imagePullPolicy: Always - name: driver - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - 
- ALL - runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: driver diff --git a/config/v2/driver/kustomization.yaml b/config/v2/driver/kustomization.yaml deleted file mode 100644 index aa8114fa8..000000000 --- a/config/v2/driver/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -resources: -- clusterrole.yaml -- clusterrolebinding.yaml -- deployment.yaml -- role.yaml -- rolebinding.yaml -- service.yaml -- serviceaccount.yaml diff --git a/config/v2/driver/role.yaml b/config/v2/driver/role.yaml deleted file mode 100644 index dc1be8689..000000000 --- a/config/v2/driver/role.yaml +++ /dev/null @@ -1,77 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kubeflow-pipeline - namespace: datasciencepipelinesapplications-controller - name: driver-role -rules: -- apiGroups: - - "" - resources: - - secrets - verbs: - - get -- apiGroups: - - "" - resources: - - persistentvolumes - - persistentvolumeclaims - verbs: - - '*' -- apiGroups: - - snapshot.storage.k8s.io - resources: - - volumesnapshots - verbs: - - create - - delete - - get -- apiGroups: - - "" - resources: - - pods - - pods/exec - - pods/log - - services - verbs: - - '*' -- apiGroups: - - "" - - apps - - extensions - resources: - - deployments - - replicasets - verbs: - - '*' -- apiGroups: - - kubeflow.org - resources: - - '*' - verbs: - - '*' -- apiGroups: - - batch - resources: - - jobs - verbs: - - '*' -- apiGroups: - - machinelearning.seldon.io - resources: - - seldondeployments - verbs: - - '*' -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/driver/rolebinding.yaml b/config/v2/driver/rolebinding.yaml deleted file mode 100644 index d882b3713..000000000 --- 
a/config/v2/driver/rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kubeflow-pipeline - name: driver-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: driver-role -subjects: -- kind: ServiceAccount - name: driver - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/driver/service.yaml b/config/v2/driver/service.yaml deleted file mode 100644 index f57b423c3..000000000 --- a/config/v2/driver/service.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: kfp-driver - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/name: driver - app.kubernetes.io/part-of: kubeflow-pipeline - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: driver -spec: - ports: - - name: http-metrics - port: 9090 - protocol: TCP - targetPort: 9090 - selector: - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/name: driver - app.kubernetes.io/part-of: kubeflow-pipeline diff --git a/config/v2/driver/serviceaccount.yaml b/config/v2/driver/serviceaccount.yaml deleted file mode 100644 index 87d53f272..000000000 --- a/config/v2/driver/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: kfp-driver - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kubeflow-pipeline - namespace: datasciencepipelinesapplications-controller - name: driver diff --git a/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml 
b/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml deleted file mode 100644 index 047393dcc..000000000 --- a/config/v2/exithandler/controller/clusterrolebinding.clusteraccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-controller-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: exithandler-controller-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: exithandler-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml b/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml deleted file mode 100644 index 98320d612..000000000 --- a/config/v2/exithandler/controller/clusterrolebinding.leaderelection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-controller-leaderelection-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: exithandler-leader-election-clusterrole -subjects: -- kind: ServiceAccount - name: exithandler-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml deleted file mode 100644 index 7682ba12b..000000000 --- a/config/v2/exithandler/controller/clusterrolebinding.tenantaccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - 
app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-controller-tenant-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: exithandler-controller-tenant-access-clusterrole -subjects: -- kind: ServiceAccount - name: exithandler-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/deployment.yaml b/config/v2/exithandler/controller/deployment.yaml deleted file mode 100644 index 843c478fa..000000000 --- a/config/v2/exithandler/controller/deployment.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: exithandler-controller -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: kfp-tekton - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: kfp-exithandler-controller - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - spec: - containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - image: 
quay.io/internaldatahub/tekton-exithandler-controller:2.0.0 - name: exithandler-controller - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: exithandler-controller diff --git a/config/v2/exithandler/controller/kustomization.yaml b/config/v2/exithandler/controller/kustomization.yaml deleted file mode 100644 index 54449bfd8..000000000 --- a/config/v2/exithandler/controller/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -resources: -- clusterrole.clusteraccess.yaml -- clusterrole.tenantaccess.yaml -- clusterrolebinding.clusteraccess.yaml -- clusterrolebinding.leaderelection.yaml -- clusterrolebinding.tenantaccess.yaml -- deployment.yaml -- role.yaml -- rolebinding.yaml -- serviceaccount.yaml diff --git a/config/v2/exithandler/controller/role.yaml b/config/v2/exithandler/controller/role.yaml deleted file mode 100644 index adb1ebed5..000000000 --- a/config/v2/exithandler/controller/role.yaml +++ /dev/null @@ -1,37 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-controller-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - config-leader-election - - config-logging - - config-observability - - object-store-config - - cache-config - resources: - - configmaps - verbs: - - get -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/exithandler/controller/rolebinding.yaml b/config/v2/exithandler/controller/rolebinding.yaml deleted file mode 100644 index af8c564db..000000000 --- a/config/v2/exithandler/controller/rolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: 
rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-controller-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: exithandler-controller-role -subjects: -- kind: ServiceAccount - name: exithandler-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/controller/serviceaccount.yaml b/config/v2/exithandler/controller/serviceaccount.yaml deleted file mode 100644 index 69823ccd6..000000000 --- a/config/v2/exithandler/controller/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - namespace: datasciencepipelinesapplications-controller - name: exithandler-controller diff --git a/config/v2/exithandler/crd.yaml b/config/v2/exithandler/crd.yaml deleted file mode 100644 index da184975b..000000000 --- a/config/v2/exithandler/crd.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - pipeline.tekton.dev/release: devel - version: devel - name: exithandlers.custom.tekton.dev -spec: - group: custom.tekton.dev - names: - categories: - - tekton - - tekton-pipelines - - openshift-pipelines - kind: ExitHandler - plural: exithandlers - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: true - subresources: - status: {} diff --git a/config/v2/exithandler/kustomization.yaml b/config/v2/exithandler/kustomization.yaml deleted file mode 100644 index 
4c7cb015a..000000000 --- a/config/v2/exithandler/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- crd.yaml -- clusterrole.leaderelection.yaml -- ./controller -- ./webhook diff --git a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml deleted file mode 100644 index b773d1ca2..000000000 --- a/config/v2/exithandler/webhook/clusterrole.clusteraccess.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-webhook-cluster-access-clusterrole -rules: -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - - customresourcedefinitions/status - verbs: - - get - - list - - update - - patch - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - update - - patch - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - list - - watch -- apiGroups: - - admissionregistration.k8s.io - resourceNames: - - webhook.exithandler.custom.tekton.dev - resources: - - mutatingwebhookconfigurations - verbs: - - get - - update - - delete -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - namespaces/finalizers - resourceNames: - - openshift-pipelines - verbs: - - update -- apiGroups: - - admissionregistration.k8s.io - resourceNames: - - validation.webhook.exithandler.custom.tekton.dev - resources: - - validatingwebhookconfigurations - verbs: - - get - - update - - delete -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff 
--git a/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml deleted file mode 100644 index 2df37eade..000000000 --- a/config/v2/exithandler/webhook/clusterrolebinding.clusteraccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-webhook-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: exithandler-webhook-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: exithandler-webhook - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/deployment.yaml b/config/v2/exithandler/webhook/deployment.yaml deleted file mode 100644 index 35ddfad35..000000000 --- a/config/v2/exithandler/webhook/deployment.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: exithandler-webhook -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: tekton-pipelines-webhook - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - spec: - containers: - - env: - - 
name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: WEBHOOK_SERVICE_NAME - value: kfp-exithandler-webhook - - name: WEBHOOK_SECRET_NAME - value: kfp-exithandler-webhook-certs - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-exithandler-webhook:2.0.0 - name: webhook - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - - containerPort: 8443 - name: https-webhook - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: exithandler-webhook diff --git a/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml b/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml deleted file mode 100644 index 655ab1c78..000000000 --- a/config/v2/exithandler/webhook/mutatingwebhookconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - pipeline.tekton.dev/release: devel - name: webhook.exithandler.custom.tekton.dev -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: exithandler-webhook - namespace: datasciencepipelinesapplications-controller - failurePolicy: Fail - name: webhook.exithandler.custom.tekton.dev - sideEffects: None diff --git a/config/v2/exithandler/webhook/role.yaml b/config/v2/exithandler/webhook/role.yaml deleted file mode 100644 index d8fbb1139..000000000 --- a/config/v2/exithandler/webhook/role.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role 
-metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-webhook-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - config-logging - - config-observability - - config-leader-election - - object-store-config - - cache-config - resources: - - configmaps - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - kfp-exithandler-webhook-certs - resources: - - secrets - verbs: - - get - - update -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/exithandler/webhook/rolebinding.yaml b/config/v2/exithandler/webhook/rolebinding.yaml deleted file mode 100644 index e7baa905d..000000000 --- a/config/v2/exithandler/webhook/rolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: exithandler-webhook-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: exithandler-webhook-role -subjects: -- kind: ServiceAccount - name: exithandler-webhook - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/exithandler/webhook/service.yaml b/config/v2/exithandler/webhook/service.yaml deleted file mode 100644 index ad2e06a1d..000000000 --- a/config/v2/exithandler/webhook/service.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: tekton-pipelines-webhook - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - 
pipeline.tekton.dev/release: devel - version: devel - name: exithandler-webhook - namespace: datasciencepipelinesapplications-controller -spec: - ports: - - name: http-metrics - port: 9090 - targetPort: 9090 - - name: http-profiling - port: 8008 - targetPort: 8008 - - name: https-webhook - port: 443 - targetPort: 8443 - selector: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton diff --git a/config/v2/exithandler/webhook/serviceaccount.yaml b/config/v2/exithandler/webhook/serviceaccount.yaml deleted file mode 100644 index e4ae47bf0..000000000 --- a/config/v2/exithandler/webhook/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - namespace: datasciencepipelinesapplications-controller - name: exithandler-webhook diff --git a/config/v2/exithandler/webhook/validatingwebhookconfig.yaml b/config/v2/exithandler/webhook/validatingwebhookconfig.yaml deleted file mode 100644 index f18912178..000000000 --- a/config/v2/exithandler/webhook/validatingwebhookconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - pipeline.tekton.dev/release: devel - name: validation.webhook.exithandler.custom.tekton.dev -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: exithandler-webhook - namespace: datasciencepipelinesapplications-controller - failurePolicy: Fail - name: validation.webhook.exithandler.custom.tekton.dev - sideEffects: None diff --git a/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml 
b/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml deleted file mode 100644 index d95fd6141..000000000 --- a/config/v2/kfptask/controller/clusterrole.clusteraccess.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: controller-cluster-access-clusterrole -rules: -- apiGroups: - - tekton.dev - resources: - - runs - - customruns - - taskruns - - pipelineruns - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - tekton.dev - resources: - - runs/status - - customruns/status - - taskruns/status - - pipelineruns/status - - runs/finalizers - - customruns/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - custom.tekton.dev - resources: - - kfptasks - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch diff --git a/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml deleted file mode 100644 index 789553259..000000000 --- a/config/v2/kfptask/controller/clusterrole.tenantaccess.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: controller-tenant-access-clusterrole -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - get - - list - - create - - update - - delete - - patch - - watch diff --git a/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml 
deleted file mode 100644 index 660c52cc5..000000000 --- a/config/v2/kfptask/controller/clusterrolebinding.clusteraccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: controller-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: controller-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml deleted file mode 100644 index d6449e36d..000000000 --- a/config/v2/kfptask/controller/clusterrolebinding.leaderelection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: controller-leaderelection-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: leader-election-clusterrole -subjects: -- kind: ServiceAccount - name: controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml deleted file mode 100644 index 4827d6ebf..000000000 --- a/config/v2/kfptask/controller/clusterrolebinding.tenantaccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: controller-tenant-access-clusterrolebinding 
-roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: controller-tenant-access-clusterrole -subjects: -- kind: ServiceAccount - name: controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/deployment.yaml b/config/v2/kfptask/controller/deployment.yaml deleted file mode 100644 index 57576974b..000000000 --- a/config/v2/kfptask/controller/deployment.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: controller -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: kfp-tekton - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: kfptask-controller - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - spec: - containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-kfptask-controller:2.0.0 - name: controller - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: 
RuntimeDefault - serviceAccountName: controller diff --git a/config/v2/kfptask/controller/kustomization.yaml b/config/v2/kfptask/controller/kustomization.yaml deleted file mode 100644 index 54449bfd8..000000000 --- a/config/v2/kfptask/controller/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -resources: -- clusterrole.clusteraccess.yaml -- clusterrole.tenantaccess.yaml -- clusterrolebinding.clusteraccess.yaml -- clusterrolebinding.leaderelection.yaml -- clusterrolebinding.tenantaccess.yaml -- deployment.yaml -- role.yaml -- rolebinding.yaml -- serviceaccount.yaml diff --git a/config/v2/kfptask/controller/role.yaml b/config/v2/kfptask/controller/role.yaml deleted file mode 100644 index 275f63232..000000000 --- a/config/v2/kfptask/controller/role.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: controller-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - config-leader-election - - config-logging - - config-observability - - object-store-config - - cache-config - resources: - - configmaps - verbs: - - get -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/kfptask/controller/rolebinding.yaml b/config/v2/kfptask/controller/rolebinding.yaml deleted file mode 100644 index 569eb7f83..000000000 --- a/config/v2/kfptask/controller/rolebinding.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - 
name: controller-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: controller-role -subjects: -- kind: ServiceAccount - name: controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/controller/serviceaccount.yaml b/config/v2/kfptask/controller/serviceaccount.yaml deleted file mode 100644 index 99a950cb2..000000000 --- a/config/v2/kfptask/controller/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - namespace: datasciencepipelinesapplications-controller - name: controller diff --git a/config/v2/kfptask/crd.yaml b/config/v2/kfptask/crd.yaml deleted file mode 100644 index a3ec9de60..000000000 --- a/config/v2/kfptask/crd.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - pipeline.tekton.dev/release: devel - version: devel - name: kfptasks.custom.tekton.dev -spec: - group: custom.tekton.dev - names: - categories: - - tekton - - tekton-pipelines - - openshift-pipelines - kind: KfpTask - plural: kfptasks - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: true - subresources: - status: {} diff --git a/config/v2/kfptask/kustomization.yaml b/config/v2/kfptask/kustomization.yaml deleted file mode 100644 index 4c7cb015a..000000000 --- a/config/v2/kfptask/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- crd.yaml -- clusterrole.leaderelection.yaml -- ./controller -- ./webhook diff --git a/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml 
b/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml deleted file mode 100644 index 58b4bb31d..000000000 --- a/config/v2/kfptask/webhook/clusterrolebinding.clusteraccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: webhook-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: webhook-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: webhook - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml deleted file mode 100644 index acd0b5c3e..000000000 --- a/config/v2/kfptask/webhook/clusterrolebinding.leaderelection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: webhook-leaderelection-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: leader-election-clusterrole -subjects: -- kind: ServiceAccount - name: webhook - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/kfptask/webhook/deployment.yaml b/config/v2/kfptask/webhook/deployment.yaml deleted file mode 100644 index 2f15a509a..000000000 --- a/config/v2/kfptask/webhook/deployment.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: 
webhook -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: tekton-pipelines-webhook - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - spec: - containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: WEBHOOK_SERVICE_NAME - value: kfptask-webhook - - name: WEBHOOK_SECRET_NAME - value: kfptask-webhook-certs - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-kfptask-webhook:2.0.0 - name: webhook - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - - containerPort: 8443 - name: https-webhook - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: webhook diff --git a/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml b/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml deleted file mode 100644 index b1ff8fec0..000000000 --- a/config/v2/kfptask/webhook/mutatingwebhookconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - pipeline.tekton.dev/release: devel - name: 
webhook.kfptask.custom.tekton.dev -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: datasciencepipelinesapplications-controller - failurePolicy: Fail - name: webhook.kfptask.custom.tekton.dev - sideEffects: None diff --git a/config/v2/kfptask/webhook/role.yaml b/config/v2/kfptask/webhook/role.yaml deleted file mode 100644 index d179a5cfb..000000000 --- a/config/v2/kfptask/webhook/role.yaml +++ /dev/null @@ -1,53 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: webhook-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - config-logging - - config-observability - - config-leader-election - - object-store-config - - cache-config - resources: - - configmaps - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - kfptask-webhook-certs - resources: - - secrets - verbs: - - get - - update -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/kfptask/webhook/rolebinding.yaml b/config/v2/kfptask/webhook/rolebinding.yaml deleted file mode 100644 index f62b1cdfa..000000000 --- a/config/v2/kfptask/webhook/rolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - name: webhook-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: webhook-role -subjects: -- kind: ServiceAccount - name: webhook - namespace: datasciencepipelinesapplications-controller diff --git 
a/config/v2/kfptask/webhook/service.yaml b/config/v2/kfptask/webhook/service.yaml deleted file mode 100644 index 40434576f..000000000 --- a/config/v2/kfptask/webhook/service.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: tekton-pipelines-webhook - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: webhook - namespace: datasciencepipelinesapplications-controller -spec: - ports: - - name: http-metrics - port: 9090 - targetPort: 9090 - - name: http-profiling - port: 8008 - targetPort: 8008 - - name: https-webhook - port: 443 - targetPort: 8443 - selector: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: kfp-tekton diff --git a/config/v2/kfptask/webhook/serviceaccount.yaml b/config/v2/kfptask/webhook/serviceaccount.yaml deleted file mode 100644 index bf030f189..000000000 --- a/config/v2/kfptask/webhook/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: data-science-pipelines-operator - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - namespace: datasciencepipelinesapplications-controller - name: webhook diff --git a/config/v2/kfptask/webhook/validatingwebhookconfig.yaml b/config/v2/kfptask/webhook/validatingwebhookconfig.yaml deleted file mode 100644 index 3f72469a2..000000000 --- a/config/v2/kfptask/webhook/validatingwebhookconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: kfp-tekton - 
pipeline.tekton.dev/release: devel - name: validation.webhook.kfptask.custom.tekton.dev -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: webhook - namespace: datasciencepipelinesapplications-controller - failurePolicy: Fail - name: validation.webhook.kfptask.custom.tekton.dev - sideEffects: None diff --git a/config/v2/kustomization.yaml b/config/v2/kustomization.yaml deleted file mode 100644 index 8b3c8c0a7..000000000 --- a/config/v2/kustomization.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization -namespace: openshift-pipelines -namePrefix: data-science-pipelines-operator- - -resources: -- ./cache -- ./driver -- ./exithandler -- ./kfptask -- ./pipelineloop -- ./tektoncrds diff --git a/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml deleted file mode 100644 index ea5988dd0..000000000 --- a/config/v2/pipelineloop/controller/clusterrole.clusteraccess.yaml +++ /dev/null @@ -1,66 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-controller-cluster-access-clusterrole -rules: -- apiGroups: - - tekton.dev - resources: - - runs - - customruns - - taskruns - - pipelineruns - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - tekton.dev - resources: - - runs/status - - customruns/status - - taskruns/status - - pipelineruns/status - - runs/finalizers - - customruns/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - custom.tekton.dev - resources: - - pipelineloops - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - 
verbs: - - get - - list - - create - - update - - delete - - patch - - watch diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml deleted file mode 100644 index adf99c4bf..000000000 --- a/config/v2/pipelineloop/controller/clusterrolebinding.clusteraccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-controller-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pipelineloop-controller-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: pipelineloop-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml deleted file mode 100644 index 2b1189a48..000000000 --- a/config/v2/pipelineloop/controller/clusterrolebinding.leaderelection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-controller-leaderelection-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pipelineloop-leader-election-clusterrole -subjects: -- kind: ServiceAccount - name: pipelineloop-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml b/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml deleted file mode 100644 index 1b395017e..000000000 --- 
a/config/v2/pipelineloop/controller/clusterrolebinding.tenantaccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-controller-tenant-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pipelineloop-controller-tenant-access-clusterrole -subjects: -- kind: ServiceAccount - name: pipelineloop-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/deployment.yaml b/config/v2/pipelineloop/controller/deployment.yaml deleted file mode 100644 index 1263b8ac2..000000000 --- a/config/v2/pipelineloop/controller/deployment.yaml +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: pipelineloop-controller -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: tekton-pipeline-loops - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: tektonpipelineloop-controller - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/name: controller - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - spec: - containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: 
CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-pipelineloop-controller:2.0.0 - name: pipelineloop-controller - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: pipelineloop-controller diff --git a/config/v2/pipelineloop/controller/kustomization.yaml b/config/v2/pipelineloop/controller/kustomization.yaml deleted file mode 100644 index 54449bfd8..000000000 --- a/config/v2/pipelineloop/controller/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -resources: -- clusterrole.clusteraccess.yaml -- clusterrole.tenantaccess.yaml -- clusterrolebinding.clusteraccess.yaml -- clusterrolebinding.leaderelection.yaml -- clusterrolebinding.tenantaccess.yaml -- deployment.yaml -- role.yaml -- rolebinding.yaml -- serviceaccount.yaml diff --git a/config/v2/pipelineloop/controller/role.yaml b/config/v2/pipelineloop/controller/role.yaml deleted file mode 100644 index 6b7818e29..000000000 --- a/config/v2/pipelineloop/controller/role.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-controller-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - config-leader-election - - config-logging - - config-observability - - object-store-config - resources: - - configmaps - verbs: - - get -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git 
a/config/v2/pipelineloop/controller/rolebinding.yaml b/config/v2/pipelineloop/controller/rolebinding.yaml deleted file mode 100644 index 24feb66bf..000000000 --- a/config/v2/pipelineloop/controller/rolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-controller-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pipelineloop-controller-role -subjects: -- kind: ServiceAccount - name: pipelineloop-controller - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/controller/serviceaccount.yaml b/config/v2/pipelineloop/controller/serviceaccount.yaml deleted file mode 100644 index 7e09fdc8d..000000000 --- a/config/v2/pipelineloop/controller/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: controller - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/name: data-science-pipelines-operator - namespace: datasciencepipelinesapplications-controller - name: pipelineloop-controller diff --git a/config/v2/pipelineloop/crd.yaml b/config/v2/pipelineloop/crd.yaml deleted file mode 100644 index 860c6a7b7..000000000 --- a/config/v2/pipelineloop/crd.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - pipeline.tekton.dev/release: devel - version: devel - name: pipelineloops.custom.tekton.dev -spec: - group: custom.tekton.dev - names: - categories: - - tekton - - tekton-pipelines - - openshift-pipelines - kind: PipelineLoop - plural: pipelineloops - scope: Namespaced - versions: - - 
name: v1alpha1 - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: true - subresources: - status: {} diff --git a/config/v2/pipelineloop/kustomization.yaml b/config/v2/pipelineloop/kustomization.yaml deleted file mode 100644 index 4c7cb015a..000000000 --- a/config/v2/pipelineloop/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -resources: -- crd.yaml -- clusterrole.leaderelection.yaml -- ./controller -- ./webhook diff --git a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml deleted file mode 100644 index 2af9cd61d..000000000 --- a/config/v2/pipelineloop/webhook/clusterrole.clusteraccess.yaml +++ /dev/null @@ -1,88 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-webhook-cluster-access-clusterrole -rules: -- apiGroups: - - apiextensions.k8s.io - resources: - - customresourcedefinitions - - customresourcedefinitions/status - verbs: - - get - - list - - update - - patch - - watch -- apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - update - - patch - - watch -- apiGroups: - - admissionregistration.k8s.io - resources: - - mutatingwebhookconfigurations - - validatingwebhookconfigurations - verbs: - - list - - watch -- apiGroups: - - admissionregistration.k8s.io - resourceNames: - - webhook.pipelineloop.custom.tekton.dev - resources: - - mutatingwebhookconfigurations - verbs: - - get - - update - - delete -- apiGroups: - - apps - resources: - - deployments - - deployments/finalizers - verbs: - - get - - list - - create - - update - - delete - - patch - - watch -- apiGroups: - - "" - resources: - - namespaces/finalizers - resourceNames: - - openshift-pipelines - verbs: - - update -- apiGroups: - - 
admissionregistration.k8s.io - resourceNames: - - validation.webhook.pipelineloop.custom.tekton.dev - resources: - - validatingwebhookconfigurations - verbs: - - get - - update - - delete -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml deleted file mode 100644 index 63587376d..000000000 --- a/config/v2/pipelineloop/webhook/clusterrolebinding.clusteraccess.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-webhook-cluster-access-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pipelineloop-webhook-cluster-access-clusterrole -subjects: -- kind: ServiceAccount - name: pipelineloop-webhook - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml b/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml deleted file mode 100644 index b164f1cb6..000000000 --- a/config/v2/pipelineloop/webhook/clusterrolebinding.leaderelection.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-webhook-leaderelection-clusterrolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: pipelineloop-leader-election-clusterrole -subjects: -- kind: ServiceAccount - name: pipelineloop-webhook - namespace: datasciencepipelinesapplications-controller 
diff --git a/config/v2/pipelineloop/webhook/deployment.yaml b/config/v2/pipelineloop/webhook/deployment.yaml deleted file mode 100644 index 1fcbd4c5f..000000000 --- a/config/v2/pipelineloop/webhook/deployment.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: pipelineloop-webhook -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: tekton-pipeline-loops - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "false" - labels: - app: tekton-pipelines-webhook - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - spec: - containers: - - env: - - name: SYSTEM_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: CONFIG_LOGGING_NAME - value: config-logging - - name: CONFIG_OBSERVABILITY_NAME - value: config-observability - - name: CONFIG_LEADERELECTION_NAME - value: config-leader-election - - name: WEBHOOK_SERVICE_NAME - value: tektonpipelineloop-webhook - - name: WEBHOOK_SECRET_NAME - value: tektonpipelineloop-webhook-certs - - name: METRICS_DOMAIN - value: tekton.dev/pipeline - image: quay.io/internaldatahub/tekton-pipelineloop-webhook:2.0.0 - name: webhook - ports: - - containerPort: 9090 - name: metrics - - containerPort: 8008 - name: profiling - - containerPort: 8443 - name: https-webhook - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - 
runAsGroup: 65532 - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - serviceAccountName: pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/kustomization.yaml b/config/v2/pipelineloop/webhook/kustomization.yaml deleted file mode 100644 index df691ded5..000000000 --- a/config/v2/pipelineloop/webhook/kustomization.yaml +++ /dev/null @@ -1,11 +0,0 @@ -resources: -- clusterrole.clusteraccess.yaml -- clusterrolebinding.clusteraccess.yaml -- clusterrolebinding.leaderelection.yaml -- deployment.yaml -- mutatingwebhookconfig.yaml -- role.yaml -- rolebinding.yaml -- service.yaml -- serviceaccount.yaml -- validatingwebhookconfig.yaml diff --git a/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml deleted file mode 100644 index 761454a3e..000000000 --- a/config/v2/pipelineloop/webhook/mutatingwebhookconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - pipeline.tekton.dev/release: devel - name: webhook.pipelineloop.custom.tekton.dev -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: pipelineloop-webhook - namespace: datasciencepipelinesapplications-controller - failurePolicy: Fail - name: webhook.pipelineloop.custom.tekton.dev - sideEffects: None diff --git a/config/v2/pipelineloop/webhook/role.yaml b/config/v2/pipelineloop/webhook/role.yaml deleted file mode 100644 index e4c8c0d1a..000000000 --- a/config/v2/pipelineloop/webhook/role.yaml +++ /dev/null @@ -1,52 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-webhook-role -rules: -- apiGroups: - - "" - 
resources: - - configmaps - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - config-logging - - config-observability - - config-leader-election - - object-store-config - resources: - - configmaps - verbs: - - get -- apiGroups: - - "" - resources: - - secrets - verbs: - - list - - watch -- apiGroups: - - "" - resourceNames: - - tektonpipelineloop-webhook-certs - resources: - - secrets - verbs: - - get - - update -- apiGroups: - - policy - resourceNames: - - tekton-pipelines - - openshift-pipelines - resources: - - podsecuritypolicies - verbs: - - use diff --git a/config/v2/pipelineloop/webhook/rolebinding.yaml b/config/v2/pipelineloop/webhook/rolebinding.yaml deleted file mode 100644 index d5df12f0c..000000000 --- a/config/v2/pipelineloop/webhook/rolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - name: pipelineloop-webhook-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: pipelineloop-webhook-role -subjects: -- kind: ServiceAccount - name: pipelineloop-webhook - namespace: datasciencepipelinesapplications-controller diff --git a/config/v2/pipelineloop/webhook/service.yaml b/config/v2/pipelineloop/webhook/service.yaml deleted file mode 100644 index e073bc3cd..000000000 --- a/config/v2/pipelineloop/webhook/service.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - app: tekton-pipelines-webhook - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/version: devel - pipeline.tekton.dev/release: devel - version: devel - name: pipelineloop-webhook - namespace: datasciencepipelinesapplications-controller -spec: - ports: - - name: http-metrics - port: 9090 
- targetPort: 9090 - - name: http-profiling - port: 8008 - targetPort: 8008 - - name: https-webhook - port: 443 - targetPort: 8443 - selector: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/name: webhook - app.kubernetes.io/part-of: tekton-pipeline-loops diff --git a/config/v2/pipelineloop/webhook/serviceaccount.yaml b/config/v2/pipelineloop/webhook/serviceaccount.yaml deleted file mode 100644 index bd71350d6..000000000 --- a/config/v2/pipelineloop/webhook/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - app.kubernetes.io/name: data-science-pipelines-operator - namespace: datasciencepipelinesapplications-controller - name: pipelineloop-webhook diff --git a/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml b/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml deleted file mode 100644 index f5fad8ae3..000000000 --- a/config/v2/pipelineloop/webhook/validatingwebhookconfig.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - labels: - app.kubernetes.io/component: webhook - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - pipeline.tekton.dev/release: devel - name: validation.webhook.pipelineloop.custom.tekton.dev -webhooks: -- admissionReviewVersions: - - v1beta1 - clientConfig: - service: - name: pipelineloop-webhook - namespace: datasciencepipelinesapplications-controller - failurePolicy: Fail - name: validation.webhook.pipelineloop.custom.tekton.dev - sideEffects: None diff --git a/config/v2/tektoncrds/crd.yaml b/config/v2/tektoncrds/crd.yaml deleted file mode 100644 index 155c675a3..000000000 --- a/config/v2/tektoncrds/crd.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 
-kind: CustomResourceDefinition -metadata: - labels: - app.kubernetes.io/instance: default - app.kubernetes.io/part-of: tekton-pipeline-loops - pipeline.tekton.dev/release: devel - version: devel - name: breaktasks.custom.tekton.dev -spec: - group: custom.tekton.dev - names: - categories: - - tekton - - tekton-pipelines - kind: BreakTask - plural: breaktasks - scope: Namespaced - versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - type: object - x-kubernetes-preserve-unknown-fields: true - served: true - storage: true - subresources: - status: {} diff --git a/config/v2/tektoncrds/kustomization.yaml b/config/v2/tektoncrds/kustomization.yaml deleted file mode 100644 index 1d3cbf0f8..000000000 --- a/config/v2/tektoncrds/kustomization.yaml +++ /dev/null @@ -1,2 +0,0 @@ -resources: -- crd.yaml diff --git a/controllers/common_test.go b/controllers/common_test.go index f9e2b3273..c0b411668 100644 --- a/controllers/common_test.go +++ b/controllers/common_test.go @@ -34,7 +34,6 @@ func TestDeployCommonPolicies(t *testing.T) { // Construct Basic DSPA Spec dspa := &dspav1alpha1.DataSciencePipelinesApplication{ Spec: dspav1alpha1.DSPASpec{ - EngineDriver: "tekton", Database: &dspav1alpha1.Database{ DisableHealthCheck: false, MariaDB: &dspav1alpha1.MariaDB{ diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 2c296e0b5..81edee2f9 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -36,12 +36,11 @@ import ( ) type DSPAParams struct { -<<<<<<< HEAD Name string Namespace string Owner mf.Owner DSPVersion string - EngineDriver string + EngineDriver string APIServer *dspa.APIServer APIServerPiplinesCABundleMountPath string PiplinesCABundleMountPath string @@ -90,11 +89,11 @@ func (p *DSPAParams) UsingV2Pipelines(dsp *dspa.DataSciencePipelinesApplication) } func (p *DSPAParams) UsingArgoEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { - return dsp.Spec.EngineDriver == "argo" + return 
p.UsingV2Pipelines(dsp) } func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { - return dsp.Spec.EngineDriver == "tekton" + return !p.UsingV2Pipelines(dsp) } // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. @@ -437,7 +436,6 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip p.Name = dsp.Name p.Namespace = dsp.Namespace p.DSPVersion = dsp.Spec.DSPVersion - p.EngineDriver = dsp.Spec.EngineDriver p.Owner = dsp p.APIServer = dsp.Spec.APIServer.DeepCopy() p.APIServerDefaultResourceName = apiServerDefaultResourceNamePrefix + dsp.Name @@ -457,9 +455,6 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip pipelinesV2Images := p.UsingV2Pipelines(dsp) usingArgoEngine := p.UsingArgoEngineDriver(dsp) usingTektonEngine := p.UsingTektonEngineDriver(dsp) - // if !usingArgoEngine && !usingTektonEngine { - // return fmt.Errorf(fmt.Sprintf("Illegal Engine Driver (%s) specified, cannot continue.", dsp.Spec.EngineDriver)) - // } if p.APIServer != nil { APIServerImagePath := config.APIServerImagePath diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 289afee13..697f2aed1 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -58,29 +58,6 @@ spec: value: gcr.io/ml-pipeline/visualization-server:2.0.2 - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER value: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance - - name: IMAGESV2_TEKTON_APISERVER - value: quay.io/rmartine/apiserver:v2 - - name: IMAGESV2_TEKTON_ARTIFACT - value: quay.io/opendatahub/ds-pipelines-artifact-manager:main - - name: IMAGESV2_TEKTON_PERSISTENTAGENT - value: quay.io/rmartine/persistenceagent-dev:6b8723529 - - name: IMAGESV2_TEKTON_SCHEDULEDWORKFLOW - value: quay.io/rmartine/swf-dev:6b8723529 - - name: IMAGESV2_TEKTON_CACHE - value: registry.access.redhat.com/ubi8/ubi-minimal:8.7 - - name: IMAGESV2_TEKTON_MOVERESULTSIMAGE - value: 
registry.access.redhat.com/ubi8/ubi-micro:8.7 - - name: IMAGESV2_TEKTON_MLMDENVOY - value: gcr.io/ml-pipeline/metadata-envoy:2.0.0-rc.2 - - name: IMAGESV2_TEKTON_MLMDGRPC - value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.5.0 - - name: IMAGESV2_TEKTON_MLMDWRITER - value: gcr.io/ml-pipeline/metadata-writer:2.0.0-rc.2 - - name: IMAGESV2_TEKTON_VISUALIZATIONSERVER - value: gcr.io/ml-pipeline/visualization-server:2.0.2 - - name: IMAGESV2_TEKTON_WORKFLOWCONTROLLER - value: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance - repoRef: name: manifests path: config From 4333b16c3dc092d5e146c17390e76725d036924f Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 21 Nov 2023 11:46:03 -0500 Subject: [PATCH 57/85] Simplify ImagePath retrieval in DSPAParams --- controllers/config/defaults.go | 1 + controllers/dspipeline_params.go | 73 +++++++++----------------------- 2 files changed, 22 insertions(+), 52 deletions(-) diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index 5c5bf238b..99ca0306d 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -90,6 +90,7 @@ const ( ) // DSPV2-Tekton Image Paths +// Note: These won't exist in config but aren't used, adding in case of future support const ( APIServerImagePathV2Tekton = "ImagesV2.Tekton.ApiServer" APIServerArtifactImagePathV2Tekton = "ImagesV2.Tekton.Artifact" diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 81edee2f9..57e6e8f2f 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -96,6 +96,17 @@ func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesAppli return !p.UsingV2Pipelines(dsp) } +func (p *DSPAParams) GetImageForComponent(dsp *dspa.DataSciencePipelinesApplication, v1Image, v2ArgoImage, v2TektonImage string) string { + if p.UsingV2Pipelines(dsp) { + if p.UsingArgoEngineDriver(dsp) { + return v2ArgoImage + } else { + return v2TektonImage + } + } + 
return v1Image +} + // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. func (p *DSPAParams) UsingExternalDB(dsp *dspa.DataSciencePipelinesApplication) bool { if dsp.Spec.Database != nil && dsp.Spec.Database.ExternalDB != nil { @@ -373,20 +384,10 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { if p.MLMD != nil { - MlmdEnvoyImagePath := config.MlmdEnvoyImagePath - MlmdGRPCImagePath := config.MlmdGRPCImagePath - MlmdWriterImagePath := config.MlmdWriterImagePath - if p.UsingV2Pipelines(dsp) { - if p.UsingArgoEngineDriver(dsp) { - MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Argo - MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Argo - MlmdWriterImagePath = config.MlmdWriterImagePathV2Argo - } else if p.UsingTektonEngineDriver(dsp) { - MlmdEnvoyImagePath = config.MlmdEnvoyImagePathV2Tekton - MlmdGRPCImagePath = config.MlmdGRPCImagePathV2Tekton - MlmdWriterImagePath = config.MlmdWriterImagePathV2Tekton - } - } + MlmdEnvoyImagePath := p.GetImageForComponent(dsp, config.MlmdEnvoyImagePath, config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) + MlmdGRPCImagePath := p.GetImageForComponent(dsp, config.MlmdGRPCImagePath, config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) + MlmdWriterImagePath := p.GetImageForComponent(dsp, config.MlmdWriterImagePath, config.MlmdWriterImagePathV2Argo, config.MlmdWriterImagePathV2Tekton) + if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ Image: config.GetStringConfigWithDefault(MlmdEnvoyImagePath, config.DefaultImageValue), @@ -452,28 +453,11 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip p.APIServerPiplinesCABundleMountPath = config.APIServerPiplinesCABundleMountPath p.PiplinesCABundleMountPath = config.PiplinesCABundleMountPath - pipelinesV2Images 
:= p.UsingV2Pipelines(dsp) - usingArgoEngine := p.UsingArgoEngineDriver(dsp) - usingTektonEngine := p.UsingTektonEngineDriver(dsp) - if p.APIServer != nil { - APIServerImagePath := config.APIServerImagePath - APIServerArtifactImagePath := config.APIServerArtifactImagePath - APIServerCacheImagePath := config.APIServerCacheImagePath - APIServerMoveResultsImagePath := config.APIServerMoveResultsImagePath - if pipelinesV2Images { - if usingArgoEngine { - APIServerImagePath = config.APIServerImagePathV2Argo - APIServerArtifactImagePath = config.APIServerArtifactImagePathV2Argo - APIServerCacheImagePath = config.APIServerCacheImagePathV2Argo - APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2Argo - } else if usingTektonEngine { - APIServerImagePath = config.APIServerImagePathV2Tekton - APIServerArtifactImagePath = config.APIServerArtifactImagePathV2Tekton - APIServerCacheImagePath = config.APIServerCacheImagePathV2Tekton - APIServerMoveResultsImagePath = config.APIServerMoveResultsImagePathV2Tekton - } - } + APIServerImagePath := p.GetImageForComponent(dsp, config.APIServerImagePath, config.APIServerImagePathV2Argo, config.APIServerImagePathV2Tekton) + APIServerArtifactImagePath := p.GetImageForComponent(dsp, config.APIServerArtifactImagePath, config.APIServerArtifactImagePathV2Argo, config.APIServerArtifactImagePathV2Tekton) + APIServerCacheImagePath := p.GetImageForComponent(dsp, config.APIServerCacheImagePath, config.APIServerCacheImagePathV2Argo, config.APIServerCacheImagePathV2Tekton) + APIServerMoveResultsImagePath := p.GetImageForComponent(dsp, config.APIServerMoveResultsImagePath, config.APIServerMoveResultsImagePathV2Argo, config.APIServerMoveResultsImagePathV2Tekton) serverImageFromConfig := config.GetStringConfigWithDefault(APIServerImagePath, config.DefaultImageValue) artifactImageFromConfig := config.GetStringConfigWithDefault(APIServerArtifactImagePath, config.DefaultImageValue) @@ -508,28 +492,13 @@ func (p *DSPAParams) 
ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip } if p.PersistenceAgent != nil { - PersistenceAgentImagePath := config.PersistenceAgentImagePath - if pipelinesV2Images { - if usingArgoEngine { - PersistenceAgentImagePath = config.PersistenceAgentImagePathV2Argo - } else if usingTektonEngine { - PersistenceAgentImagePath = config.PersistenceAgentImagePathV2Tekton - } - } + PersistenceAgentImagePath := p.GetImageForComponent(dsp, config.PersistenceAgentImagePath, config.PersistenceAgentImagePathV2Argo, config.PersistenceAgentImagePathV2Tekton) persistenceAgentImageFromConfig := config.GetStringConfigWithDefault(PersistenceAgentImagePath, config.DefaultImageValue) setStringDefault(persistenceAgentImageFromConfig, &p.PersistenceAgent.Image) setResourcesDefault(config.PersistenceAgentResourceRequirements, &p.PersistenceAgent.Resources) } if p.ScheduledWorkflow != nil { - ScheduledWorkflowImagePath := config.ScheduledWorkflowImagePath - if pipelinesV2Images { - if usingArgoEngine { - ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2Argo - - } else if usingTektonEngine { - ScheduledWorkflowImagePath = config.ScheduledWorkflowImagePathV2Tekton - } - } + ScheduledWorkflowImagePath := p.GetImageForComponent(dsp, config.ScheduledWorkflowImagePath, config.ScheduledWorkflowImagePathV2Argo, config.ScheduledWorkflowImagePathV2Tekton) scheduledWorkflowImageFromConfig := config.GetStringConfigWithDefault(ScheduledWorkflowImagePath, config.DefaultImageValue) setStringDefault(scheduledWorkflowImageFromConfig, &p.ScheduledWorkflow.Image) setResourcesDefault(config.ScheduledWorkflowResourceRequirements, &p.ScheduledWorkflow.Resources) From 1f880eb6e639474019783d2761c9e938f4eda0a3 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 5 Dec 2023 11:55:32 -0500 Subject: [PATCH 58/85] Fix YAML formatting --- config/argo/clusterrole.argo-aggregate-to-admin.yaml | 2 +- config/argo/clusterrole.argo-aggregate-to-edit.yaml | 2 +- 
config/argo/clusterrole.argo-aggregate-to-view.yaml | 2 +- config/argo/clusterrole.argo-cluster-role.yaml | 2 +- config/argo/clusterrole.argo-server-cluster-role.yaml | 2 +- config/argo/clusterrolebinding.argo-binding.yaml | 2 +- config/argo/clusterrolebinding.argo-server-binding.yaml | 2 +- config/argo/configmap.workflow-controller-configmap.yaml | 2 +- config/argo/deployment.argo-server.yaml | 2 +- config/argo/deployment.workflow-controller.yaml | 2 +- config/argo/kustomization.yaml | 4 +--- config/argo/role.argo.yaml | 2 +- config/argo/rolebinding.argo-binding.yaml | 2 +- config/argo/service.argo-server.yaml | 2 +- config/argo/serviceaccount.argo-server.yaml | 2 +- config/argo/serviceaccount.argo.yaml | 2 +- config/internal/apiserver/default/deployment.yaml.tmpl | 2 +- .../internal/apiserver/default/service.ml-pipeline.yaml.tmpl | 1 - config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl | 2 +- config/internal/persistence-agent/deployment.yaml.tmpl | 2 +- config/internal/workflow-controller/configmap.yaml.tmpl | 1 - config/internal/workflow-controller/rolebinding.yaml.tmpl | 2 +- 22 files changed, 20 insertions(+), 24 deletions(-) diff --git a/config/argo/clusterrole.argo-aggregate-to-admin.yaml b/config/argo/clusterrole.argo-aggregate-to-admin.yaml index f978dca0c..3d669135b 100644 --- a/config/argo/clusterrole.argo-aggregate-to-admin.yaml +++ b/config/argo/clusterrole.argo-aggregate-to-admin.yaml @@ -31,4 +31,4 @@ rules: - list - patch - update - - watch \ No newline at end of file + - watch diff --git a/config/argo/clusterrole.argo-aggregate-to-edit.yaml b/config/argo/clusterrole.argo-aggregate-to-edit.yaml index 4797d0a1b..aec9b1871 100644 --- a/config/argo/clusterrole.argo-aggregate-to-edit.yaml +++ b/config/argo/clusterrole.argo-aggregate-to-edit.yaml @@ -29,4 +29,4 @@ rules: - list - patch - update - - watch \ No newline at end of file + - watch diff --git a/config/argo/clusterrole.argo-aggregate-to-view.yaml 
b/config/argo/clusterrole.argo-aggregate-to-view.yaml index 318097cf0..20b6b2d89 100644 --- a/config/argo/clusterrole.argo-aggregate-to-view.yaml +++ b/config/argo/clusterrole.argo-aggregate-to-view.yaml @@ -24,4 +24,4 @@ rules: verbs: - get - list - - watch \ No newline at end of file + - watch diff --git a/config/argo/clusterrole.argo-cluster-role.yaml b/config/argo/clusterrole.argo-cluster-role.yaml index 8e7410107..a71cf985e 100644 --- a/config/argo/clusterrole.argo-cluster-role.yaml +++ b/config/argo/clusterrole.argo-cluster-role.yaml @@ -103,4 +103,4 @@ rules: verbs: - create - get - - delete \ No newline at end of file + - delete diff --git a/config/argo/clusterrole.argo-server-cluster-role.yaml b/config/argo/clusterrole.argo-server-cluster-role.yaml index 699c36c41..0ad3b0465 100644 --- a/config/argo/clusterrole.argo-server-cluster-role.yaml +++ b/config/argo/clusterrole.argo-server-cluster-role.yaml @@ -63,4 +63,4 @@ rules: - watch - update - patch - - delete \ No newline at end of file + - delete diff --git a/config/argo/clusterrolebinding.argo-binding.yaml b/config/argo/clusterrolebinding.argo-binding.yaml index 05a9369b2..a927fdae1 100644 --- a/config/argo/clusterrolebinding.argo-binding.yaml +++ b/config/argo/clusterrolebinding.argo-binding.yaml @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: argo - namespace: argo \ No newline at end of file + namespace: argo diff --git a/config/argo/clusterrolebinding.argo-server-binding.yaml b/config/argo/clusterrolebinding.argo-server-binding.yaml index 81f664337..79b8df515 100644 --- a/config/argo/clusterrolebinding.argo-server-binding.yaml +++ b/config/argo/clusterrolebinding.argo-server-binding.yaml @@ -10,4 +10,4 @@ roleRef: subjects: - kind: ServiceAccount name: argo-server - namespace: argo \ No newline at end of file + namespace: argo diff --git a/config/argo/configmap.workflow-controller-configmap.yaml b/config/argo/configmap.workflow-controller-configmap.yaml index 86379fcbe..f46690911 
100644 --- a/config/argo/configmap.workflow-controller-configmap.yaml +++ b/config/argo/configmap.workflow-controller-configmap.yaml @@ -3,4 +3,4 @@ apiVersion: v1 kind: ConfigMap metadata: name: workflow-controller-configmap - namespace: argo \ No newline at end of file + namespace: argo diff --git a/config/argo/deployment.argo-server.yaml b/config/argo/deployment.argo-server.yaml index b12e7c88b..889f05c18 100644 --- a/config/argo/deployment.argo-server.yaml +++ b/config/argo/deployment.argo-server.yaml @@ -46,4 +46,4 @@ spec: serviceAccountName: argo-server volumes: - emptyDir: {} - name: tmp \ No newline at end of file + name: tmp diff --git a/config/argo/deployment.workflow-controller.yaml b/config/argo/deployment.workflow-controller.yaml index 1d5a056bf..b91ee6390 100644 --- a/config/argo/deployment.workflow-controller.yaml +++ b/config/argo/deployment.workflow-controller.yaml @@ -55,4 +55,4 @@ spec: priorityClassName: workflow-controller securityContext: runAsNonRoot: true - serviceAccountName: argo \ No newline at end of file + serviceAccountName: argo diff --git a/config/argo/kustomization.yaml b/config/argo/kustomization.yaml index 6b09a4e74..d7d64e58d 100644 --- a/config/argo/kustomization.yaml +++ b/config/argo/kustomization.yaml @@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization namespace: argo resources: -# Deploy Argo Controller and Server +# Deploy Argo Controller and Server # TODO: Only deploy server (not WC?) 
# - https://github.com/argoproj/argo-workflows/releases/download/v3.4.12/install.yaml @@ -35,5 +35,3 @@ resources: - crd.workflowtaskresult.yaml - crd.workflowtaskset.yaml - crd.workflowtemplate.yaml - - diff --git a/config/argo/role.argo.yaml b/config/argo/role.argo.yaml index d58c221b6..4c3cf0475 100644 --- a/config/argo/role.argo.yaml +++ b/config/argo/role.argo.yaml @@ -18,4 +18,4 @@ rules: resources: - secrets verbs: - - get \ No newline at end of file + - get diff --git a/config/argo/rolebinding.argo-binding.yaml b/config/argo/rolebinding.argo-binding.yaml index fe64852d9..3e056c348 100644 --- a/config/argo/rolebinding.argo-binding.yaml +++ b/config/argo/rolebinding.argo-binding.yaml @@ -11,4 +11,4 @@ roleRef: subjects: - kind: ServiceAccount name: argo - namespace: argo \ No newline at end of file + namespace: argo diff --git a/config/argo/service.argo-server.yaml b/config/argo/service.argo-server.yaml index b9affd5da..9afb910c0 100644 --- a/config/argo/service.argo-server.yaml +++ b/config/argo/service.argo-server.yaml @@ -10,4 +10,4 @@ spec: port: 2746 targetPort: 2746 selector: - app: argo-server \ No newline at end of file + app: argo-server diff --git a/config/argo/serviceaccount.argo-server.yaml b/config/argo/serviceaccount.argo-server.yaml index 27556a207..4d7a55bdb 100644 --- a/config/argo/serviceaccount.argo-server.yaml +++ b/config/argo/serviceaccount.argo-server.yaml @@ -3,4 +3,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: argo-server - namespace: argo \ No newline at end of file + namespace: argo diff --git a/config/argo/serviceaccount.argo.yaml b/config/argo/serviceaccount.argo.yaml index 2de7bc6d0..8ee79ed1c 100644 --- a/config/argo/serviceaccount.argo.yaml +++ b/config/argo/serviceaccount.argo.yaml @@ -3,4 +3,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: argo - namespace: argo \ No newline at end of file + namespace: argo diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl 
b/config/internal/apiserver/default/deployment.yaml.tmpl index 60c5a5ed9..8dafd0c7c 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -109,7 +109,7 @@ spec: - name: OBJECTSTORECONFIG_SECURE value: "false" - name: OBJECTSTORECONFIG_BUCKETNAME - value: "{{.ObjectStorageConnection.Bucket}}" + value: "{{.ObjectStorageConnection.Bucket}}" - name: DB_DRIVER_NAME value: mysql - name: DBCONFIG_MYSQLCONFIG_USER diff --git a/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl b/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl index 919bf4cae..753507b92 100644 --- a/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl +++ b/config/internal/apiserver/default/service.ml-pipeline.yaml.tmpl @@ -25,4 +25,3 @@ spec: selector: app: ds-pipeline-{{.Name}} component: data-science-pipelines - diff --git a/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl b/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl index a92d8133b..1442b59fb 100644 --- a/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl +++ b/config/internal/ml-metadata/metadata-grpc.configmap.yaml.tmpl @@ -7,4 +7,4 @@ metadata: component: metadata-grpc-server data: METADATA_GRPC_SERVICE_HOST: "ds-pipeline-metadata-grpc-{{.Name}}.{{.Namespace}}.svc.cluster.local" - METADATA_GRPC_SERVICE_PORT: "8080" \ No newline at end of file + METADATA_GRPC_SERVICE_PORT: "8080" diff --git a/config/internal/persistence-agent/deployment.yaml.tmpl b/config/internal/persistence-agent/deployment.yaml.tmpl index b2ca5f507..831809961 100644 --- a/config/internal/persistence-agent/deployment.yaml.tmpl +++ b/config/internal/persistence-agent/deployment.yaml.tmpl @@ -105,4 +105,4 @@ spec: audience: pipelines.kubeflow.org expirationSeconds: 3600 path: ds-pipeline-persistenceagent-{{.Name}}-token - {{ end }} \ No newline at end of file + {{ end }} diff --git 
a/config/internal/workflow-controller/configmap.yaml.tmpl b/config/internal/workflow-controller/configmap.yaml.tmpl index 45fdb2d70..7d7788bcc 100644 --- a/config/internal/workflow-controller/configmap.yaml.tmpl +++ b/config/internal/workflow-controller/configmap.yaml.tmpl @@ -39,4 +39,3 @@ data: containerRuntimeExecutor: emissary # TODO executor: | imagePullPolicy: IfNotPresent # TODO - diff --git a/config/internal/workflow-controller/rolebinding.yaml.tmpl b/config/internal/workflow-controller/rolebinding.yaml.tmpl index dbafe5730..4e388df4d 100644 --- a/config/internal/workflow-controller/rolebinding.yaml.tmpl +++ b/config/internal/workflow-controller/rolebinding.yaml.tmpl @@ -17,4 +17,4 @@ roleRef: subjects: - kind: ServiceAccount name: ds-pipeline-workflow-controller-{{.Name}} - namespace: {{.Namespace}} + namespace: {{.Namespace}} From e5e04d2fcc0c020078bfe68fdef311328ade731b Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 5 Dec 2023 12:09:30 -0500 Subject: [PATCH 59/85] Fix and Re-generate DataSciencePipelinesApplication CRD --- api/v1alpha1/dspipeline_types.go | 2 +- ...tions.opendatahub.io_datasciencepipelinesapplications.yaml | 4 ---- 2 files changed, 1 insertion(+), 5 deletions(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 32d94b287..bc84557fe 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -52,7 +52,7 @@ type DSPASpec struct { // +kubebuilder:default:="v1" DSPVersion string `json:"dspVersion,omitempty"` // +kubebuilder:validation:Optional - // +kubebuilder:default:="tekton" + // +kubebuilder:default:={deploy: false} *WorkflowController `json:"workflowController,omitempty"` } diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 73defd443..499293c2d 100644 --- 
a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -300,9 +300,6 @@ spec: dspVersion: default: v1 type: string - engineDriver: - default: tekton - type: string mlmd: default: deploy: true @@ -766,7 +763,6 @@ spec: workflowController: default: deploy: false - description: DS Pipelines Argo Workflow Controller Configuration. properties: deploy: default: true From b3014b09e61622b284ce7a5a62cbed74263dc10c Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 5 Dec 2023 14:17:11 -0500 Subject: [PATCH 60/85] Only apply Argo-specfic env vars if on v2 --- .../apiserver/default/deployment.yaml.tmpl | 20 ++++++------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index 8dafd0c7c..ea19d4340 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -95,21 +95,11 @@ spec: - name: ML_PIPELINE_SERVICE_HOST value: "ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local" - name: ML_PIPELINE_SERVICE_PORT_GRPC - value: "8887" - ## Values change based on Engine Driver ## - - name: EXECUTIONTYPE + value: "8887" {{ if (eq .DSPVersion "v2") }} - value: Workflow - {{ else }} - value: PipelineRun - {{ end }} ## Argo-Specific Env Vars ## - - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION - value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" - - name: OBJECTSTORECONFIG_SECURE - value: "false" - - name: OBJECTSTORECONFIG_BUCKETNAME - value: "{{.ObjectStorageConnection.Bucket}}" + - name: EXECUTIONTYPE + value: Workflow - name: DB_DRIVER_NAME value: mysql - name: DBCONFIG_MYSQLCONFIG_USER @@ -125,8 +115,10 @@ spec: value: "{{.DBConnection.Host}}" - name: DBCONFIG_MYSQLCONFIG_PORT value: "{{.DBConnection.Port}}" + {{ else }} 
## Tekton-Specific Env Vars ## - {{ if (eq .DSPVersion "v1") }} + - name: EXECUTIONTYPE + value: PipelineRun - name: ARTIFACT_BUCKET value: "{{.ObjectStorageConnection.Bucket}}" - name: ARTIFACT_ENDPOINT From c909d669ed932bcdc21ec328ef6701a7743b01e1 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 5 Dec 2023 14:17:37 -0500 Subject: [PATCH 61/85] Fix/Update Functional Tests to accomodate v2-argo --- .../apiserver/default/deployment.yaml.tmpl | 2 +- .../created/apiserver_deployment.yaml | 58 +++++++++---------- .../created/apiserver_deployment.yaml | 58 +++++++++---------- .../created/apiserver_deployment.yaml | 58 +++++++++---------- .../created/apiserver_deployment.yaml | 58 +++++++++---------- .../created/apiserver_deployment.yaml | 58 +++++++++---------- .../created/apiserver_deployment.yaml | 58 +++++++++---------- .../created/apiserver_deployment.yaml | 48 ++++++--------- .../created/persistence-agent_deployment.yaml | 15 ++++- 9 files changed, 208 insertions(+), 205 deletions(-) diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index ea19d4340..85adaa00b 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -95,7 +95,7 @@ spec: - name: ML_PIPELINE_SERVICE_HOST value: "ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local" - name: ML_PIPELINE_SERVICE_PORT_GRPC - value: "8887" + value: "8887" {{ if (eq .DSPVersion "v2") }} ## Argo-Specific Env Vars ## - name: EXECUTIONTYPE diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index 38e5b1e58..15a850e3a 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -37,35 +37,6 @@ spec: value: 
"mariadb-testdsp0.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - - name: ARTIFACT_BUCKET - value: "mlpipeline" - - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp0.default.svc.cluster.local:9000" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp0" - - name: ARTIFACT_IMAGE - value: "artifact-manager:test0" - - name: ARCHIVE_LOGS - value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp0" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -80,6 +51,8 @@ spec: value: "accesskey" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "secretkey" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp0" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -110,6 +83,33 @@ spec: value: ds-pipeline-testdsp0.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: PipelineRun + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp0.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp0" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test0" + - name: ARCHIVE_LOGS + value: "false" + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: 
"true" + - name: TERMINATE_STATUS + value: "Cancelled" image: api-server:test0 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 85901a309..54378eadd 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -37,35 +37,6 @@ spec: value: "mariadb-testdsp2.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - - name: ARTIFACT_BUCKET - value: "mlpipeline" - - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp2.default.svc.cluster.local:9000" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp2" - - name: ARTIFACT_IMAGE - value: "artifact-manager:test2" - - name: ARCHIVE_LOGS - value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp2" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -80,6 +51,8 @@ spec: value: "accesskey" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "secretkey" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp2" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -110,6 +83,33 @@ spec: value: ds-pipeline-testdsp2.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: PipelineRun + - name: ARTIFACT_BUCKET + 
value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp2.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp2" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test2" + - name: ARCHIVE_LOGS + value: "false" + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" image: api-server:test2 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index 24fb7955d..a191ef260 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -37,35 +37,6 @@ spec: value: "testdbhost3" - name: DBCONFIG_PORT value: "test3" - - name: ARTIFACT_BUCKET - value: "testbucket3" - - name: ARTIFACT_ENDPOINT - value: "https://teststoragehost3:80" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "somekey" - name: "doesnotexist" - - name: ARTIFACT_IMAGE - value: artifact-manager:test3 - - name: ARCHIVE_LOGS - value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp3" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -80,6 +51,8 @@ spec: 
value: "testaccesskey3" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "testsecretkey3" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp3" - name: OBJECTSTORECONFIG_BUCKETNAME value: "testbucket3" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -110,6 +83,33 @@ spec: value: ds-pipeline-testdsp3.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: PipelineRun + - name: ARTIFACT_BUCKET + value: "testbucket3" + - name: ARTIFACT_ENDPOINT + value: "https://teststoragehost3:80" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "somekey" + name: "doesnotexist" + - name: ARTIFACT_IMAGE + value: artifact-manager:test3 + - name: ARCHIVE_LOGS + value: "false" + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" image: api-server:test3 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index 074518c69..c1e74562b 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -37,35 +37,6 @@ spec: value: "mariadb-testdsp4.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - - name: ARTIFACT_BUCKET - value: "mlpipeline" - - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp4.default.svc.cluster.local:9000" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp4" - - name: ARTIFACT_IMAGE - value: "this-artifact-manager-image-from-cr-should-be-used:test4" - - name: 
ARCHIVE_LOGS - value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp4" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -80,6 +51,8 @@ spec: value: "accesskey" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "secretkey" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp4" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -110,6 +83,33 @@ spec: value: ds-pipeline-testdsp4.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: PipelineRun + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp4.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp4" + - name: ARTIFACT_IMAGE + value: "this-artifact-manager-image-from-cr-should-be-used:test4" + - name: ARCHIVE_LOGS + value: "false" + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" image: this-apiserver-image-from-cr-should-be-used:test4 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 8bf8bc81d..39cb194fc 100644 --- 
a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -37,35 +37,6 @@ spec: value: "mariadb-testdsp5.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - - name: ARTIFACT_BUCKET - value: "mlpipeline" - - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp5.default.svc.cluster.local:9000" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp5" - - name: ARTIFACT_IMAGE - value: "artifact-manager:test5" - - name: ARCHIVE_LOGS - value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp5" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -80,6 +51,8 @@ spec: value: "accesskey" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "secretkey" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp5" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -110,6 +83,33 @@ spec: value: ds-pipeline-testdsp5.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: PipelineRun + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp5.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp5" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test5" + - name: ARCHIVE_LOGS + value: "false" + - name: 
TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" image: api-server:test5 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml index 2cbeda30d..f7893dccc 100644 --- a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml @@ -37,41 +37,12 @@ spec: value: "mariadb-testdsp6.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - - name: ARTIFACT_BUCKET - value: "mlpipeline" - - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp6.default.svc.cluster.local:9000" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp6" - - name: ARTIFACT_IMAGE - value: "artifact-manager:test6" - - name: ARCHIVE_LOGS - value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_NAME value: testcabundleconfigmap6 - name: ARTIFACT_COPY_STEP_CABUNDLE_CONFIGMAP_KEY value: testcabundleconfigmapkey6.crt - name: ARTIFACT_COPY_STEP_CABUNDLE_MOUNTPATH value: /etc/pki/tls/certs - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp6" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -86,6 +57,8 @@ spec: value: "accesskey" - 
name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "secretkey" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp6" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -112,6 +85,33 @@ spec: value: ds-pipeline-testdsp6.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: PipelineRun + - name: ARTIFACT_BUCKET + value: "mlpipeline" + - name: ARTIFACT_ENDPOINT + value: "http://minio-testdsp6.default.svc.cluster.local:9000" + - name: ARTIFACT_SCRIPT + valueFrom: + configMapKeyRef: + key: "artifact_script" + name: "ds-pipeline-artifact-script-testdsp6" + - name: ARTIFACT_IMAGE + value: "artifact-manager:test6" + - name: ARCHIVE_LOGS + value: "false" + - name: TRACK_ARTIFACTS + value: "true" + - name: STRIP_EOF + value: "true" + - name: PIPELINE_RUNTIME + value: "tekton" + - name: INJECT_DEFAULT_SCRIPT + value: "true" + - name: APPLY_TEKTON_CUSTOM_RESOURCE + value: "true" + - name: TERMINATE_STATUS + value: "Cancelled" image: api-server:test6 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml index 567e79bbf..090f30d5e 100644 --- a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml @@ -37,35 +37,6 @@ spec: value: "mariadb-testdsp7.default.svc.cluster.local" - name: DBCONFIG_PORT value: "3306" - - name: ARTIFACT_BUCKET - value: "mlpipeline" - - name: ARTIFACT_ENDPOINT - value: "http://minio-testdsp7.default.svc.cluster.local:9000" - - name: ARTIFACT_SCRIPT - valueFrom: - configMapKeyRef: - key: "artifact_script" - name: "ds-pipeline-artifact-script-testdsp7" - - name: ARTIFACT_IMAGE - value: "artifact-manager:test7" - - name: ARCHIVE_LOGS - 
value: "false" - - name: EXECUTIONTYPE - value: PipelineRun - - name: TRACK_ARTIFACTS - value: "true" - - name: STRIP_EOF - value: "true" - - name: PIPELINE_RUNTIME - value: "tekton" - - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT - value: "pipeline-runner-testdsp7" - - name: INJECT_DEFAULT_SCRIPT - value: "true" - - name: APPLY_TEKTON_CUSTOM_RESOURCE - value: "true" - - name: TERMINATE_STATUS - value: "Cancelled" - name: AUTO_UPDATE_PIPELINE_DEFAULT_VERSION value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC @@ -80,6 +51,8 @@ spec: value: "accesskey" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY value: "secretkey" + - name: DEFAULTPIPELINERUNNERSERVICEACCOUNT + value: "pipeline-runner-testdsp7" - name: OBJECTSTORECONFIG_BUCKETNAME value: "mlpipeline" - name: OBJECTSTORECONFIG_ACCESSKEY @@ -110,6 +83,23 @@ spec: value: ds-pipeline-testdsp7.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC value: "8887" + - name: EXECUTIONTYPE + value: Workflow + - name: DB_DRIVER_NAME + value: mysql + - name: DBCONFIG_MYSQLCONFIG_USER + value: testuser + - name: DBCONFIG_MYSQLCONFIG_PASSWORD + valueFrom: + secretKeyRef: + key: "password" + name: "ds-pipeline-db-testdsp7" + - name: DBCONFIG_MYSQLCONFIG_DBNAME + value: "randomDBName" + - name: DBCONFIG_MYSQLCONFIG_HOST + value: "mariadb-testdsp7.default.svc.cluster.local" + - name: DBCONFIG_MYSQLCONFIG_PORT + value: "3306" image: api-server:test7 imagePullPolicy: Always name: ds-pipeline-api-server diff --git a/controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml index f914a38e2..abcb70d3e 100644 --- a/controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/persistence-agent_deployment.yaml @@ -35,7 +35,7 @@ spec: - name: KUBEFLOW_USERID_PREFIX value: "" - name: EXECUTIONTYPE - value: PipelineRun 
+ value: Workflow image: persistenceagent:test7 imagePullPolicy: IfNotPresent name: ds-pipeline-persistenceagent @@ -73,4 +73,17 @@ spec: limits: cpu: 2524m memory: 5Gi + volumeMounts: + - mountPath: /var/run/secrets/kubeflow/tokens/persistenceagent-sa-token + name: persistenceagent-sa-token + subPath: ds-pipeline-persistenceagent-testdsp7-token serviceAccountName: ds-pipeline-persistenceagent-testdsp7 + volumes: + - name: persistenceagent-sa-token + projected: + sources: + - serviceAccountToken: + audience: pipelines.kubeflow.org + expirationSeconds: 3600 + path: ds-pipeline-persistenceagent-testdsp7-token + defaultMode: 420 From 8749630b5e6379f0748dd4e3f2d7df8aa2214007 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 5 Dec 2023 14:42:14 -0500 Subject: [PATCH 62/85] Add Context Documentation for WorkflowController - Add description and code comments regarding Workflow Controller and its image to the CRD and DSPO config --- api/v1alpha1/dspipeline_types.go | 1 + config/configmaps/files/config.yaml | 5 ++++- ...ions.opendatahub.io_datasciencepipelinesapplications.yaml | 3 +++ 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index bc84557fe..c91efe42b 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -51,6 +51,7 @@ type DSPASpec struct { // +kubebuilder:validation:Optional // +kubebuilder:default:="v1" DSPVersion string `json:"dspVersion,omitempty"` + // WorkflowController is an argo-specific component that manages a DSPA's Workflow objects and handles the orchestration of them with the central Argo server // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *WorkflowController `json:"workflowController,omitempty"` diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index dc5d0ef6f..3302586d9 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ 
-37,7 +37,10 @@ ImagesV2: MlmdWriter: $(IMAGESV2_TEKTON_MLMDWRITER) VisualizationServer: $(IMAGES_TEKTON_VISUALIZATIONSERVER) # WorkflowController is an argo-only component - # Using argo image here only for fault tolerance, but should handle this in code. + # Using argo image here only for fault tolerance, but should handle this in code + # In a theoretical example, this is here so that if a V2 DSPA is using Tekton backend but + # also requests WorkflowController.deploy=true, we may hit issues if the value just doesn't exist. + # Having a fill-in value, even if it doesn't work with the rest of the deployment, alleviates some of those concerns WorkflowController: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) DSPO: HealthCheck: diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 499293c2d..d6f805dd9 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -763,6 +763,9 @@ spec: workflowController: default: deploy: false + description: WorkflowController is an argo-specific component that + manages a DSPA's Workflow objects and handles the orchestration + of them with the central Argo server properties: deploy: default: true From 07cf05fd646ec1f7ec6502df80f38bde6bb584b1 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 6 Dec 2023 10:47:22 -0500 Subject: [PATCH 63/85] Fix WorkflowController incorrect log messages --- controllers/workflow_controller.go | 6 +++--- controllers/workflow_controller_test.go | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/controllers/workflow_controller.go b/controllers/workflow_controller.go index eccbe0b20..21f0e01be 100644 --- a/controllers/workflow_controller.go +++ 
b/controllers/workflow_controller.go @@ -28,17 +28,17 @@ func (r *DSPAReconciler) ReconcileWorkflowController(dsp *dspav1alpha1.DataScien log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) if !dsp.Spec.WorkflowController.Deploy { - log.Info("Skipping Application of Visualization Server Resources") + log.Info("Skipping Application of WorkflowController Resources") return nil } - log.Info("Applying Visualization Server Resources") + log.Info("Applying WorkflowController Resources") err := r.ApplyDir(dsp, params, workflowControllerTemplatesDir) if err != nil { return err } - log.Info("Finished applying Visualization Server Resources") + log.Info("Finished applying WorkflowController Resources") return nil } diff --git a/controllers/workflow_controller_test.go b/controllers/workflow_controller_test.go index 31a4df87d..443535d82 100644 --- a/controllers/workflow_controller_test.go +++ b/controllers/workflow_controller_test.go @@ -30,7 +30,7 @@ func TestDeployWorkflowController(t *testing.T) { testDSPAName := "testdspa" expectedWorkflowControllerName := "ds-pipeline-workflow-controller-testdspa" - // Construct DSPASpec with deployed Visualization Server + // Construct DSPASpec with deployed WorkflowController dspa := &dspav1alpha1.DataSciencePipelinesApplication{ Spec: dspav1alpha1.DSPASpec{ APIServer: &dspav1alpha1.APIServer{ @@ -64,7 +64,7 @@ func TestDeployWorkflowController(t *testing.T) { err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) assert.Nil(t, err) - // Ensure Visualization Server Deployment doesn't yet exist + // Ensure WorkflowController Deployment doesn't yet exist deployment := &appsv1.Deployment{} created, err := reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) assert.False(t, created) @@ -74,7 +74,7 @@ func TestDeployWorkflowController(t *testing.T) { err = reconciler.ReconcileWorkflowController(dspa, params) assert.Nil(t, err) - // Ensure 
Visualization Server Deployment now exists + // Ensure WorkflowController Deployment now exists deployment = &appsv1.Deployment{} created, err = reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) assert.True(t, created) @@ -87,7 +87,7 @@ func TestDontDeployWorkflowController(t *testing.T) { testDSPAName := "testdspa" expectedWorkflowControllerName := "ds-pipeline-workflow-controller-testdspa" - // Construct DSPASpec with non-deployed Visualization Server + // Construct DSPASpec with non-deployed WorkflowController dspa := &dspav1alpha1.DataSciencePipelinesApplication{ Spec: dspav1alpha1.DSPASpec{ WorkflowController: &dspav1alpha1.WorkflowController{ @@ -103,7 +103,7 @@ func TestDontDeployWorkflowController(t *testing.T) { // Create Context, Fake Controller and Params ctx, params, reconciler := CreateNewTestObjects() - // Ensure Visualization Server Deployment doesn't yet exist + // Ensure WorkflowController Deployment doesn't yet exist deployment := &appsv1.Deployment{} created, err := reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) assert.False(t, created) @@ -113,7 +113,7 @@ func TestDontDeployWorkflowController(t *testing.T) { err = reconciler.ReconcileWorkflowController(dspa, params) assert.Nil(t, err) - // Ensure Visualization Server Deployment still doesn't exist + // Ensure WorkflowController Deployment still doesn't exist deployment = &appsv1.Deployment{} created, err = reconciler.IsResourceCreated(ctx, deployment, expectedWorkflowControllerName, testNamespace) assert.False(t, created) From 86e191a1bc9c2fa4422b5005caa7e95e6a1abb89 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 6 Dec 2023 10:49:10 -0500 Subject: [PATCH 64/85] Remove Unused V2Tekton Image References --- config/base/kustomization.yaml | 18 ++---------------- config/base/params.env | 2 +- config/configmaps/files/config.yaml | 19 +------------------ config/manager/manager.yaml | 4 ++-- 
controllers/config/defaults.go | 1 + controllers/dspipeline_params.go | 3 ++- kfdef/kfdef.yaml | 2 +- 7 files changed, 10 insertions(+), 39 deletions(-) diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index 157c11d90..e3b47b8d6 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -162,13 +162,13 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_ARGO_ARTIFACT - - name: IMAGESV2_ARGO_PERSISTENTAGENT + - name: IMAGESV2_ARGO_PERSISTENCEAGENT objref: kind: ConfigMap name: dspo-parameters apiVersion: v1 fieldref: - fieldpath: data.IMAGESV2_ARGO_PERSISTENTAGENT + fieldpath: data.IMAGESV2_ARGO_PERSISTENCEAGENT - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW objref: kind: ConfigMap @@ -190,20 +190,6 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_ARGO_MOVERESULTSIMAGE - - name: IMAGESV2_ARGO_PERSISTENTAGENT - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_PERSISTENTAGENT - - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_SCHEDULEDWORKFLOW - name: IMAGESV2_ARGO_MLMDENVOY objref: kind: ConfigMap diff --git a/config/base/params.env b/config/base/params.env index 856b2e0b3..622ca7572 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -14,7 +14,7 @@ IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 IMAGESV2_ARGO_APISERVER=gcr.io/ml-pipeline/api-server:2.0.2 IMAGESV2_ARGO_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main -IMAGESV2_ARGO_PERSISTENTAGENT=gcr.io/ml-pipeline/persistenceagent:2.0.2 +IMAGESV2_ARGO_PERSISTENCEAGENT=gcr.io/ml-pipeline/persistenceagent:2.0.2 IMAGESV2_ARGO_SCHEDULEDWORKFLOW=gcr.io/ml-pipeline/scheduledworkflow:2.0.2 IMAGESV2_ARGO_MLMDENVOY=gcr.io/ml-pipeline/metadata-envoy:2.0.2 
IMAGESV2_ARGO_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 3302586d9..1b33b54f1 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -18,30 +18,13 @@ ImagesV2: Artifact: $(IMAGESV2_ARGO_ARTIFACT) Cache: $(IMAGESV2_ARGO_CACHE) MoveResultsImage: $(IMAGESV2_ARGO_MOVERESULTSIMAGE) - PersistentAgent: $(IMAGESV2_ARGO_PERSISTENTAGENT) + PersistentAgent: $(IMAGESV2_ARGO_PERSISTENCEAGENT) ScheduledWorkflow: $(IMAGESV2_ARGO_SCHEDULEDWORKFLOW) MlmdEnvoy: $(IMAGESV2_ARGO_MLMDENVOY) MlmdGRPC: $(IMAGESV2_ARGO_MLMDGRPC) MlmdWriter: $(IMAGESV2_ARGO_MLMDWRITER) VisualizationServer: $(IMAGES_ARGO_VISUALIZATIONSERVER) WorkflowController: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) - Tekton: - ApiServer: $(IMAGESV2_TEKTON_APISERVER) - Artifact: $(IMAGESV2_TEKTON_ARTIFACT) - Cache: $(IMAGESV2_TEKTON_CACHE) - MoveResultsImage: $(IMAGESV2_TEKTON_MOVERESULTSIMAGE) - PersistentAgent: $(IMAGESV2_TEKTON_PERSISTENTAGENT) - ScheduledWorkflow: $(IMAGESV2_TEKTON_SCHEDULEDWORKFLOW) - MlmdEnvoy: $(IMAGESV2_TEKTON_MLMDENVOY) - MlmdGRPC: $(IMAGESV2_TEKTON_MLMDGRPC) - MlmdWriter: $(IMAGESV2_TEKTON_MLMDWRITER) - VisualizationServer: $(IMAGES_TEKTON_VISUALIZATIONSERVER) - # WorkflowController is an argo-only component - # Using argo image here only for fault tolerance, but should handle this in code - # In a theoretical example, this is here so that if a V2 DSPA is using Tekton backend but - # also requests WorkflowController.deploy=true, we may hit issues if the value just doesn't exist. 
- # Having a fill-in value, even if it doesn't work with the rest of the deployment, alleviates some of those concerns - WorkflowController: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) DSPO: HealthCheck: Database: diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index cb0896126..df9fc077e 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -66,8 +66,8 @@ spec: value: $(IMAGESV2_ARGO_APISERVER) - name: IMAGESV2_ARGO_ARTIFACT value: $(IMAGESV2_ARGO_ARTIFACT) - - name: IMAGESV2_ARGO_PERSISTENTAGENT - value: $(IMAGESV2_ARGO_PERSISTENTAGENT) + - name: IMAGESV2_ARGO_PERSISTENCEAGENT + value: $(IMAGESV2_ARGO_PERSISTENCEAGENT) - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW value: $(IMAGESV2_ARGO_SCHEDULEDWORKFLOW) - name: IMAGESV2_ARGO_CACHE diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index 99ca0306d..a847047fa 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -91,6 +91,7 @@ const ( // DSPV2-Tekton Image Paths // Note: These won't exist in config but aren't used, adding in case of future support +// TODO: remove const ( APIServerImagePathV2Tekton = "ImagesV2.Tekton.ApiServer" APIServerArtifactImagePathV2Tekton = "ImagesV2.Tekton.Artifact" diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 57e6e8f2f..9666f54da 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -40,7 +40,6 @@ type DSPAParams struct { Namespace string Owner mf.Owner DSPVersion string - EngineDriver string APIServer *dspa.APIServer APIServerPiplinesCABundleMountPath string PiplinesCABundleMountPath string @@ -96,6 +95,8 @@ func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesAppli return !p.UsingV2Pipelines(dsp) } +// TODO: rework to dynamically retrieve image based solely on 'pipelinesVersion' and 'engineDriver' rather than +// explicitly set images func (p *DSPAParams) GetImageForComponent(dsp
*dspa.DataSciencePipelinesApplication, v1Image, v2ArgoImage, v2TektonImage string) string { if p.UsingV2Pipelines(dsp) { if p.UsingArgoEngineDriver(dsp) { diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index 697f2aed1..ffc494d94 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -38,7 +38,7 @@ spec: value: gcr.io/ml-pipeline/api-server:2.0.2 - name: IMAGESV2_ARGO_ARTIFACT value: quay.io/opendatahub/ds-pipelines-artifact-manager:main - - name: IMAGESV2_ARGO_PERSISTENTAGENT + - name: IMAGESV2_ARGO_PERSISTENCEAGENT value: gcr.io/ml-pipeline/persistenceagent:2.0.2 - name: IMAGESV2_ARGO_SCHEDULEDWORKFLOW value: gcr.io/ml-pipeline/scheduledworkflow:2.0.2 From e4921bc6441be74f2d9a704d8b061520bf96d461 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 11 Dec 2023 13:21:48 -0500 Subject: [PATCH 65/85] Correct makedeploy newTag pointer --- config/overlays/make-deploy/kustomization.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/overlays/make-deploy/kustomization.yaml b/config/overlays/make-deploy/kustomization.yaml index 1ec5fdeb4..7814f52a5 100644 --- a/config/overlays/make-deploy/kustomization.yaml +++ b/config/overlays/make-deploy/kustomization.yaml @@ -8,4 +8,4 @@ patchesStrategicMerge: images: - name: controller newName: quay.io/opendatahub/data-science-pipelines-operator - newTag: pr-479 + newTag: main From ade82dacdae5064d9fe82d6dd78cf27252b334ec Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 11 Dec 2023 13:28:15 -0500 Subject: [PATCH 66/85] Remove broken rbac kustomization item --- config/rbac/kustomization.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index fe94772a8..1f0dd897c 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -9,6 +9,5 @@ resources: - role_binding.yaml - role.yaml - service_account.yaml -- aggregate_dspa_role.yaml - argo_role.yaml - argo_role_binding.yaml From 
0b7049377fbdf42b7192948df506d2f831e66c3b Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Mon, 11 Dec 2023 17:35:03 -0500 Subject: [PATCH 67/85] Add a sample dspa for v2. Signed-off-by: Humair Khan --- config/samples/dspa_simple_v2.yaml | 40 ++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 config/samples/dspa_simple_v2.yaml diff --git a/config/samples/dspa_simple_v2.yaml b/config/samples/dspa_simple_v2.yaml new file mode 100644 index 000000000..29ea5f9b8 --- /dev/null +++ b/config/samples/dspa_simple_v2.yaml @@ -0,0 +1,40 @@ +apiVersion: datasciencepipelinesapplications.opendatahub.io/v1alpha1 +kind: DataSciencePipelinesApplication +metadata: + name: sample +spec: + dspVersion: v2 + apiServer: + deploy: true + image: gcr.io/ml-pipeline/api-server:2.0.2 + persistenceAgent: + deploy: true + image: gcr.io/ml-pipeline/persistenceagent:2.0.2 + scheduledWorkflow: + deploy: true + image: gcr.io/ml-pipeline/scheduledworkflow:2.0.2 + visualizationServer: + deploy: true + image: gcr.io/ml-pipeline/visualization-server:2.0.2 + mlmd: + deploy: true + grpc: + image: gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 + envoy: + image: gcr.io/ml-pipeline/metadata-envoy:2.0.2 + writer: + image: gcr.io/ml-pipeline/metadata-writer:2.0.2 + database: + disableHealthCheck: true + mariaDB: + deploy: true + objectStorage: + disableHealthCheck: true + minio: + deploy: true + image: 'quay.io/opendatahub/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance' + mlpipelineUI: + image: gcr.io/ml-pipeline/frontend:2.0.2 + workflowController: + deploy: true + image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance From e7b1631e0933b30587f7c267965729aae0e5ebef Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Tue, 12 Dec 2023 10:50:05 -0500 Subject: [PATCH 68/85] Add minio artifact secret name exception for v2. 
Signed-off-by: Humair Khan --- api/v1alpha1/dspipeline_types.go | 1 + ...ons.opendatahub.io_datasciencepipelinesapplications.yaml | 2 ++ controllers/dspipeline_params.go | 6 ++++++ .../case_7/expected/created/apiserver_deployment.yaml | 6 +++--- .../case_7/expected/created/minio_deployment.yaml | 4 ++-- .../case_7/expected/created/mlpipelines-ui_deployment.yaml | 4 ++-- 6 files changed, 16 insertions(+), 7 deletions(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index c91efe42b..0949294fb 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -331,6 +331,7 @@ type ExternalStorage struct { type S3CredentialSecret struct { // +kubebuilder:validation:Required + // Note: In V2 this value needs to be mlpipeline-minio-artifact SecretName string `json:"secretName"` // The "Keys" in the k8sSecret key/value pairs. Not to be confused with the values. AccessKey string `json:"accessKey"` diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index d6f805dd9..7e5fe4a7a 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -535,6 +535,7 @@ spec: secretKey: type: string secretName: + description: 'Note: In V2 this value needs to be mlpipeline-minio-artifact' type: string required: - accessKey @@ -629,6 +630,7 @@ spec: secretKey: type: string secretName: + description: 'Note: In V2 this value needs to be mlpipeline-minio-artifact' type: string required: - accessKey diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 9666f54da..2b3aae80c 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -349,6 +349,12 @@ 
func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc SecretKey: config.DefaultObjectStorageSecretKey, } } + + // TODO: Remove once v2launcher minio secret is parameterized during artifact passing + if p.UsingV2Pipelines(dsp) { + p.ObjectStorageConnection.CredentialsSecret.SecretName = "mlpipeline-minio-artifact" + } + accessKey, secretKey, err := p.RetrieveOrCreateObjectStoreSecret(ctx, client, p.ObjectStorageConnection.CredentialsSecret, log) if err != nil { return err diff --git a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml index 090f30d5e..a0545c34a 100644 --- a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml @@ -46,7 +46,7 @@ spec: - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET - value: "ds-pipeline-s3-testdsp7" + value: "mlpipeline-minio-artifact" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY value: "accesskey" - name: OBJECTSTORECONFIG_CREDENTIALSSECRETKEYKEY @@ -59,12 +59,12 @@ spec: valueFrom: secretKeyRef: key: "accesskey" - name: "ds-pipeline-s3-testdsp7" + name: "mlpipeline-minio-artifact" - name: OBJECTSTORECONFIG_SECRETACCESSKEY valueFrom: secretKeyRef: key: "secretkey" - name: "ds-pipeline-s3-testdsp7" + name: "mlpipeline-minio-artifact" - name: OBJECTSTORECONFIG_SECURE value: "false" - name: MINIO_SERVICE_SERVICE_HOST diff --git a/controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml index da4a1627b..473ce4a24 100644 --- a/controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/minio_deployment.yaml @@ -31,12 +31,12 @@ spec: 
valueFrom: secretKeyRef: key: "accesskey" - name: "ds-pipeline-s3-testdsp7" + name: "mlpipeline-minio-artifact" - name: MINIO_SECRET_KEY valueFrom: secretKeyRef: key: "secretkey" - name: "ds-pipeline-s3-testdsp7" + name: "mlpipeline-minio-artifact" image: minio:test7 name: minio ports: diff --git a/controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml index 839521717..71c491a58 100644 --- a/controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/mlpipelines-ui_deployment.yaml @@ -35,12 +35,12 @@ spec: valueFrom: secretKeyRef: key: "accesskey" - name: "ds-pipeline-s3-testdsp7" + name: "mlpipeline-minio-artifact" - name: MINIO_SECRET_KEY valueFrom: secretKeyRef: key: "secretkey" - name: "ds-pipeline-s3-testdsp7" + name: "mlpipeline-minio-artifact" - name: ALLOW_CUSTOM_VISUALIZATIONS value: "true" - name: ARGO_ARCHIVE_LOGS From 420719e8ca6fc05f5c63bb9ef6d00e542df2a124 Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Wed, 13 Dec 2023 14:56:42 -0500 Subject: [PATCH 69/85] Include minio-service in minio deployment. Signed-off-by: Humair Khan --- controllers/storage.go | 1 + 1 file changed, 1 insertion(+) diff --git a/controllers/storage.go b/controllers/storage.go index eece5e60c..ddc4e8679 100644 --- a/controllers/storage.go +++ b/controllers/storage.go @@ -40,6 +40,7 @@ var minioTemplates = []string{ "minio/default/deployment.yaml.tmpl", "minio/default/pvc.yaml.tmpl", "minio/default/service.yaml.tmpl", + "minio/default/service.minioservice.yaml.tmpl", "minio/default/minio-sa.yaml.tmpl", storageRoute, } From 6551c13d9d91e9845e7473113490d0e66eb84e1b Mon Sep 17 00:00:00 2001 From: Humair Khan Date: Fri, 15 Dec 2023 10:18:39 -0500 Subject: [PATCH 70/85] chore: further simplify v2 simple dspa example. 
Signed-off-by: Humair Khan --- config/samples/dspa_simple_v2.yaml | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/config/samples/dspa_simple_v2.yaml b/config/samples/dspa_simple_v2.yaml index 29ea5f9b8..232cdf7d5 100644 --- a/config/samples/dspa_simple_v2.yaml +++ b/config/samples/dspa_simple_v2.yaml @@ -4,37 +4,10 @@ metadata: name: sample spec: dspVersion: v2 - apiServer: - deploy: true - image: gcr.io/ml-pipeline/api-server:2.0.2 - persistenceAgent: - deploy: true - image: gcr.io/ml-pipeline/persistenceagent:2.0.2 - scheduledWorkflow: - deploy: true - image: gcr.io/ml-pipeline/scheduledworkflow:2.0.2 - visualizationServer: - deploy: true - image: gcr.io/ml-pipeline/visualization-server:2.0.2 - mlmd: - deploy: true - grpc: - image: gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 - envoy: - image: gcr.io/ml-pipeline/metadata-envoy:2.0.2 - writer: - image: gcr.io/ml-pipeline/metadata-writer:2.0.2 - database: - disableHealthCheck: true - mariaDB: - deploy: true objectStorage: - disableHealthCheck: true minio: - deploy: true image: 'quay.io/opendatahub/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance' mlpipelineUI: image: gcr.io/ml-pipeline/frontend:2.0.2 workflowController: deploy: true - image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance From 0fa3544a2a428ab6e627e5ccad0e050fdb9e7262 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Fri, 15 Dec 2023 14:27:40 -0500 Subject: [PATCH 71/85] Add default values for v2 sample pipeline input parameters --- .../apiserver/sample-pipeline/sample-pipeline.yaml.tmpl | 3 +++ .../case_7/expected/created/sample-pipeline.yaml.tmpl | 3 +++ 2 files changed, 6 insertions(+) diff --git a/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl index 6addfef33..f95c656a7 100644 --- a/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl +++ 
b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl @@ -238,10 +238,13 @@ data: inputDefinitions: parameters: min_max_scaler: + defaultValue: true parameterType: BOOLEAN neighbors: + defaultValue: 3 parameterType: NUMBER_INTEGER standard_scaler: + defaultValue: false parameterType: BOOLEAN outputDefinitions: artifacts: diff --git a/controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl b/controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl index f7c147db5..1ad0f85f3 100644 --- a/controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl +++ b/controllers/testdata/declarative/case_7/expected/created/sample-pipeline.yaml.tmpl @@ -237,10 +237,13 @@ data: inputDefinitions: parameters: min_max_scaler: + defaultValue: true parameterType: BOOLEAN neighbors: + defaultValue: 3 parameterType: NUMBER_INTEGER standard_scaler: + defaultValue: false parameterType: BOOLEAN outputDefinitions: artifacts: From dd54ed51669f4b4885b31151fc147c2a6759595d Mon Sep 17 00:00:00 2001 From: Helber Belmiro Date: Mon, 11 Dec 2023 17:17:18 -0300 Subject: [PATCH 72/85] Removed VisualizationServer and MLMD Writer --- api/v1alpha1/dspipeline_types.go | 10 -- api/v1alpha1/zz_generated.deepcopy.go | 20 --- config/base/kustomization.yaml | 21 ---- config/base/params.env | 2 - config/configmaps/files/config.yaml | 3 - ...b.io_datasciencepipelinesapplications.yaml | 10 -- .../apiserver/default/deployment.yaml.tmpl | 4 - .../metadata-writer.deployment.yaml.tmpl | 0 .../{ => v1}/metadata-writer.role.yaml.tmpl | 0 .../metadata-writer.rolebinding.yaml.tmpl | 0 .../metadata-writer.serviceaccount.yaml.tmpl | 0 .../visualizationserver/deployment.yaml.tmpl | 60 --------- .../visualizationserver/service.yaml.tmpl | 19 --- .../serviceaccount.yaml.tmpl | 5 - config/manager/manager.yaml | 6 - config/samples/dspa_simple_v2.yaml | 22 ++++ controllers/config/defaults.go | 2 - controllers/dspipeline_controller.go | 5 - 
controllers/dspipeline_params.go | 73 +++++++++-- controllers/mlmd.go | 7 ++ .../created/apiserver_deployment.yaml | 4 - .../created/apiserver_deployment.yaml | 4 - .../created/apiserver_deployment.yaml | 4 - .../created/apiserver_deployment.yaml | 4 - .../created/apiserver_deployment.yaml | 4 - .../created/apiserver_deployment.yaml | 4 - .../created/apiserver_deployment.yaml | 4 - controllers/visualization_server.go | 44 ------- controllers/visualization_server_test.go | 118 ------------------ kfdef/kfdef.yaml | 6 - 30 files changed, 90 insertions(+), 375 deletions(-) rename config/internal/ml-metadata/{ => v1}/metadata-writer.deployment.yaml.tmpl (100%) rename config/internal/ml-metadata/{ => v1}/metadata-writer.role.yaml.tmpl (100%) rename config/internal/ml-metadata/{ => v1}/metadata-writer.rolebinding.yaml.tmpl (100%) rename config/internal/ml-metadata/{ => v1}/metadata-writer.serviceaccount.yaml.tmpl (100%) delete mode 100644 config/internal/visualizationserver/deployment.yaml.tmpl delete mode 100644 config/internal/visualizationserver/service.yaml.tmpl delete mode 100644 config/internal/visualizationserver/serviceaccount.yaml.tmpl delete mode 100644 controllers/visualization_server.go delete mode 100644 controllers/visualization_server_test.go diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 0949294fb..e9ae8abb5 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -46,9 +46,6 @@ type DSPASpec struct { // +kubebuilder:default:={deploy: false} *CRDViewer `json:"crdviewer"` // +kubebuilder:validation:Optional - // +kubebuilder:default:={deploy: false} - *VisualizationServer `json:"visualizationServer"` - // +kubebuilder:validation:Optional // +kubebuilder:default:="v1" DSPVersion string `json:"dspVersion,omitempty"` // WorkflowController is an argo-specific component that manages a DSPA's Workflow objects and handles the orchestration of them with the central Argo server @@ -290,13 +287,6 
@@ type CRDViewer struct { Image string `json:"image,omitempty"` } -type VisualizationServer struct { - // +kubebuilder:default:=true - // +kubebuilder:validation:Optional - Deploy bool `json:"deploy"` - Image string `json:"image,omitempty"` -} - type WorkflowController struct { // +kubebuilder:default:=true // +kubebuilder:validation:Optional diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index db7974ddf..76df0b296 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -144,11 +144,6 @@ func (in *DSPASpec) DeepCopyInto(out *DSPASpec) { *out = new(CRDViewer) **out = **in } - if in.VisualizationServer != nil { - in, out := &in.VisualizationServer, &out.VisualizationServer - *out = new(VisualizationServer) - **out = **in - } if in.WorkflowController != nil { in, out := &in.WorkflowController, &out.WorkflowController *out = new(WorkflowController) @@ -596,21 +591,6 @@ func (in *SecretKeyValue) DeepCopy() *SecretKeyValue { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VisualizationServer) DeepCopyInto(out *VisualizationServer) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VisualizationServer. -func (in *VisualizationServer) DeepCopy() *VisualizationServer { - if in == nil { - return nil - } - out := new(VisualizationServer) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *WorkflowController) DeepCopyInto(out *WorkflowController) { *out = *in diff --git a/config/base/kustomization.yaml b/config/base/kustomization.yaml index e3b47b8d6..a250efc07 100644 --- a/config/base/kustomization.yaml +++ b/config/base/kustomization.yaml @@ -99,13 +99,6 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGES_CRDVIEWER - - name: IMAGES_VISUALIZATIONSERVER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGES_VISUALIZATIONSERVER - name: IMAGES_DSPO objref: kind: ConfigMap @@ -204,20 +197,6 @@ vars: apiVersion: v1 fieldref: fieldpath: data.IMAGESV2_ARGO_MLMDGRPC - - name: IMAGESV2_ARGO_MLMDWRITER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_ARGO_MLMDWRITER - - name: IMAGESV2_ARGO_VISUALIZATIONSERVER - objref: - kind: ConfigMap - name: dspo-parameters - apiVersion: v1 - fieldref: - fieldpath: data.IMAGESV2_ARGO_VISUALIZATIONSERVER - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER objref: kind: ConfigMap diff --git a/config/base/params.env b/config/base/params.env index 622ca7572..befd3e162 100644 --- a/config/base/params.env +++ b/config/base/params.env @@ -11,7 +11,6 @@ IMAGES_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.8 IMAGES_MARIADB=registry.redhat.io/rhel8/mariadb-103:1 IMAGES_OAUTHPROXY=registry.redhat.io/openshift4/ose-oauth-proxy@sha256:ab112105ac37352a2a4916a39d6736f5db6ab4c29bad4467de8d613e80e9bb33 IMAGES_CRDVIEWER=gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 -IMAGES_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 IMAGESV2_ARGO_APISERVER=gcr.io/ml-pipeline/api-server:2.0.2 IMAGESV2_ARGO_ARTIFACT=quay.io/opendatahub/ds-pipelines-artifact-manager:main IMAGESV2_ARGO_PERSISTENCEAGENT=gcr.io/ml-pipeline/persistenceagent:2.0.2 @@ -21,7 +20,6 @@ IMAGESV2_ARGO_MLMDGRPC=gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 IMAGESV2_ARGO_MLMDWRITER=gcr.io/ml-pipeline/metadata-writer:2.0.2 
IMAGESV2_ARGO_CACHE=registry.access.redhat.com/ubi8/ubi-minimal:8.7 IMAGESV2_ARGO_MOVERESULTSIMAGE=registry.access.redhat.com/ubi8/ubi-micro:8.7 -IMAGESV2_ARGO_VISUALIZATIONSERVER=gcr.io/ml-pipeline/visualization-server:2.0.2 IMAGESV2_ARGO_WORKFLOWCONTROLLER=gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance ZAP_LOG_LEVEL=info MAX_CONCURRENT_RECONCILES=10 diff --git a/config/configmaps/files/config.yaml b/config/configmaps/files/config.yaml index 1b33b54f1..a1d9fe056 100644 --- a/config/configmaps/files/config.yaml +++ b/config/configmaps/files/config.yaml @@ -11,7 +11,6 @@ Images: MlmdGRPC: $(IMAGES_MLMDGRPC) MlmdWriter: $(IMAGES_MLMDWRITER) CRDViewer: $(IMAGES_CRDVIEWER) - VisualizationServer: $(IMAGES_VISUALIZATIONSERVER) ImagesV2: Argo: ApiServer: $(IMAGESV2_ARGO_APISERVER) @@ -22,8 +21,6 @@ ImagesV2: ScheduledWorkflow: $(IMAGESV2_ARGO_SCHEDULEDWORKFLOW) MlmdEnvoy: $(IMAGESV2_ARGO_MLMDENVOY) MlmdGRPC: $(IMAGESV2_ARGO_MLMDGRPC) - MlmdWriter: $(IMAGESV2_ARGO_MLMDWRITER) - VisualizationServer: $(IMAGES_ARGO_VISUALIZATIONSERVER) WorkflowController: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) DSPO: HealthCheck: diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 7e5fe4a7a..121aefeb1 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -752,16 +752,6 @@ spec: type: object type: object type: object - visualizationServer: - default: - deploy: false - properties: - deploy: - default: true - type: boolean - image: - type: string - type: object workflowController: default: deploy: false diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index 
85adaa00b..c1494283b 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -49,10 +49,6 @@ spec: value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" - name: DBCONFIG_CONMAXLIFETIMESEC value: "{{.APIServer.DBConfigConMaxLifetimeSec}}" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl b/config/internal/ml-metadata/v1/metadata-writer.deployment.yaml.tmpl similarity index 100% rename from config/internal/ml-metadata/metadata-writer.deployment.yaml.tmpl rename to config/internal/ml-metadata/v1/metadata-writer.deployment.yaml.tmpl diff --git a/config/internal/ml-metadata/metadata-writer.role.yaml.tmpl b/config/internal/ml-metadata/v1/metadata-writer.role.yaml.tmpl similarity index 100% rename from config/internal/ml-metadata/metadata-writer.role.yaml.tmpl rename to config/internal/ml-metadata/v1/metadata-writer.role.yaml.tmpl diff --git a/config/internal/ml-metadata/metadata-writer.rolebinding.yaml.tmpl b/config/internal/ml-metadata/v1/metadata-writer.rolebinding.yaml.tmpl similarity index 100% rename from config/internal/ml-metadata/metadata-writer.rolebinding.yaml.tmpl rename to config/internal/ml-metadata/v1/metadata-writer.rolebinding.yaml.tmpl diff --git a/config/internal/ml-metadata/metadata-writer.serviceaccount.yaml.tmpl b/config/internal/ml-metadata/v1/metadata-writer.serviceaccount.yaml.tmpl similarity index 100% rename from config/internal/ml-metadata/metadata-writer.serviceaccount.yaml.tmpl rename to config/internal/ml-metadata/v1/metadata-writer.serviceaccount.yaml.tmpl diff --git 
a/config/internal/visualizationserver/deployment.yaml.tmpl b/config/internal/visualizationserver/deployment.yaml.tmpl deleted file mode 100644 index 46524439b..000000000 --- a/config/internal/visualizationserver/deployment.yaml.tmpl +++ /dev/null @@ -1,60 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - labels: - app: ds-pipeline-visualizationserver-{{.Name}} - component: data-science-pipelines - dspa: {{.Name}} - name: ds-pipeline-visualizationserver-{{.Name}} - namespace: {{.Namespace}} -spec: - selector: - matchLabels: - app: ds-pipeline-visualizationserver-{{.Name}} - component: data-science-pipelines - dspa: {{.Name}} - template: - metadata: - annotations: - cluster-autoscaler.kubernetes.io/safe-to-evict: "true" - labels: - app: ds-pipeline-visualizationserver-{{.Name}} - component: data-science-pipelines - dspa: {{.Name}} - spec: - containers: - - image: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 # TODO - imagePullPolicy: IfNotPresent - livenessProbe: - exec: - command: - - wget - - -q - - -S - - -O - - '-' - - http://localhost:8888/ - initialDelaySeconds: 3 - periodSeconds: 5 - timeoutSeconds: 2 - name: ds-pipeline-visualizationserver - ports: - - containerPort: 8888 - name: http - readinessProbe: - exec: - command: - - wget - - -q - - -S - - -O - - '-' - - http://localhost:8888/ - initialDelaySeconds: 3 - periodSeconds: 5 - timeoutSeconds: 2 - resources: - requests: - cpu: 30m - memory: 500Mi - serviceAccountName: ds-pipeline-visualizationserver-{{.Name}} diff --git a/config/internal/visualizationserver/service.yaml.tmpl b/config/internal/visualizationserver/service.yaml.tmpl deleted file mode 100644 index f2d76833f..000000000 --- a/config/internal/visualizationserver/service.yaml.tmpl +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: ds-pipeline-visualizationserver-{{.Name}} - namespace: {{.Namespace}} - annotations: - service.alpha.openshift.io/serving-cert-secret-name: ds-pipelines-proxy-tls-{{.Name}} - 
labels: - app: ds-pipeline-{{.Name}} - component: data-science-pipelines -spec: - ports: - - name: http - port: 8888 - protocol: TCP - targetPort: 8888 - selector: - app: ds-pipeline-visualizationserver-{{.Name}} - component: data-science-pipelines diff --git a/config/internal/visualizationserver/serviceaccount.yaml.tmpl b/config/internal/visualizationserver/serviceaccount.yaml.tmpl deleted file mode 100644 index e1c415786..000000000 --- a/config/internal/visualizationserver/serviceaccount.yaml.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ds-pipeline-visualizationserver-{{.Name}} - namespace: {{.Namespace}} diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index df9fc077e..6aff09d66 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -60,8 +60,6 @@ spec: value: $(IMAGES_MLMDWRITER) - name: IMAGES_CRDVIEWER value: $(IMAGES_CRDVIEWER) - - name: IMAGES_VISUALIZATIONSERVER - value: $(IMAGES_VISUALIZATIONSERVER) - name: IMAGESV2_ARGO_APISERVER value: $(IMAGESV2_ARGO_APISERVER) - name: IMAGESV2_ARGO_ARTIFACT @@ -78,10 +76,6 @@ spec: value: $(IMAGESV2_ARGO_MLMDENVOY) - name: IMAGESV2_ARGO_MLMDGRPC value: $(IMAGESV2_ARGO_MLMDGRPC) - - name: IMAGESV2_ARGO_MLMDWRITER - value: $(IMAGESV2_ARGO_MLMDWRITER) - - name: IMAGESV2_ARGO_VISUALIZATIONSERVER - value: $(IMAGESV2_ARGO_VISUALIZATIONSERVER) - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER value: $(IMAGESV2_ARGO_WORKFLOWCONTROLLER) - name: ZAP_LOG_LEVEL diff --git a/config/samples/dspa_simple_v2.yaml b/config/samples/dspa_simple_v2.yaml index 232cdf7d5..a60028870 100644 --- a/config/samples/dspa_simple_v2.yaml +++ b/config/samples/dspa_simple_v2.yaml @@ -4,10 +4,32 @@ metadata: name: sample spec: dspVersion: v2 + apiServer: + deploy: true + image: gcr.io/ml-pipeline/api-server:2.0.2 + persistenceAgent: + deploy: true + image: gcr.io/ml-pipeline/persistenceagent:2.0.2 + scheduledWorkflow: + deploy: true + image: 
gcr.io/ml-pipeline/scheduledworkflow:2.0.2 + mlmd: + deploy: true + grpc: + image: gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 + envoy: + image: gcr.io/ml-pipeline/metadata-envoy:2.0.2 + database: + disableHealthCheck: true + mariaDB: + deploy: true objectStorage: + disableHealthCheck: true minio: + deploy: true image: 'quay.io/opendatahub/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance' mlpipelineUI: image: gcr.io/ml-pipeline/frontend:2.0.2 workflowController: deploy: true + image: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance diff --git a/controllers/config/defaults.go b/controllers/config/defaults.go index a847047fa..78c132e70 100644 --- a/controllers/config/defaults.go +++ b/controllers/config/defaults.go @@ -86,7 +86,6 @@ const ( ScheduledWorkflowImagePathV2Argo = "ImagesV2.Argo.ScheduledWorkflow" MlmdEnvoyImagePathV2Argo = "ImagesV2.Argo.MlmdEnvoy" MlmdGRPCImagePathV2Argo = "ImagesV2.Argo.MlmdGRPC" - MlmdWriterImagePathV2Argo = "ImagesV2.Argo.MlmdWriter" ) // DSPV2-Tekton Image Paths @@ -101,7 +100,6 @@ const ( ScheduledWorkflowImagePathV2Tekton = "ImagesV2.Tekton.ScheduledWorkflow" MlmdEnvoyImagePathV2Tekton = "ImagesV2.Tekton.MlmdEnvoy" MlmdGRPCImagePathV2Tekton = "ImagesV2.Tekton.MlmdGRPC" - MlmdWriterImagePathV2Tekton = "ImagesV2.Tekton.MlmdWriter" ) // DSPA Status Condition Types diff --git a/controllers/dspipeline_controller.go b/controllers/dspipeline_controller.go index f5e37c263..a77ee89b0 100644 --- a/controllers/dspipeline_controller.go +++ b/controllers/dspipeline_controller.go @@ -283,11 +283,6 @@ func (r *DSPAReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl. 
return ctrl.Result{}, err } - err = r.ReconcileVisualizationServer(dspa, params) - if err != nil { - return ctrl.Result{}, err - } - err = r.ReconcileWorkflowController(dspa, params) if err != nil { return ctrl.Result{}, err diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 2b3aae80c..d019dcc83 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -56,7 +56,6 @@ type DSPAParams struct { Minio *dspa.Minio MLMD *dspa.MLMD CRDViewer *dspa.CRDViewer - VisualizationServer *dspa.VisualizationServer WorkflowController *dspa.WorkflowController DBConnection ObjectStorageConnection @@ -87,6 +86,10 @@ func (p *DSPAParams) UsingV2Pipelines(dsp *dspa.DataSciencePipelinesApplication) return dsp.Spec.DSPVersion == "v2" } +func (p *DSPAParams) UsingV1Pipelines(dsp *dspa.DataSciencePipelinesApplication) bool { + return dsp.Spec.DSPVersion == "v1" || dsp.Spec.DSPVersion == "" +} + func (p *DSPAParams) UsingArgoEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { return p.UsingV2Pipelines(dsp) } @@ -99,15 +102,19 @@ func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesAppli // explicitly set images func (p *DSPAParams) GetImageForComponent(dsp *dspa.DataSciencePipelinesApplication, v1Image, v2ArgoImage, v2TektonImage string) string { if p.UsingV2Pipelines(dsp) { - if p.UsingArgoEngineDriver(dsp) { - return v2ArgoImage - } else { - return v2TektonImage - } + return p.GetImageForComponentV2(dsp, v2ArgoImage, v2TektonImage) } return v1Image } +func (p *DSPAParams) GetImageForComponentV2(dsp *dspa.DataSciencePipelinesApplication, v2ArgoImage, v2TektonImage string) string { + if p.UsingArgoEngineDriver(dsp) { + return v2ArgoImage + } else { + return v2TektonImage + } +} + // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. 
func (p *DSPAParams) UsingExternalDB(dsp *dspa.DataSciencePipelinesApplication) bool { if dsp.Spec.Database != nil && dsp.Spec.Database.ExternalDB != nil { @@ -389,11 +396,11 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc } -func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { +func (p *DSPAParams) SetupMlmdV1() error { if p.MLMD != nil { - MlmdEnvoyImagePath := p.GetImageForComponent(dsp, config.MlmdEnvoyImagePath, config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) - MlmdGRPCImagePath := p.GetImageForComponent(dsp, config.MlmdGRPCImagePath, config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) - MlmdWriterImagePath := p.GetImageForComponent(dsp, config.MlmdWriterImagePath, config.MlmdWriterImagePathV2Argo, config.MlmdWriterImagePathV2Tekton) + MlmdEnvoyImagePath := config.MlmdEnvoyImagePath + MlmdGRPCImagePath := config.MlmdGRPCImagePath + MlmdWriterImagePath := config.MlmdWriterImagePath if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ @@ -428,6 +435,39 @@ func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelin return nil } +func (p *DSPAParams) SetupMlmdV2(dsp *dspa.DataSciencePipelinesApplication, log logr.Logger) error { + if p.MLMD != nil { + mlmdEnvoyImagePath := p.GetImageForComponentV2(dsp, config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) + mlmdGRPCImagePath := p.GetImageForComponentV2(dsp, config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) + + if p.MLMD.Envoy == nil { + p.MLMD.Envoy = &dspa.Envoy{ + Image: config.GetStringConfigWithDefault(mlmdEnvoyImagePath, config.DefaultImageValue), + } + } + if p.MLMD.GRPC == nil { + p.MLMD.GRPC = &dspa.GRPC{ + Image: config.GetStringConfigWithDefault(mlmdGRPCImagePath, config.DefaultImageValue), + } + } + if p.MLMD.Writer != nil { + log.Info("MLMD Writer is not supported in pipelines V2") + } + + 
mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(mlmdEnvoyImagePath, config.DefaultImageValue) + mlmdGRPCImageFromConfig := config.GetStringConfigWithDefault(mlmdGRPCImagePath, config.DefaultImageValue) + + setStringDefault(mlmdEnvoyImageFromConfig, &p.MLMD.Envoy.Image) + setStringDefault(mlmdGRPCImageFromConfig, &p.MLMD.GRPC.Image) + + setResourcesDefault(config.MlmdEnvoyResourceRequirements, &p.MLMD.Envoy.Resources) + setResourcesDefault(config.MlmdGRPCResourceRequirements, &p.MLMD.GRPC.Resources) + + setStringDefault(config.MlmdGrpcPort, &p.MLMD.GRPC.Port) + } + return nil +} + func setStringDefault(defaultValue string, value *string) { if *value == "" { *value = defaultValue @@ -519,9 +559,18 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip setResourcesDefault(config.MlPipelineUIResourceRequirements, &p.MlPipelineUI.Resources) } - // TODO (gfrasca): believe we need to set default VisualizationServer and WorkflowController Images here + // TODO (gfrasca): believe we need to set default WorkflowController Images here + + var err error + + if p.UsingV2Pipelines(dsp) { + err = p.SetupMlmdV2(dsp, log) + } else if p.UsingV1Pipelines(dsp) { + err = p.SetupMlmdV1() + } else { + err = fmt.Errorf("unsupported pipelines version: %s", dsp.Spec.DSPVersion) + } - err := p.SetupMLMD(ctx, dsp, client, log) if err != nil { return err } diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 3a3197416..e1c69c48a 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -38,6 +38,13 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp return err } + if params.UsingV1Pipelines(dsp) { + err = r.ApplyDir(dsp, params, mlmdTemplatesDir+"/v1") + if err != nil { + return err + } + } + log.Info("Finished applying MLMD Resources") return nil } diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml 
b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index 15a850e3a..3d151b9c2 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -41,10 +41,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp0" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 54378eadd..b8faa89b0 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -41,10 +41,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "125" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp2" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index a191ef260..66921107c 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -41,10 +41,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: 
"ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "teststoragesecretname3" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index c1e74562b..15c0123ba 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -41,10 +41,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "125" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp4" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 39cb194fc..69762c5b6 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -41,10 +41,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp5" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml index f7893dccc..e8b71ee9e 100644 
--- a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml @@ -47,10 +47,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp6" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml index a0545c34a..4646ecfe7 100644 --- a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml @@ -41,10 +41,6 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "125" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST - value: "ds-pipeline-visualizationserver" - - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT - value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "mlpipeline-minio-artifact" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/visualization_server.go b/controllers/visualization_server.go deleted file mode 100644 index 817341fbb..000000000 --- a/controllers/visualization_server.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" -) - -var visualizationServerTemplatesDir = "visualizationserver" - -func (r *DSPAReconciler) ReconcileVisualizationServer(dsp *dspav1alpha1.DataSciencePipelinesApplication, - params *DSPAParams) error { - - log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) - - if !dsp.Spec.VisualizationServer.Deploy { - log.Info("Skipping Application of Visualization Server Resources") - return nil - } - - log.Info("Applying Visualization Server Resources") - - err := r.ApplyDir(dsp, params, visualizationServerTemplatesDir) - if err != nil { - return err - } - - log.Info("Finished applying Visualization Server Resources") - return nil -} diff --git a/controllers/visualization_server_test.go b/controllers/visualization_server_test.go deleted file mode 100644 index 7a06f6ca2..000000000 --- a/controllers/visualization_server_test.go +++ /dev/null @@ -1,118 +0,0 @@ -//go:build test_all || test_unit - -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "testing" - - dspav1alpha1 "github.com/opendatahub-io/data-science-pipelines-operator/api/v1alpha1" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" -) - -func TestDeployVisualizationServer(t *testing.T) { - testNamespace := "testnamespace" - testDSPAName := "testdspa" - expectedVisualizationServerName := "ds-pipeline-visualizationserver-testdspa" - - // Construct DSPASpec with deployed Visualization Server - dspa := &dspav1alpha1.DataSciencePipelinesApplication{ - Spec: dspav1alpha1.DSPASpec{ - VisualizationServer: &dspav1alpha1.VisualizationServer{ - Deploy: true, - }, - Database: &dspav1alpha1.Database{ - DisableHealthCheck: false, - MariaDB: &dspav1alpha1.MariaDB{ - Deploy: true, - }, - }, - ObjectStorage: &dspav1alpha1.ObjectStorage{ - DisableHealthCheck: false, - Minio: &dspav1alpha1.Minio{ - Deploy: false, - Image: "someimage", - }, - }, - }, - } - - // Enrich DSPA with name+namespace - dspa.Namespace = testNamespace - dspa.Name = testDSPAName - - // Create Context, Fake Controller and Params - ctx, params, reconciler := CreateNewTestObjects() - err := params.ExtractParams(ctx, dspa, reconciler.Client, reconciler.Log) - assert.Nil(t, err) - - // Ensure Visualization Server Deployment doesn't yet exist - deployment := &appsv1.Deployment{} - created, err := reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) - assert.False(t, created) - assert.Nil(t, err) - - // Run test reconciliation - err = reconciler.ReconcileVisualizationServer(dspa, params) - assert.Nil(t, err) - - // Ensure Visualization Server Deployment now exists - deployment = &appsv1.Deployment{} - created, err = reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) - assert.True(t, created) - assert.Nil(t, err) - -} - -func TestDontDeployVisualizationServer(t *testing.T) { - testNamespace := "testnamespace" - testDSPAName := "testdspa" - 
expectedVisualizationServerName := "ds-pipeline-visualization-server-testdspa" - - // Construct DSPASpec with non-deployed Visualization Server - dspa := &dspav1alpha1.DataSciencePipelinesApplication{ - Spec: dspav1alpha1.DSPASpec{ - VisualizationServer: &dspav1alpha1.VisualizationServer{ - Deploy: false, - }, - }, - } - - // Enrich DSPA with name+namespace - dspa.Name = testDSPAName - dspa.Namespace = testNamespace - - // Create Context, Fake Controller and Params - ctx, params, reconciler := CreateNewTestObjects() - - // Ensure Visualization Server Deployment doesn't yet exist - deployment := &appsv1.Deployment{} - created, err := reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) - assert.False(t, created) - assert.Nil(t, err) - - // Run test reconciliation - err = reconciler.ReconcileVisualizationServer(dspa, params) - assert.Nil(t, err) - - // Ensure Visualization Server Deployment still doesn't exist - deployment = &appsv1.Deployment{} - created, err = reconciler.IsResourceCreated(ctx, deployment, expectedVisualizationServerName, testNamespace) - assert.False(t, created) - assert.Nil(t, err) -} diff --git a/kfdef/kfdef.yaml b/kfdef/kfdef.yaml index ffc494d94..0620f6449 100644 --- a/kfdef/kfdef.yaml +++ b/kfdef/kfdef.yaml @@ -32,8 +32,6 @@ spec: value: quay.io/opendatahub/ds-pipelines-metadata-writer:1.1.0 - name: IMAGES_CRDVIEWER value: gcr.io/ml-pipeline/viewer-crd-controller:2.0.0-rc.2 - - name: IMAGES_VISUALIZATIONSERVER - value: gcr.io/ml-pipeline/visualization-server:2.0.0-rc.2 - name: IMAGESV2_ARGO_APISERVER value: gcr.io/ml-pipeline/api-server:2.0.2 - name: IMAGESV2_ARGO_ARTIFACT @@ -52,10 +50,6 @@ spec: value: gcr.io/ml-pipeline/metadata-envoy:2.0.2 - name: IMAGESV2_ARGO_MLMDGRPC value: gcr.io/tfx-oss-public/ml_metadata_store_server:1.14.0 - - name: IMAGESV2_ARGO_MLMDWRITER - value: gcr.io/ml-pipeline/metadata-writer:2.0.2 - - name: IMAGESV2_ARGO_VISUALIZATIONSERVER - value: 
gcr.io/ml-pipeline/visualization-server:2.0.2 - name: IMAGESV2_ARGO_WORKFLOWCONTROLLER value: gcr.io/ml-pipeline/workflow-controller:v3.3.10-license-compliance repoRef: From 4ff63f67432a79044ef6c159c2d227e4b6652f63 Mon Sep 17 00:00:00 2001 From: Helber Belmiro Date: Fri, 15 Dec 2023 11:46:48 -0300 Subject: [PATCH 73/85] Restored ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST and ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT envs Signed-off-by: Helber Belmiro --- config/internal/apiserver/default/deployment.yaml.tmpl | 4 ++++ config/overlays/make-deploy/kustomization.yaml | 4 ++-- .../case_0/expected/created/apiserver_deployment.yaml | 4 ++++ .../case_2/expected/created/apiserver_deployment.yaml | 4 ++++ .../case_3/expected/created/apiserver_deployment.yaml | 4 ++++ .../case_4/expected/created/apiserver_deployment.yaml | 4 ++++ .../case_5/expected/created/apiserver_deployment.yaml | 4 ++++ .../case_6/expected/created/apiserver_deployment.yaml | 4 ++++ .../case_7/expected/created/apiserver_deployment.yaml | 4 ++++ 9 files changed, 34 insertions(+), 2 deletions(-) diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index c1494283b..85adaa00b 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -49,6 +49,10 @@ spec: value: "{{.APIServer.AutoUpdatePipelineDefaultVersion}}" - name: DBCONFIG_CONMAXLIFETIMESEC value: "{{.APIServer.DBConfigConMaxLifetimeSec}}" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "{{.ObjectStorageConnection.CredentialsSecret.SecretName}}" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/config/overlays/make-deploy/kustomization.yaml b/config/overlays/make-deploy/kustomization.yaml index 7814f52a5..97615ece9 100644 --- 
a/config/overlays/make-deploy/kustomization.yaml +++ b/config/overlays/make-deploy/kustomization.yaml @@ -7,5 +7,5 @@ patchesStrategicMerge: - img_patch.yaml images: - name: controller - newName: quay.io/opendatahub/data-science-pipelines-operator - newTag: main + newName: quay.io/hbelmiro/dspo + newTag: dev-issue-495-1702651405 diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index 3d151b9c2..15a850e3a 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -41,6 +41,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp0" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index b8faa89b0..54378eadd 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -41,6 +41,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "125" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp2" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml 
b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index 66921107c..a191ef260 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -41,6 +41,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "teststoragesecretname3" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index 15c0123ba..c1e74562b 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -41,6 +41,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "125" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp4" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml index 69762c5b6..39cb194fc 100644 --- a/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_5/expected/created/apiserver_deployment.yaml @@ -41,6 +41,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: 
"ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp5" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml index e8b71ee9e..f7893dccc 100644 --- a/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_6/expected/created/apiserver_deployment.yaml @@ -47,6 +47,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "120" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "ds-pipeline-s3-testdsp6" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY diff --git a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml index 4646ecfe7..a0545c34a 100644 --- a/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_7/expected/created/apiserver_deployment.yaml @@ -41,6 +41,10 @@ spec: value: "true" - name: DBCONFIG_CONMAXLIFETIMESEC value: "125" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_HOST + value: "ds-pipeline-visualizationserver" + - name: ML_PIPELINE_VISUALIZATIONSERVER_SERVICE_PORT + value: "8888" - name: OBJECTSTORECONFIG_CREDENTIALSSECRET value: "mlpipeline-minio-artifact" - name: OBJECTSTORECONFIG_CREDENTIALSACCESSKEYKEY From c9da18a5426e82cd193feb21a514b14cc2430ee6 Mon Sep 17 00:00:00 2001 From: Helber Belmiro Date: Fri, 15 Dec 2023 11:53:48 -0300 Subject: [PATCH 74/85] Removed GetImageForComponentV2 Signed-off-by: Helber Belmiro 
--- controllers/dspipeline_params.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index d019dcc83..5230c490f 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -102,19 +102,15 @@ func (p *DSPAParams) UsingTektonEngineDriver(dsp *dspa.DataSciencePipelinesAppli // explicitly set images func (p *DSPAParams) GetImageForComponent(dsp *dspa.DataSciencePipelinesApplication, v1Image, v2ArgoImage, v2TektonImage string) string { if p.UsingV2Pipelines(dsp) { - return p.GetImageForComponentV2(dsp, v2ArgoImage, v2TektonImage) + if p.UsingArgoEngineDriver(dsp) { + return v2ArgoImage + } else { + return v2TektonImage + } } return v1Image } -func (p *DSPAParams) GetImageForComponentV2(dsp *dspa.DataSciencePipelinesApplication, v2ArgoImage, v2TektonImage string) string { - if p.UsingArgoEngineDriver(dsp) { - return v2ArgoImage - } else { - return v2TektonImage - } -} - // UsingExternalDB will return true if an external Database is specified in the CR, otherwise false. 
func (p *DSPAParams) UsingExternalDB(dsp *dspa.DataSciencePipelinesApplication) bool { if dsp.Spec.Database != nil && dsp.Spec.Database.ExternalDB != nil { @@ -437,8 +433,8 @@ func (p *DSPAParams) SetupMlmdV1() error { func (p *DSPAParams) SetupMlmdV2(dsp *dspa.DataSciencePipelinesApplication, log logr.Logger) error { if p.MLMD != nil { - mlmdEnvoyImagePath := p.GetImageForComponentV2(dsp, config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) - mlmdGRPCImagePath := p.GetImageForComponentV2(dsp, config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) + mlmdEnvoyImagePath := p.GetImageForComponent(dsp, "", config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) + mlmdGRPCImagePath := p.GetImageForComponent(dsp, "", config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ From af2e5a887dc8a93285c7bd785a62fa0b6aa90a28 Mon Sep 17 00:00:00 2001 From: Helber Belmiro Date: Fri, 15 Dec 2023 16:11:39 -0300 Subject: [PATCH 75/85] Restored SetupMLMD Signed-off-by: Helber Belmiro --- .../overlays/make-deploy/kustomization.yaml | 4 +- controllers/dspipeline_params.go | 65 ++++--------------- 2 files changed, 16 insertions(+), 53 deletions(-) diff --git a/config/overlays/make-deploy/kustomization.yaml b/config/overlays/make-deploy/kustomization.yaml index 97615ece9..7814f52a5 100644 --- a/config/overlays/make-deploy/kustomization.yaml +++ b/config/overlays/make-deploy/kustomization.yaml @@ -7,5 +7,5 @@ patchesStrategicMerge: - img_patch.yaml images: - name: controller - newName: quay.io/hbelmiro/dspo - newTag: dev-issue-495-1702651405 + newName: quay.io/opendatahub/data-science-pipelines-operator + newTag: main diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 5230c490f..5d3ca4a94 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -392,11 +392,10 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, 
dsp *dspa.DataScienc } -func (p *DSPAParams) SetupMlmdV1() error { +func (p *DSPAParams) SetupMLMD(dsp *dspa.DataSciencePipelinesApplication) error { if p.MLMD != nil { - MlmdEnvoyImagePath := config.MlmdEnvoyImagePath - MlmdGRPCImagePath := config.MlmdGRPCImagePath - MlmdWriterImagePath := config.MlmdWriterImagePath + MlmdEnvoyImagePath := p.GetImageForComponent(dsp, config.MlmdEnvoyImagePath, config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) + MlmdGRPCImagePath := p.GetImageForComponent(dsp, config.MlmdGRPCImagePath, config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) if p.MLMD.Envoy == nil { p.MLMD.Envoy = &dspa.Envoy{ @@ -408,58 +407,31 @@ func (p *DSPAParams) SetupMlmdV1() error { Image: config.GetStringConfigWithDefault(MlmdGRPCImagePath, config.DefaultImageValue), } } - if p.MLMD.Writer == nil { - p.MLMD.Writer = &dspa.Writer{ - Image: config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue), - } - } mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(MlmdEnvoyImagePath, config.DefaultImageValue) mlmdGRPCImageFromConfig := config.GetStringConfigWithDefault(MlmdGRPCImagePath, config.DefaultImageValue) - mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue) setStringDefault(mlmdEnvoyImageFromConfig, &p.MLMD.Envoy.Image) setStringDefault(mlmdGRPCImageFromConfig, &p.MLMD.GRPC.Image) - setStringDefault(mlmdWriterImageFromConfig, &p.MLMD.Writer.Image) setResourcesDefault(config.MlmdEnvoyResourceRequirements, &p.MLMD.Envoy.Resources) setResourcesDefault(config.MlmdGRPCResourceRequirements, &p.MLMD.GRPC.Resources) - setResourcesDefault(config.MlmdWriterResourceRequirements, &p.MLMD.Writer.Resources) setStringDefault(config.MlmdGrpcPort, &p.MLMD.GRPC.Port) - } - return nil -} -func (p *DSPAParams) SetupMlmdV2(dsp *dspa.DataSciencePipelinesApplication, log logr.Logger) error { - if p.MLMD != nil { - mlmdEnvoyImagePath := p.GetImageForComponent(dsp, 
"", config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) - mlmdGRPCImagePath := p.GetImageForComponent(dsp, "", config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) + if p.UsingV1Pipelines(dsp) { + MlmdWriterImagePath := config.MlmdWriterImagePath - if p.MLMD.Envoy == nil { - p.MLMD.Envoy = &dspa.Envoy{ - Image: config.GetStringConfigWithDefault(mlmdEnvoyImagePath, config.DefaultImageValue), + if p.MLMD.Writer == nil { + p.MLMD.Writer = &dspa.Writer{ + Image: config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue), + } } - } - if p.MLMD.GRPC == nil { - p.MLMD.GRPC = &dspa.GRPC{ - Image: config.GetStringConfigWithDefault(mlmdGRPCImagePath, config.DefaultImageValue), - } - } - if p.MLMD.Writer != nil { - log.Info("MLMD Writer is not supported in pipelines V2") - } - mlmdEnvoyImageFromConfig := config.GetStringConfigWithDefault(mlmdEnvoyImagePath, config.DefaultImageValue) - mlmdGRPCImageFromConfig := config.GetStringConfigWithDefault(mlmdGRPCImagePath, config.DefaultImageValue) - - setStringDefault(mlmdEnvoyImageFromConfig, &p.MLMD.Envoy.Image) - setStringDefault(mlmdGRPCImageFromConfig, &p.MLMD.GRPC.Image) - - setResourcesDefault(config.MlmdEnvoyResourceRequirements, &p.MLMD.Envoy.Resources) - setResourcesDefault(config.MlmdGRPCResourceRequirements, &p.MLMD.GRPC.Resources) - - setStringDefault(config.MlmdGrpcPort, &p.MLMD.GRPC.Port) + mlmdWriterImageFromConfig := config.GetStringConfigWithDefault(MlmdWriterImagePath, config.DefaultImageValue) + setStringDefault(mlmdWriterImageFromConfig, &p.MLMD.Writer.Image) + setResourcesDefault(config.MlmdWriterResourceRequirements, &p.MLMD.Writer.Resources) + } } return nil } @@ -557,16 +529,7 @@ func (p *DSPAParams) ExtractParams(ctx context.Context, dsp *dspa.DataSciencePip // TODO (gfrasca): believe we need to set default WorkflowController Images here - var err error - - if p.UsingV2Pipelines(dsp) { - err = p.SetupMlmdV2(dsp, log) - } else if p.UsingV1Pipelines(dsp) { - 
err = p.SetupMlmdV1() - } else { - err = fmt.Errorf("unsupported pipelines version: %s", dsp.Spec.DSPVersion) - } - + err := p.SetupMLMD(dsp) if err != nil { return err } From 8e65d60091cbc51d167f825a7b57bbcc4a316bef Mon Sep 17 00:00:00 2001 From: Helber Belmiro Date: Tue, 19 Dec 2023 08:57:30 -0300 Subject: [PATCH 76/85] Updated `DSPAParams#UsingV1Pipelines`. Co-authored-by: Giulio Frasca --- controllers/dspipeline_params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 5d3ca4a94..468cd8009 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -87,7 +87,7 @@ func (p *DSPAParams) UsingV2Pipelines(dsp *dspa.DataSciencePipelinesApplication) } func (p *DSPAParams) UsingV1Pipelines(dsp *dspa.DataSciencePipelinesApplication) bool { - return dsp.Spec.DSPVersion == "v1" || dsp.Spec.DSPVersion == "" + return !p.UsingV2Pipelines(dsp) } func (p *DSPAParams) UsingArgoEngineDriver(dsp *dspa.DataSciencePipelinesApplication) bool { From b0f2bd1da18977191dc266fba3c46e07b54485ea Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 20 Dec 2023 13:13:21 -0500 Subject: [PATCH 77/85] Update sample pipeline and remove unused sample --- .../apiserver/sample-pipeline.yaml.tmpl | 554 ------------------ .../sample-pipeline/sample-pipeline.yaml.tmpl | 2 +- 2 files changed, 1 insertion(+), 555 deletions(-) delete mode 100644 config/internal/apiserver/sample-pipeline.yaml.tmpl diff --git a/config/internal/apiserver/sample-pipeline.yaml.tmpl b/config/internal/apiserver/sample-pipeline.yaml.tmpl deleted file mode 100644 index 8afd6db2f..000000000 --- a/config/internal/apiserver/sample-pipeline.yaml.tmpl +++ /dev/null @@ -1,554 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-pipeline-{{.Name}} - namespace: {{.Namespace}} - labels: - app: {{.APIServerDefaultResourceName}} - component: data-science-pipelines -data: - iris-pipeline-compiled.yaml: |- - 
apiVersion: tekton.dev/v1beta1 - kind: PipelineRun - metadata: - name: iris-pipeline - annotations: - tekton.dev/output_artifacts: '{"data-prep": [{"key": "artifacts/$PIPELINERUN/data-prep/X_test.tgz", - "name": "data-prep-X_test", "path": "/tmp/outputs/X_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/X_train.tgz", - "name": "data-prep-X_train", "path": "/tmp/outputs/X_train/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_test.tgz", - "name": "data-prep-y_test", "path": "/tmp/outputs/y_test/data"}, {"key": "artifacts/$PIPELINERUN/data-prep/y_train.tgz", - "name": "data-prep-y_train", "path": "/tmp/outputs/y_train/data"}], "evaluate-model": - [{"key": "artifacts/$PIPELINERUN/evaluate-model/mlpipeline-metrics.tgz", "name": - "mlpipeline-metrics", "path": "/tmp/outputs/mlpipeline_metrics/data"}], "train-model": - [{"key": "artifacts/$PIPELINERUN/train-model/model.tgz", "name": "train-model-model", - "path": "/tmp/outputs/model/data"}]}' - tekton.dev/input_artifacts: '{"evaluate-model": [{"name": "data-prep-X_test", - "parent_task": "data-prep"}, {"name": "data-prep-y_test", "parent_task": "data-prep"}, - {"name": "train-model-model", "parent_task": "train-model"}], "train-model": - [{"name": "data-prep-X_train", "parent_task": "data-prep"}, {"name": "data-prep-y_train", - "parent_task": "data-prep"}], "validate-model": [{"name": "train-model-model", - "parent_task": "train-model"}]}' - tekton.dev/artifact_bucket: mlpipeline - tekton.dev/artifact_endpoint: ${MINIO_SERVICE_SERVICE_HOST}:${MINIO_SERVICE_SERVICE_PORT} - tekton.dev/artifact_endpoint_scheme: http:// - tekton.dev/artifact_items: '{"data-prep": [["X_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test"], - ["X_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train"], - ["y_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test"], - ["y_train", 
"$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train"]], - "evaluate-model": [["mlpipeline-metrics", "/tmp/outputs/mlpipeline_metrics/data"]], - "train-model": [["model", "$(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model"]], - "validate-model": []}' - sidecar.istio.io/inject: "false" - tekton.dev/template: '' - pipelines.kubeflow.org/big_data_passing_format: $(workspaces.$TASK_NAME.path)/artifacts/$ORIG_PR_NAME/$TASKRUN_NAME/$TASK_PARAM_NAME - pipelines.kubeflow.org/pipeline_spec: '{"inputs": [{"default": "iris-model", "name": - "model_obc", "optional": true, "type": "String"}], "name": "Iris Pipeline"}' - labels: - pipelines.kubeflow.org/pipelinename: '' - pipelines.kubeflow.org/generation: '' - spec: - params: - - name: model_obc - value: iris-model - pipelineSpec: - params: - - name: model_obc - default: iris-model - tasks: - - name: data-prep - taskSpec: - steps: - - name: main - args: - - --X-train - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train - - --X-test - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test - - --y-train - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train - - --y-test - - $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def data_prep( - X_train_file, - X_test_file, - y_train_file, 
- y_test_file, - ): - import pickle - - import pandas as pd - - from sklearn import datasets - from sklearn.model_selection import train_test_split - - def get_iris_data(): - iris = datasets.load_iris() - data = pd.DataFrame( - { - "sepalLength": iris.data[:, 0], - "sepalWidth": iris.data[:, 1], - "petalLength": iris.data[:, 2], - "petalWidth": iris.data[:, 3], - "species": iris.target, - } - ) - - print("Initial Dataset:") - print(data.head()) - - return data - - def create_training_set(dataset, test_size = 0.3): - # Features - X = dataset[["sepalLength", "sepalWidth", "petalLength", "petalWidth"]] - # Labels - y = dataset["species"] - - # Split dataset into training set and test set - X_train, X_test, y_train, y_test = train_test_split( - X, y, test_size=test_size, random_state=11 - ) - - return X_train, X_test, y_train, y_test - - def save_pickle(object_file, target_object): - with open(object_file, "wb") as f: - pickle.dump(target_object, f) - - dataset = get_iris_data() - X_train, X_test, y_train, y_test = create_training_set(dataset) - - save_pickle(X_train_file, X_train) - save_pickle(X_test_file, X_test) - save_pickle(y_train_file, y_train) - save_pickle(y_test_file, y_test) - - import argparse - _parser = argparse.ArgumentParser(prog='Data prep', description='') - _parser.add_argument("--X-train", dest="X_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--X-test", dest="X_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-train", dest="y_train_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-test", dest="y_test_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = data_prep(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: 
ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: output-taskrun-name - command: - - sh - - -ec - - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: copy-results-artifacts - command: - - sh - - -ec - - | - set -exo pipefail - TOTAL_SIZE=0 - copy_artifact() { - if [ -d "$1" ]; then - tar -czvf "$1".tar.gz "$1" - SUFFIX=".tar.gz" - fi - ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` - TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) - touch "$2" - if [[ $TOTAL_SIZE -lt 3072 ]]; then - if [ -d "$1" ]; then - tar -tzf "$1".tar.gz > "$2" - elif ! awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then - cp "$1" "$2" - fi - fi - } - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train $(results.X-train.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test $(results.X-test.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_train $(results.y-train.path) - copy_artifact $(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/y_test $(results.y-test.path) - onError: continue - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - results: - - name: X-test - description: /tmp/outputs/X_test/data - - name: X-train - description: /tmp/outputs/X_train/data - - name: taskrun-name - - name: y-test - description: /tmp/outputs/y_test/data - - name: y-train - description: /tmp/outputs/y_train/data - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Data prep", "outputs": - [{"name": "X_train"}, {"name": "X_test"}, {"name": "y_train"}, {"name": - "y_test"}], 
"version": "Data prep@sha256=5aeb512900f57983c9f643ec30ddb4ccc66490a443269b51ce0a67d57cb373b0"}' - workspaces: - - name: data-prep - workspaces: - - name: data-prep - workspace: iris-pipeline - - name: train-model - params: - - name: data-prep-trname - value: $(tasks.data-prep.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --X-train - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_train - - --y-train - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_train - - --model - - $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def train_model( - X_train_file, - y_train_file, - model_file, - ): - import pickle - - from sklearn.ensemble import RandomForestClassifier - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - return target_object - - def save_pickle(object_file, target_object): - with open(object_file, "wb") as f: - pickle.dump(target_object, f) - - def train_iris(X_train, y_train): - model = RandomForestClassifier(n_estimators=100) - model.fit(X_train, y_train) - - return model - - X_train = load_pickle(X_train_file) - y_train = load_pickle(y_train_file) - - model = train_iris(X_train, y_train) - - save_pickle(model_file, model) - - import argparse - _parser = argparse.ArgumentParser(prog='Train model', description='') - 
_parser.add_argument("--X-train", dest="X_train_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-train", dest="y_train_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--model", dest="model_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = train_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: output-taskrun-name - command: - - sh - - -ec - - echo -n "$(context.taskRun.name)" > "$(results.taskrun-name.path)" - - image: registry.access.redhat.com/ubi8/ubi-minimal - name: copy-results-artifacts - command: - - sh - - -ec - - | - set -exo pipefail - TOTAL_SIZE=0 - copy_artifact() { - if [ -d "$1" ]; then - tar -czvf "$1".tar.gz "$1" - SUFFIX=".tar.gz" - fi - ARTIFACT_SIZE=`wc -c "$1"${SUFFIX} | awk '{print $1}'` - TOTAL_SIZE=$( expr $TOTAL_SIZE + $ARTIFACT_SIZE) - touch "$2" - if [[ $TOTAL_SIZE -lt 3072 ]]; then - if [ -d "$1" ]; then - tar -tzf "$1".tar.gz > "$2" - elif ! 
awk "/[^[:print:]]/{f=1} END{exit !f}" "$1"; then - cp "$1" "$2" - fi - fi - } - copy_artifact $(workspaces.train-model.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/model $(results.model.path) - onError: continue - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: data-prep-trname - results: - - name: model - description: /tmp/outputs/model/data - - name: taskrun-name - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Train model", - "outputs": [{"name": "model"}], "version": "Train model@sha256=cb1fbd399ee5849dcdfaafced23a0496cae1d5861795062b22512b766ec418ce"}' - workspaces: - - name: train-model - workspaces: - - name: train-model - workspace: iris-pipeline - runAfter: - - data-prep - - data-prep - - name: evaluate-model - params: - - name: data-prep-trname - value: $(tasks.data-prep.results.taskrun-name) - - name: train-model-trname - value: $(tasks.train-model.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --X-test - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/X_test - - --y-test - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.data-prep-trname)/y_test - - --model - - $(workspaces.evaluate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model - - --mlpipeline-metrics - - /tmp/outputs/mlpipeline_metrics/data - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def _make_parent_dirs_and_return_path(file_path: str): - 
import os - os.makedirs(os.path.dirname(file_path), exist_ok=True) - return file_path - - def evaluate_model( - X_test_file, - y_test_file, - model_file, - mlpipeline_metrics_file, - ): - import json - import pickle - - from sklearn.metrics import accuracy_score - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - return target_object - - X_test = load_pickle(X_test_file) - y_test = load_pickle(y_test_file) - model = load_pickle(model_file) - - y_pred = model.predict(X_test) - - accuracy_score_metric = accuracy_score(y_test, y_pred) - print(f"Accuracy: {accuracy_score_metric}") - - metrics = { - "metrics": [ - { - "name": "accuracy-score", - "numberValue": accuracy_score_metric, - "format": "PERCENTAGE", - }, - ] - } - - with open(mlpipeline_metrics_file, "w") as f: - json.dump(metrics, f) - - import argparse - _parser = argparse.ArgumentParser(prog='Evaluate model', description='') - _parser.add_argument("--X-test", dest="X_test_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--y-test", dest="y_test_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) - _parser.add_argument("--mlpipeline-metrics", dest="mlpipeline_metrics_file", type=_make_parent_dirs_and_return_path, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = evaluate_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: data-prep-trname - - name: train-model-trname - stepTemplate: - volumeMounts: - - name: mlpipeline-metrics - mountPath: /tmp/outputs/mlpipeline_metrics - volumes: - - name: mlpipeline-metrics - emptyDir: {} - metadata: - labels: - pipelines.kubeflow.org/cache_enabled: "true" - 
annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Evaluate model", - "outputs": [{"name": "mlpipeline_metrics", "type": "Metrics"}], "version": - "Evaluate model@sha256=f398e65faecc6f5a4ba11a2c78d8a2274e3ede205a0e199c8bb615531a3abd4a"}' - workspaces: - - name: evaluate-model - workspaces: - - name: evaluate-model - workspace: iris-pipeline - runAfter: - - data-prep - - data-prep - - train-model - - name: validate-model - params: - - name: train-model-trname - value: $(tasks.train-model.results.taskrun-name) - taskSpec: - steps: - - name: main - args: - - --model - - $(workspaces.validate-model.path)/artifacts/$ORIG_PR_NAME/$(params.train-model-trname)/model - command: - - sh - - -c - - (PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m pip install --quiet --no-warn-script-location - 'pandas' 'scikit-learn' || PIP_DISABLE_PIP_VERSION_CHECK=1 python3 -m - pip install --quiet --no-warn-script-location 'pandas' 'scikit-learn' - --user) && "$0" "$@" - - sh - - -ec - - | - program_path=$(mktemp) - printf "%s" "$0" > "$program_path" - python3 -u "$program_path" "$@" - - | - def validate_model(model_file): - import pickle - - def load_pickle(object_file): - with open(object_file, "rb") as f: - target_object = pickle.load(f) - - return target_object - - model = load_pickle(model_file) - - input_values = [[5, 3, 1.6, 0.2]] - - print(f"Performing test prediction on {input_values}") - result = model.predict(input_values) - - print(f"Response: {result}") - - import argparse - _parser = argparse.ArgumentParser(prog='Validate model', description='') - _parser.add_argument("--model", dest="model_file", type=str, required=True, default=argparse.SUPPRESS) - _parsed_args = vars(_parser.parse_args()) - - _outputs = validate_model(**_parsed_args) - image: registry.access.redhat.com/ubi8/python-38 - env: - - name: ORIG_PR_NAME - valueFrom: - fieldRef: - fieldPath: metadata.labels['custom.tekton.dev/originalPipelineRun'] - params: - - name: train-model-trname - metadata: - 
labels: - pipelines.kubeflow.org/cache_enabled: "true" - annotations: - pipelines.kubeflow.org/component_spec_digest: '{"name": "Validate model", - "outputs": [], "version": "Validate model@sha256=53d18ff94fc8f164e7d8455f2c87fa7fdac17e7502502aaa52012e4247d089ee"}' - workspaces: - - name: validate-model - workspaces: - - name: validate-model - workspace: iris-pipeline - runAfter: - - train-model - workspaces: - - name: iris-pipeline - workspaces: - - name: iris-pipeline - volumeClaimTemplate: - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 2Gi diff --git a/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl index 6addfef33..89fbb3b2c 100644 --- a/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl +++ b/config/internal/apiserver/sample-pipeline/sample-pipeline.yaml.tmpl @@ -283,7 +283,7 @@ data: "parent_task": "data-prep"}], "validate-model": [{"name": "train-model-model", "parent_task": "train-model"}]}' tekton.dev/artifact_bucket: mlpipeline - tekton.dev/artifact_endpoint: minio-service.kubeflow:9000 + tekton.dev/artifact_endpoint: ${MINIO_SERVICE_SERVICE_HOST}:${MINIO_SERVICE_SERVICE_PORT} tekton.dev/artifact_endpoint_scheme: http:// tekton.dev/artifact_items: '{"data-prep": [["X_test", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_test"], ["X_train", "$(workspaces.data-prep.path)/artifacts/$ORIG_PR_NAME/$(context.taskRun.name)/X_train"], From 9b39ab98c7d1ca29359ca7b2a98cae91c44f9720 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 20 Dec 2023 13:14:02 -0500 Subject: [PATCH 78/85] Remove String-length limit for Declarative tests (helps debugging) --- controllers/suite_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 34886937e..5c538869f 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -33,6 +33,7 @@ 
import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" "github.com/go-logr/logr" "k8s.io/client-go/kubernetes/scheme" @@ -84,6 +85,8 @@ var _ = BeforeEach(func() { var _ = BeforeSuite(func() { ctx, cancel = context.WithCancel(context.TODO()) + format.MaxLength = 0 + // Initialize logger opts := zap.Options{ Development: true, From a311b920d01617be7b8d0e9044c50d35d34d4404 Mon Sep 17 00:00:00 2001 From: Helber Belmiro Date: Thu, 21 Dec 2023 13:40:22 -0300 Subject: [PATCH 79/85] Restored health check Signed-off-by: Helber Belmiro --- config/samples/dspa_simple_v2.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/config/samples/dspa_simple_v2.yaml b/config/samples/dspa_simple_v2.yaml index a60028870..489ec65e1 100644 --- a/config/samples/dspa_simple_v2.yaml +++ b/config/samples/dspa_simple_v2.yaml @@ -24,7 +24,6 @@ spec: mariaDB: deploy: true objectStorage: - disableHealthCheck: true minio: deploy: true image: 'quay.io/opendatahub/minio:RELEASE.2019-08-14T20-37-41Z-license-compliance' From bb8356ba741166ca44b8bf99df98206138224e2b Mon Sep 17 00:00:00 2001 From: vmudadla Date: Mon, 8 Jan 2024 16:07:46 -0600 Subject: [PATCH 80/85] Document supported Argo versions for v2 --- docs/release/compatibility.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/docs/release/compatibility.yaml b/docs/release/compatibility.yaml index efe8d01dc..54951f1b8 100644 --- a/docs/release/compatibility.yaml +++ b/docs/release/compatibility.yaml @@ -1,5 +1,6 @@ - dsp: 1.0.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 1.8.4 ocp-pipelines: 1.8 @@ -10,6 +11,7 @@ openshift: 4.10,4.11,4.12 - dsp: 1.1.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 1.8.4 ocp-pipelines: 1.8 @@ -20,6 +22,7 @@ openshift: 4.10,4.11,4.12 - dsp: 1.2.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 1.8.4 ocp-pipelines: 1.8 @@ -30,6 +33,7 @@ openshift: 4.10,4.11,4.12 - dsp: 1.3.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 
1.8.4 ocp-pipelines: 1.8 @@ -40,6 +44,7 @@ openshift: 4.10,4.11,4.12 - dsp: 1.4.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 1.8.4 ocp-pipelines: 1.8 @@ -50,6 +55,7 @@ openshift: 4.11,4.12,4.13 - dsp: 1.5.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 1.8.4 ocp-pipelines: 1.8 @@ -60,6 +66,7 @@ openshift: 4.11,4.12,4.13 - dsp: 1.6.x kfp-tekton: 1.5.1 + argo: ml-metadata: 1.5.0 envoy: 1.8.4 ocp-pipelines: 1.8 @@ -68,3 +75,14 @@ ubi-minimal: 8.8 ubi-micro: 8.8 openshift: 4.11,4.12,4.13 +- dsp: 1.7.x + kfp-tekton: + argo: 3.3.10 + ml-metadata: 1.5.0 + envoy: 2.0.2 + ocp-pipelines: 1.8 + oauth-proxy: v4.10 + mariadb-103: 1 + ubi-minimal: 8.7 + ubi-micro: 8.7 + openshift: 4.11,4.12,4.13 From a10e4b6a24cf976ce61d89fa6c85e7cf64552e43 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Mon, 8 Jan 2024 17:16:04 -0500 Subject: [PATCH 81/85] Require MLMD to be deployed when using V2 Pipelines --- api/v1alpha1/dspipeline_types.go | 5 ++--- ...endatahub.io_datasciencepipelinesapplications.yaml | 4 +--- .../internal/apiserver/default/deployment.yaml.tmpl | 2 ++ controllers/dspipeline_params.go | 11 +++++++++++ controllers/mlmd.go | 2 +- 5 files changed, 17 insertions(+), 7 deletions(-) diff --git a/api/v1alpha1/dspipeline_types.go b/api/v1alpha1/dspipeline_types.go index 39d0b8b6e..bf0c1dda6 100644 --- a/api/v1alpha1/dspipeline_types.go +++ b/api/v1alpha1/dspipeline_types.go @@ -40,8 +40,7 @@ type DSPASpec struct { // ObjectStorage specifies Object Store configurations, used for DS Pipelines artifact passing and storage. Specify either the your own External Storage (e.g. AWS S3), or use the default Minio deployment (unsupported, primarily for development, and testing) . 
// +kubebuilder:validation:Required *ObjectStorage `json:"objectStorage"` - // +kubebuilder:default:={deploy: true} - *MLMD `json:"mlmd,omitempty"` + *MLMD `json:"mlmd,omitempty"` // +kubebuilder:validation:Optional // +kubebuilder:default:={deploy: false} *CRDViewer `json:"crdviewer"` @@ -261,7 +260,7 @@ type Minio struct { type MLMD struct { // Enable DS Pipelines Operator management of MLMD. Setting Deploy to false disables operator reconciliation. Default: true - // +kubebuilder:default:=true + // +kubebuilder:default:=false // +kubebuilder:validation:Optional Deploy bool `json:"deploy"` *Envoy `json:"envoy,omitempty"` diff --git a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml index 5486fe587..db8121ed5 100644 --- a/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml +++ b/config/crd/bases/datasciencepipelinesapplications.opendatahub.io_datasciencepipelinesapplications.yaml @@ -305,11 +305,9 @@ spec: default: v1 type: string mlmd: - default: - deploy: true properties: deploy: - default: true + default: false description: 'Enable DS Pipelines Operator management of MLMD. Setting Deploy to false disables operator reconciliation. 
Default: true' diff --git a/config/internal/apiserver/default/deployment.yaml.tmpl b/config/internal/apiserver/default/deployment.yaml.tmpl index 85adaa00b..30f9bd191 100644 --- a/config/internal/apiserver/default/deployment.yaml.tmpl +++ b/config/internal/apiserver/default/deployment.yaml.tmpl @@ -84,6 +84,7 @@ spec: - name: MOVERESULTS_IMAGE value: "{{.APIServer.MoveResultsImage}}" ## Env Vars to only include if MLMD Deployed ## + {{ if .MLMD }} {{ if .MLMD.Deploy }} - name: METADATA_GRPC_SERVICE_SERVICE_HOST value: "ds-pipeline-metadata-grpc-{{.Name}}.{{.Namespace}}.svc.cluster.local" @@ -92,6 +93,7 @@ spec: value: "{{.MLMD.GRPC.Port}}" {{ end }} {{ end }} + {{ end }} - name: ML_PIPELINE_SERVICE_HOST value: "ds-pipeline-{{.Name}}.{{.Namespace}}.svc.cluster.local" - name: ML_PIPELINE_SERVICE_PORT_GRPC diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 9666f54da..38039b0a4 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -384,6 +384,17 @@ func (p *DSPAParams) SetupObjectParams(ctx context.Context, dsp *dspa.DataScienc } func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelinesApplication, client client.Client, log logr.Logger) error { + if p.UsingV2Pipelines(dsp) { + if p.MLMD == nil { + log.Info("MLMD not specified, but is a required component for V2 Pipelines. Including MLMD with default specs.") + p.MLMD = &dspa.MLMD{ + Deploy: true, + } + } else { + log.Info("MLMD disabled in DSPA, but is a required component for V2 Pipelines. 
Overriding to enable component") + p.MLMD.Deploy = true + } + } if p.MLMD != nil { MlmdEnvoyImagePath := p.GetImageForComponent(dsp, config.MlmdEnvoyImagePath, config.MlmdEnvoyImagePathV2Argo, config.MlmdEnvoyImagePathV2Tekton) MlmdGRPCImagePath := p.GetImageForComponent(dsp, config.MlmdGRPCImagePath, config.MlmdGRPCImagePathV2Argo, config.MlmdGRPCImagePathV2Tekton) diff --git a/controllers/mlmd.go b/controllers/mlmd.go index 3a3197416..d4b0e5102 100644 --- a/controllers/mlmd.go +++ b/controllers/mlmd.go @@ -26,7 +26,7 @@ func (r *DSPAReconciler) ReconcileMLMD(dsp *dspav1alpha1.DataSciencePipelinesApp log := r.Log.WithValues("namespace", dsp.Namespace).WithValues("dspa_name", dsp.Name) - if !dsp.Spec.MLMD.Deploy { + if (params.MLMD == nil || !params.MLMD.Deploy) && (dsp.Spec.MLMD == nil || !dsp.Spec.MLMD.Deploy) { r.Log.Info("Skipping Application of ML-Metadata (MLMD) Resources") return nil } From e0ed31cbc0d93831b1d06d48fa1a4ba247056ea8 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 9 Jan 2024 02:14:48 -0500 Subject: [PATCH 82/85] Update FuncTests to validate against new expected MLMD behavior --- .../case_0/expected/created/apiserver_deployment.yaml | 4 ---- .../case_2/expected/created/apiserver_deployment.yaml | 4 ---- .../case_3/expected/created/apiserver_deployment.yaml | 4 ---- .../case_4/expected/created/apiserver_deployment.yaml | 4 ---- 4 files changed, 16 deletions(-) diff --git a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml index 15a850e3a..61185ee35 100644 --- a/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_0/expected/created/apiserver_deployment.yaml @@ -75,10 +75,6 @@ spec: value: "ubi-minimal:test0" - name: MOVERESULTS_IMAGE value: "busybox:test0" - - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: 
"ds-pipeline-metadata-grpc-testdsp0.default.svc.cluster.local" - - name: METADATA_GRPC_SERVICE_SERVICE_PORT - value: "8080" - name: ML_PIPELINE_SERVICE_HOST value: ds-pipeline-testdsp0.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC diff --git a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml index 54378eadd..8fe35e13e 100644 --- a/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_2/expected/created/apiserver_deployment.yaml @@ -75,10 +75,6 @@ spec: value: "ubi-minimal:test2" - name: MOVERESULTS_IMAGE value: "busybox:test2" - - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: "ds-pipeline-metadata-grpc-testdsp2.default.svc.cluster.local" - - name: METADATA_GRPC_SERVICE_SERVICE_PORT - value: "8080" - name: ML_PIPELINE_SERVICE_HOST value: ds-pipeline-testdsp2.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC diff --git a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml index a191ef260..3174377ba 100644 --- a/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_3/expected/created/apiserver_deployment.yaml @@ -75,10 +75,6 @@ spec: value: ubi-minimal:test3 - name: MOVERESULTS_IMAGE value: busybox:test3 - - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: "ds-pipeline-metadata-grpc-testdsp3.default.svc.cluster.local" - - name: METADATA_GRPC_SERVICE_SERVICE_PORT - value: "8080" - name: ML_PIPELINE_SERVICE_HOST value: ds-pipeline-testdsp3.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC diff --git a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml 
b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml index c1e74562b..3c56b5494 100644 --- a/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml +++ b/controllers/testdata/declarative/case_4/expected/created/apiserver_deployment.yaml @@ -75,10 +75,6 @@ spec: value: "this-ubi-minimal-image-from-cr-should-be-used:test4" - name: MOVERESULTS_IMAGE value: "this-busybox-image-from-cr-should-be-used:test4" - - name: METADATA_GRPC_SERVICE_SERVICE_HOST - value: "ds-pipeline-metadata-grpc-testdsp4.default.svc.cluster.local" - - name: METADATA_GRPC_SERVICE_SERVICE_PORT - value: "8080" - name: ML_PIPELINE_SERVICE_HOST value: ds-pipeline-testdsp4.default.svc.cluster.local - name: ML_PIPELINE_SERVICE_PORT_GRPC From 7979f2ca75a5fbca8f6e08bb981d685e7121e2f1 Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Tue, 9 Jan 2024 10:34:53 -0500 Subject: [PATCH 83/85] Throw error when user explicitly disables MLMD with V2 Pipeline DSPAs --- controllers/dspipeline_params.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 38039b0a4..995132aa2 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -391,8 +391,7 @@ func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelin Deploy: true, } } else { - log.Info("MLMD disabled in DSPA, but is a required component for V2 Pipelines. 
Overriding to enable component") - p.MLMD.Deploy = true + return fmt.Errorf("MLMD explicitly disabled in DSPA, but is a required component for V2 Pipelines") } } if p.MLMD != nil { From 7c67e428acdf53b2eb93c5899a832c9a60a1936d Mon Sep 17 00:00:00 2001 From: Giulio Frasca Date: Wed, 10 Jan 2024 09:19:10 -0500 Subject: [PATCH 84/85] Fix v2 mlmd conditional check in dspa params --- controllers/dspipeline_params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/controllers/dspipeline_params.go b/controllers/dspipeline_params.go index 561af16c1..3d6e3d1c3 100644 --- a/controllers/dspipeline_params.go +++ b/controllers/dspipeline_params.go @@ -399,7 +399,7 @@ func (p *DSPAParams) SetupMLMD(ctx context.Context, dsp *dspa.DataSciencePipelin p.MLMD = &dspa.MLMD{ Deploy: true, } - } else { + } else if !p.MLMD.Deploy { return fmt.Errorf("MLMD explicitly disabled in DSPA, but is a required component for V2 Pipelines") } } From 7da7cab1cbe669c232fb9562158bc27260c83de8 Mon Sep 17 00:00:00 2001 From: vmudadla Date: Fri, 12 Jan 2024 11:13:55 -0600 Subject: [PATCH 85/85] Updated versions --- docs/release/compatibility.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/release/compatibility.yaml b/docs/release/compatibility.yaml index 54951f1b8..de587401e 100644 --- a/docs/release/compatibility.yaml +++ b/docs/release/compatibility.yaml @@ -76,7 +76,7 @@ ubi-micro: 8.8 openshift: 4.11,4.12,4.13 - dsp: 1.7.x - kfp-tekton: + kfp-tekton: 1.5.1 argo: 3.3.10 ml-metadata: 1.5.0 envoy: 2.0.2 @@ -85,4 +85,4 @@ mariadb-103: 1 ubi-minimal: 8.7 ubi-micro: 8.7 - openshift: 4.11,4.12,4.13 + openshift: 4.12,4.13,4.14