From bf9b7f8d0e96dd51a871df751dbb5e086565c47f Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Fri, 12 Jul 2024 09:49:11 -0400 Subject: [PATCH 1/7] Add logger to DRClusterConfig reconciler Also, cleanup some scaffolding comments. Signed-off-by: Shyamsundar Ranganathan --- cmd/main.go | 1 + .../controller/drclusterconfig_controller.go | 26 +++++++++---------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/cmd/main.go b/cmd/main.go index b8c85fbc0..29370c2ae 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -174,6 +174,7 @@ func setupReconcilersCluster(mgr ctrl.Manager, ramenConfig *ramendrv1alpha1.Rame if err := (&controllers.DRClusterConfigReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DRClusterConfig"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DRClusterConfig") os.Exit(1) diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index 6cdfb7d32..1d937f523 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -11,6 +11,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" + "github.com/go-logr/logr" + "github.com/google/uuid" ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" ) @@ -18,25 +20,21 @@ import ( type DRClusterConfigReconciler struct { client.Client Scheme *runtime.Scheme + Log logr.Logger } -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the 
cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the DRClusterConfig object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.3/pkg/reconcile +//nolint:lll +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update + func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { _ = log.FromContext(ctx) - // TODO(user): your logic here + log := r.Log.WithValues("name", req.NamespacedName.Name, "rid", uuid.New()) + log.Info("reconcile enter") + + defer log.Info("reconcile exit") return ctrl.Result{}, nil } From 4cf2d7d7c5f3bf7c2c7ccd354b5d22a1eb6c69b8 Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Fri, 12 Jul 2024 09:50:21 -0400 Subject: [PATCH 2/7] Add initial reconcile for DRClusterConfig - Add finalizer to resource being reconciled - Remove on delete - Update reconciler to rate limit max exponential backoff to 5 minutes Signed-off-by: Shyamsundar Ranganathan --- .../controller/drclusterconfig_controller.go | 82 +++++++++++++++++-- 1 file changed, 73 insertions(+), 9 deletions(-) diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index 1d937f523..8888c0d79 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -5,22 +5,34 @@ package controllers import ( "context" + "fmt" + "time" + "golang.org/x/time/rate" 
"k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" + ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" "github.com/go-logr/logr" "github.com/google/uuid" - ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + ramen "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/internal/controller/util" +) + +const ( + drCConfigFinalizerName = "drclusterconfigs.ramendr.openshift.io/ramen" + + maxReconcileBackoff = 5 * time.Minute ) // DRClusterConfigReconciler reconciles a DRClusterConfig object type DRClusterConfigReconciler struct { client.Client - Scheme *runtime.Scheme - Log logr.Logger + Scheme *runtime.Scheme + Log logr.Logger + RateLimiter *workqueue.RateLimiter } //nolint:lll @@ -29,19 +41,71 @@ type DRClusterConfigReconciler struct { // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) - log := r.Log.WithValues("name", req.NamespacedName.Name, "rid", uuid.New()) log.Info("reconcile enter") defer log.Info("reconcile exit") + drCConfig := &ramen.DRClusterConfig{} + if err := r.Client.Get(ctx, req.NamespacedName, drCConfig); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err)) + } + + if util.ResourceIsDeleted(drCConfig) { + return r.processDeletion(ctx, log, drCConfig) + } + + return r.processCreateOrUpdate(ctx, log, drCConfig) +} + +func (r *DRClusterConfigReconciler) processDeletion( + ctx context.Context, + log logr.Logger, + drCConfig *ramen.DRClusterConfig, +) (ctrl.Result, error) { + if err := util.NewResourceUpdater(drCConfig). + RemoveFinalizer(drCConfigFinalizerName). 
+ Update(ctx, r.Client); err != nil { + log.Info("Failed to remove finalizer for DRClusterConfig resource", "error", err) + + return ctrl.Result{Requeue: true}, nil + } + + return ctrl.Result{}, nil +} + +func (r *DRClusterConfigReconciler) processCreateOrUpdate( + ctx context.Context, + log logr.Logger, + drCConfig *ramen.DRClusterConfig, +) (ctrl.Result, error) { + if err := util.NewResourceUpdater(drCConfig). + AddFinalizer(drCConfigFinalizerName). + Update(ctx, r.Client); err != nil { + log.Info("Failed to add finalizer for DRClusterConfig resource", "error", err) + + return ctrl.Result{Requeue: true}, nil + } + return ctrl.Result{}, nil } // SetupWithManager sets up the controller with the Manager. func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&ramendrv1alpha1.DRClusterConfig{}). - Complete(r) + controller := ctrl.NewControllerManagedBy(mgr) + + rateLimiter := workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, maxReconcileBackoff), + // defaults from client-go + //nolint: gomnd + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) + + if r.RateLimiter != nil { + rateLimiter = *r.RateLimiter + } + + return controller.WithOptions(ctrlcontroller.Options{ + RateLimiter: rateLimiter, + }).For(&ramen.DRClusterConfig{}).Complete(r) } From 262bef66780328e70d55739c229bb6950784d652 Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Fri, 12 Jul 2024 09:51:42 -0400 Subject: [PATCH 3/7] Add roles for various storage classes and cluster claims Signed-off-by: Shyamsundar Ranganathan --- config/dr-cluster/rbac/role.yaml | 11 +++++++++++ config/rbac/role.yaml | 11 +++++++++++ internal/controller/drclusterconfig_controller.go | 4 ++++ 3 files changed, 26 insertions(+) diff --git a/config/dr-cluster/rbac/role.yaml b/config/dr-cluster/rbac/role.yaml index 47ef42d6a..480bee100 100644 --- 
a/config/dr-cluster/rbac/role.yaml +++ b/config/dr-cluster/rbac/role.yaml @@ -137,6 +137,17 @@ rules: - patch - update - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - ramendr.openshift.io resources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 664648058..3f7cea04f 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -85,6 +85,17 @@ rules: - get - list - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - cluster.open-cluster-management.io resources: diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index 8888c0d79..1b8ea860f 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -39,6 +39,10 @@ type DRClusterConfigReconciler struct { // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch // +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplicationclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.open-cluster-management.io,resources=clusterclaims,verbs=get;list;watch;create;update;delete func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { log := r.Log.WithValues("name", req.NamespacedName.Name, 
"rid", uuid.New()) From ceca83369aa5d5ca176aafedd99256fab296a800 Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Fri, 12 Jul 2024 09:52:32 -0400 Subject: [PATCH 4/7] Add StorageClass listing and dummy functions for claim creation Building the scaffold for the overall functionality. Signed-off-by: Shyamsundar Ranganathan --- .../controller/drclusterconfig_controller.go | 80 +++++++++++++++++-- 1 file changed, 75 insertions(+), 5 deletions(-) diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index 1b8ea860f..d2d865d7a 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -9,6 +9,7 @@ import ( "time" "golang.org/x/time/rate" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" @@ -25,6 +26,9 @@ const ( drCConfigFinalizerName = "drclusterconfigs.ramendr.openshift.io/ramen" maxReconcileBackoff = 5 * time.Minute + + // Prefixes for various ClusterClaims + ccSCPrefix = "storage.class" ) // DRClusterConfigReconciler reconciles a DRClusterConfig object @@ -62,22 +66,35 @@ func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Requ return r.processCreateOrUpdate(ctx, log, drCConfig) } +// processDeletion ensures all cluster claims created by drClusterConfig are deleted, before removing the finalizer on +// the resource itself func (r *DRClusterConfigReconciler) processDeletion( ctx context.Context, log logr.Logger, drCConfig *ramen.DRClusterConfig, ) (ctrl.Result, error) { + if err := r.pruneClusterClaims(ctx, log, []string{}); err != nil { + return ctrl.Result{Requeue: true}, err + } + if err := util.NewResourceUpdater(drCConfig). RemoveFinalizer(drCConfigFinalizerName). 
Update(ctx, r.Client); err != nil { - log.Info("Failed to remove finalizer for DRClusterConfig resource", "error", err) - - return ctrl.Result{Requeue: true}, nil + return ctrl.Result{Requeue: true}, + fmt.Errorf("failed to remove finalizer for DRClusterConfig resource, %w", err) } return ctrl.Result{}, nil } +// pruneClusterClaims will prune all ClusterClaims created by drClusterConfig that are not in the +// passed in survivor list +func (r *DRClusterConfigReconciler) pruneClusterClaims(ctx context.Context, log logr.Logger, survivors []string) error { + return nil +} + +// processCreateOrUpdate protects the resource with a finalizer and creates ClusterClaims for various storage related +// classes in the cluster. It would finally prune stale ClusterClaims from previous reconciliations. func (r *DRClusterConfigReconciler) processCreateOrUpdate( ctx context.Context, log logr.Logger, @@ -86,14 +103,67 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( if err := util.NewResourceUpdater(drCConfig). AddFinalizer(drCConfigFinalizerName). Update(ctx, r.Client); err != nil { - log.Info("Failed to add finalizer for DRClusterConfig resource", "error", err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to add finalizer for DRClusterConfig resource, %w", err) + } - return ctrl.Result{Requeue: true}, nil + allSurvivors := []string{} + + survivors, err := r.createSCClusterClaims(ctx, log) + if err != nil { + return ctrl.Result{Requeue: true}, err + } + + allSurvivors = append(allSurvivors, survivors...) 
+ + if err := r.pruneClusterClaims(ctx, log, allSurvivors); err != nil { + return ctrl.Result{Requeue: true}, err } return ctrl.Result{}, nil } +// createSCClusterClaims lists all StorageClasses and creates ClusterClaims for them +func (r *DRClusterConfigReconciler) createSCClusterClaims( + ctx context.Context, + log logr.Logger, +) ([]string, error) { + claims := []string{} + + sClasses := &storagev1.StorageClassList{} + if err := r.Client.List(ctx, sClasses); err != nil { + return nil, fmt.Errorf("failed to list StorageClasses, %w", err) + } + + for i := range sClasses.Items { + // TODO: If something is labeled later is there an Update event? + if !util.HasLabel(&sClasses.Items[i], StorageIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccSCPrefix, sClasses.Items[i].GetName())) + } + + return claims, nil +} + +// ensureClusterClaim is a generic ClusterClaim creation function, that create a claim named "prefix.name", with +// the passed in name as the ClusterClaim spec.Value +func (r *DRClusterConfigReconciler) ensureClusterClaim( + ctx context.Context, + log logr.Logger, + prefix, name string, +) error { + return nil +} + +func claimName(prefix, name string) string { + return prefix + "." + name +} + // SetupWithManager sets up the controller with the Manager. 
func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { controller := ctrl.NewControllerManagedBy(mgr) From b7fa70fa441f6f544c087ab37c3ba6868c38e2c2 Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Fri, 12 Jul 2024 10:03:38 -0400 Subject: [PATCH 5/7] Add ClusterClaims for detected StorageClasses Signed-off-by: Shyamsundar Ranganathan --- ...uster-management.io_clusterclaims.crd.yaml | 63 ++++++ .../controller/drclusterconfig_controller.go | 40 +++- .../drclusterconfig_controller_test.go | 201 ++++++++++++++---- internal/controller/suite_test.go | 4 + 4 files changed, 252 insertions(+), 56 deletions(-) create mode 100644 hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml diff --git a/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml b/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml new file mode 100644 index 000000000..4359dcadb --- /dev/null +++ b/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterclaims.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ClusterClaim + listKind: ClusterClaimList + plural: clusterclaims + singular: clusterclaim + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ClusterClaim represents cluster information that a managed cluster claims + ClusterClaims with well known names include, + 1. id.k8s.io, it contains a unique identifier for the cluster. + 2. clusterset.k8s.io, it contains an identifier that relates the cluster + to the ClusterSet in which it belongs. + + + ClusterClaims created on a managed cluster will be collected and saved into + the status of the corresponding ManagedCluster on hub. 
+ properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of the ClusterClaim. + properties: + value: + description: Value is a claim-dependent string + maxLength: 1024 + minLength: 1 + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index d2d865d7a..ea30217a4 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -8,13 +8,17 @@ import ( "fmt" "time" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + "golang.org/x/time/rate" storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" "github.com/google/uuid" @@ -23,7 +27,9 @@ import ( ) const ( - drCConfigFinalizerName = "drclusterconfigs.ramendr.openshift.io/ramen" + drCConfigFinalizerName = 
"drclusterconfigs.ramendr.openshift.io/finalizer" + drCConfigOwnerLabel = "drclusterconfigs.ramendr.openshift.io/owner" + drCConfigOwnerName = "ramen" maxReconcileBackoff = 5 * time.Minute @@ -36,7 +42,7 @@ type DRClusterConfigReconciler struct { client.Client Scheme *runtime.Scheme Log logr.Logger - RateLimiter *workqueue.RateLimiter + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] } //nolint:lll @@ -108,7 +114,7 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( allSurvivors := []string{} - survivors, err := r.createSCClusterClaims(ctx, log) + survivors, err := r.createSCClusterClaims(ctx) if err != nil { return ctrl.Result{Requeue: true}, err } @@ -125,7 +131,6 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( // createSCClusterClaims lists all StorageClasses and creates ClusterClaims for them func (r *DRClusterConfigReconciler) createSCClusterClaims( ctx context.Context, - log logr.Logger, ) ([]string, error) { claims := []string{} @@ -140,7 +145,7 @@ func (r *DRClusterConfigReconciler) createSCClusterClaims( continue } - if err := r.ensureClusterClaim(ctx, log, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { + if err := r.ensureClusterClaim(ctx, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { return nil, err } @@ -150,13 +155,28 @@ func (r *DRClusterConfigReconciler) createSCClusterClaims( return claims, nil } -// ensureClusterClaim is a generic ClusterClaim creation function, that create a claim named "prefix.name", with +// ensureClusterClaim is a generic ClusterClaim creation function, that creates a claim named "prefix.name", with // the passed in name as the ClusterClaim spec.Value func (r *DRClusterConfigReconciler) ensureClusterClaim( ctx context.Context, - log logr.Logger, prefix, name string, ) error { + cc := &clusterv1alpha1.ClusterClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimName(prefix, name), + }, + } + + if _, err := ctrl.CreateOrUpdate(ctx, r.Client, cc, func() error { + 
util.NewResourceUpdater(cc).AddLabel(drCConfigOwnerLabel, drCConfigOwnerName) + + cc.Spec.Value = name + + return nil + }); err != nil { + return fmt.Errorf("failed to create or update ClusterClaim %s, %w", claimName(prefix, name), err) + } + return nil } @@ -168,11 +188,11 @@ func claimName(prefix, name string) string { func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { controller := ctrl.NewControllerManagedBy(mgr) - rateLimiter := workqueue.NewMaxOfRateLimiter( - workqueue.NewItemExponentialFailureRateLimiter(1*time.Second, maxReconcileBackoff), + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](1*time.Second, maxReconcileBackoff), // defaults from client-go //nolint: gomnd - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + &workqueue.TypedBucketRateLimiter[reconcile.Request]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, ) if r.RateLimiter != nil { diff --git a/internal/controller/drclusterconfig_controller_test.go b/internal/controller/drclusterconfig_controller_test.go index 571d56b26..8acbf17f8 100644 --- a/internal/controller/drclusterconfig_controller_test.go +++ b/internal/controller/drclusterconfig_controller_test.go @@ -5,68 +5,177 @@ package controllers_test import ( "context" + "fmt" + "os" + "path/filepath" + "time" + + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" + config "k8s.io/component-base/config/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + ramen "github.com/ramendr/ramen/api/v1alpha1" ramencontrollers "github.com/ramendr/ramen/internal/controller" ) -var _ = Describe("DRClusterConfig Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" +var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { + var ( + ctx context.Context + cancel context.CancelFunc + cfg *rest.Config + testEnv *envtest.Environment + k8sClient client.Client + drCConfig *ramen.DRClusterConfig + sc1 *storagev1.StorageClass + ) - ctx := context.Background() + BeforeAll(func() { + By("bootstrapping test environment") - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed + Expect(os.Setenv("POD_NAMESPACE", ramenNamespace)).To(Succeed()) + + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "hack", "test"), + }, } - drclusterconfig := &ramendrv1alpha1.DRClusterConfig{} - - BeforeEach(func() { - By("creating the custom resource for the Kind DRClusterConfig") - err := k8sClient.Get(ctx, typeNamespacedName, drclusterconfig) - if err != nil && errors.IsNotFound(err) { - resource := &ramendrv1alpha1.DRClusterConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: 
resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. - } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } - }) - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. - resource := &ramendrv1alpha1.DRClusterConfig{} - err := apiReader.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) + if testEnv.UseExistingCluster != nil && *testEnv.UseExistingCluster == true { + namespaceDeletionSupported = true + } - By("Cleanup the specific resource instance DRClusterConfig") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) - }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &ramencontrollers.DRClusterConfigReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } - - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, + var err error + done := make(chan interface{}) + go func() { + defer GinkgoRecover() + cfg, err = testEnv.Start() + close(done) + }() + Eventually(done).WithTimeout(time.Minute).Should(BeClosed()) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + + By("starting the DRClusterConfig reconciler") + + ramenConfig := &ramen.RamenConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "RamenConfig", + APIVersion: ramen.GroupVersion.String(), + }, + LeaderElection: &config.LeaderElectionConfiguration{ + LeaderElect: new(bool), + ResourceName: ramencontrollers.HubLeaderElectionResourceName, + }, + Metrics: ramen.ControllerMetrics{ + BindAddress: "0", // Disable metrics + }, + } + + options := manager.Options{Scheme: scheme.Scheme} + ramencontrollers.LoadControllerOptions(&options, ramenConfig) + + k8sManager, err := ctrl.NewManager(cfg, options) + Expect(err).ToNot(HaveOccurred()) 
+ + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request]( + 10*time.Millisecond, + 100*time.Millisecond), + ) + + Expect((&ramencontrollers.DRClusterConfigReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DRClusterConfig"), + RateLimiter: &rateLimiter, + }).SetupWithManager(k8sManager)).To(Succeed()) + + ctx, cancel = context.WithCancel(context.TODO()) + go func() { + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred()) + }() + + By("Creating a DClusterConfig") + + drCConfig = &ramen.DRClusterConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "local"}, + Spec: ramen.DRClusterConfigSpec{}, + } + Expect(k8sClient.Create(context.TODO(), drCConfig)).To(Succeed()) + }) + + AfterAll(func() { + Expect(k8sClient.Delete(context.TODO(), drCConfig)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(context.TODO(), types.NamespacedName{ + Name: "local", + }, drCConfig) + + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + + cancel() // Stop the reconciler + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("ClusterClaims-StorageClasses", Ordered, func() { + Context("Given DRClusterConfig resource", func() { + When("there is a StorageClass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a StorageClass") + + sc1 = &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "sc1", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Provisioner: "fake.ramen.com", + } + Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + + Eventually(func() error { + ccName := types.NamespacedName{ + Name: "storage.class" + "." 
+ "sc1", + } + + cc := &clusterv1alpha1.ClusterClaim{} + err := k8sClient.Get(context.TODO(), ccName, cc) + if err != nil { + return err + } + + if cc.Spec.Value != "sc1" { + return fmt.Errorf("mismatched spec.value in Clusterclaim, expected sc1, got %s", + cc.Spec.Value) + } + + return nil + }, timeout, interval).Should(BeNil()) + }) }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. }) }) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 66ddc461b..a2b0d2eb0 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -48,6 +48,7 @@ import ( Recipe "github.com/ramendr/recipe/api/v1alpha1" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" // +kubebuilder:scaffold:imports ) @@ -212,6 +213,9 @@ var _ = BeforeSuite(func() { err = clrapiv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = clusterv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = argocdv1alpha1hack.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) From 16d66a9e64393fcb7267cdf0981aefd86e5dda30 Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Sun, 14 Jul 2024 10:44:46 -0400 Subject: [PATCH 6/7] Implement pruning of ClusterClaims For classes listed, those that do not need a ClusterClaim any longer are deleted. Added a StorageClass watcher as well to the reconcile on changes to StorageClasses. 
Signed-off-by: Shyamsundar Ranganathan --- Makefile | 3 + .../controller/drclusterconfig_controller.go | 106 +++++++++++++- .../drclusterconfig_controller_test.go | 130 +++++++++++++----- 3 files changed, 203 insertions(+), 36 deletions(-) diff --git a/Makefile b/Makefile index 0de1cdf16..e307f3445 100644 --- a/Makefile +++ b/Makefile @@ -176,6 +176,9 @@ test-drcluster: generate manifests envtest ## Run DRCluster tests. test-drpolicy: generate manifests envtest ## Run DRPolicy tests. go test ./internal/controller -coverprofile cover.out -ginkgo.focus DRPolicyController +test-drclusterconfig: generate manifests envtest ## Run DRClusterConfig tests. + go test ./internal/controller -coverprofile cover.out -ginkgo.focus DRClusterConfig + test-util: generate manifests envtest ## Run util tests. go test ./internal/controller/util -coverprofile cover.out diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index ea30217a4..a39f0746c 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -6,6 +6,7 @@ package controllers import ( "context" "fmt" + "slices" "time" clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" @@ -14,10 +15,14 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/go-logr/logr" @@ -62,9 +67,18 @@ func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Requ drCConfig := &ramen.DRClusterConfig{} if err := 
r.Client.Get(ctx, req.NamespacedName, drCConfig); err != nil {
+		log.Info("Reconcile error", "error", err)
+
 		return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err))
 	}
 
+	// Ensure there is only one DRClusterConfig for the cluster
+	if _, err := r.GetDRClusterConfig(ctx); err != nil {
+		log.Info("Reconcile error", "error", err)
+
+		return ctrl.Result{}, err
+	}
+
 	if util.ResourceIsDeleted(drCConfig) {
 		return r.processDeletion(ctx, log, drCConfig)
 	}
@@ -72,6 +86,23 @@ func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	return r.processCreateOrUpdate(ctx, log, drCConfig)
 }
 
+func (r *DRClusterConfigReconciler) GetDRClusterConfig(ctx context.Context) (*ramen.DRClusterConfig, error) {
+	drcConfigs := &ramen.DRClusterConfigList{}
+	if err := r.Client.List(ctx, drcConfigs); err != nil {
+		return nil, fmt.Errorf("failed to list DRClusterConfig, %w", err)
+	}
+
+	if len(drcConfigs.Items) == 0 {
+		return nil, fmt.Errorf("failed to find DRClusterConfig")
+	}
+
+	if len(drcConfigs.Items) > 1 {
+		return nil, fmt.Errorf("multiple DRClusterConfigs found")
+	}
+
+	return &drcConfigs.Items[0], nil
+}
+
 // processDeletion ensures all cluster claims created by drClusterConfig are deleted, before removing the finalizer on
 // the resource itself
 func (r *DRClusterConfigReconciler) processDeletion(
@@ -80,12 +111,16 @@ func (r *DRClusterConfigReconciler) processDeletion(
 	drCConfig *ramen.DRClusterConfig,
 ) (ctrl.Result, error) {
 	if err := r.pruneClusterClaims(ctx, log, []string{}); err != nil {
+		log.Info("Reconcile error", "error", err)
+
 		return ctrl.Result{Requeue: true}, err
 	}
 
 	if err := util.NewResourceUpdater(drCConfig).
 		RemoveFinalizer(drCConfigFinalizerName).
Update(ctx, r.Client); err != nil { + log.Info("Reconcile error", "error", err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to remove finalizer for DRClusterConfig resource, %w", err) } @@ -96,6 +131,31 @@ func (r *DRClusterConfigReconciler) processDeletion( // pruneClusterClaims will prune all ClusterClaims created by drClusterConfig that are not in the // passed in survivor list func (r *DRClusterConfigReconciler) pruneClusterClaims(ctx context.Context, log logr.Logger, survivors []string) error { + matchLabels := map[string]string{ + drCConfigOwnerLabel: drCConfigOwnerName, + } + + listOptions := []client.ListOption{ + client.MatchingLabels(matchLabels), + } + + claims := &clusterv1alpha1.ClusterClaimList{} + if err := r.Client.List(ctx, claims, listOptions...); err != nil { + return fmt.Errorf("failed to list ClusterClaims, %w", err) + } + + for idx := range claims.Items { + if slices.Contains(survivors, claims.Items[idx].GetName()) { + continue + } + + if err := r.Client.Delete(ctx, &claims.Items[idx]); err != nil { + return fmt.Errorf("failed to delete ClusterClaim %s, %w", claims.Items[idx].GetName(), err) + } + + log.Info("Pruned ClusterClaim", "claimName", claims.Items[idx].GetName()) + } + return nil } @@ -109,19 +169,25 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( if err := util.NewResourceUpdater(drCConfig). AddFinalizer(drCConfigFinalizerName). Update(ctx, r.Client); err != nil { + log.Info("Reconcile error", "error", err) + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to add finalizer for DRClusterConfig resource, %w", err) } allSurvivors := []string{} - survivors, err := r.createSCClusterClaims(ctx) + survivors, err := r.createSCClusterClaims(ctx, log) if err != nil { + log.Info("Reconcile error", "error", err) + return ctrl.Result{Requeue: true}, err } allSurvivors = append(allSurvivors, survivors...) 
if err := r.pruneClusterClaims(ctx, log, allSurvivors); err != nil { + log.Info("Reconcile error", "error", err) + return ctrl.Result{Requeue: true}, err } @@ -130,7 +196,7 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( // createSCClusterClaims lists all StorageClasses and creates ClusterClaims for them func (r *DRClusterConfigReconciler) createSCClusterClaims( - ctx context.Context, + ctx context.Context, log logr.Logger, ) ([]string, error) { claims := []string{} @@ -145,7 +211,7 @@ func (r *DRClusterConfigReconciler) createSCClusterClaims( continue } - if err := r.ensureClusterClaim(ctx, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { + if err := r.ensureClusterClaim(ctx, log, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { return nil, err } @@ -159,6 +225,7 @@ func (r *DRClusterConfigReconciler) createSCClusterClaims( // the passed in name as the ClusterClaim spec.Value func (r *DRClusterConfigReconciler) ensureClusterClaim( ctx context.Context, + log logr.Logger, prefix, name string, ) error { cc := &clusterv1alpha1.ClusterClaim{ @@ -177,6 +244,8 @@ func (r *DRClusterConfigReconciler) ensureClusterClaim( return fmt.Errorf("failed to create or update ClusterClaim %s, %w", claimName(prefix, name), err) } + log.Info("Created ClusterClaim", "claimName", cc.GetName()) + return nil } @@ -186,7 +255,30 @@ func claimName(prefix, name string) string { // SetupWithManager sets up the controller with the Manager. 
func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - controller := ctrl.NewControllerManagedBy(mgr) + drccMapFn := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + drcConfig, err := r.GetDRClusterConfig(ctx) + if err != nil { + ctrl.Log.Info(fmt.Sprintf("failed processing DRClusterConfig mapping, %v", err)) + + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: drcConfig.GetName(), + }, + }, + } + }), + ) + + drccPredFn := builder.WithPredicates(predicate.NewPredicateFuncs( + func(object client.Object) bool { + return true + }), + ) rateLimiter := workqueue.NewTypedMaxOfRateLimiter( workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](1*time.Second, maxReconcileBackoff), @@ -199,7 +291,11 @@ func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { rateLimiter = *r.RateLimiter } + controller := ctrl.NewControllerManagedBy(mgr) + return controller.WithOptions(ctrlcontroller.Options{ RateLimiter: rateLimiter, - }).For(&ramen.DRClusterConfig{}).Complete(r) + }).For(&ramen.DRClusterConfig{}). + Watches(&storagev1.StorageClass{}, drccMapFn, drccPredFn). 
+ Complete(r) } diff --git a/internal/controller/drclusterconfig_controller_test.go b/internal/controller/drclusterconfig_controller_test.go index 8acbf17f8..dbd1abdd4 100644 --- a/internal/controller/drclusterconfig_controller_test.go +++ b/internal/controller/drclusterconfig_controller_test.go @@ -32,15 +32,51 @@ import ( ramencontrollers "github.com/ramendr/ramen/internal/controller" ) +func ensureClaimCount(apiReader client.Reader, count int) { + Eventually(func() bool { + claims := &clusterv1alpha1.ClusterClaimList{} + + err := apiReader.List(context.TODO(), claims) + if err != nil { + return false + } + + return len(claims.Items) == count + }, timeout, interval).Should(BeTrue()) +} + +//nolint:unparam +func ensureClusterClaim(apiReader client.Reader, class, name string) { + Eventually(func() error { + ccName := types.NamespacedName{ + Name: class + "." + name, + } + + cc := &clusterv1alpha1.ClusterClaim{} + err := apiReader.Get(context.TODO(), ccName, cc) + if err != nil { + return err + } + + if cc.Spec.Value != name { + return fmt.Errorf("mismatched spec.value in ClusterClaim, expected %s, got %s", + name, cc.Spec.Value) + } + + return nil + }, timeout, interval).Should(BeNil()) +} + var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { var ( - ctx context.Context - cancel context.CancelFunc - cfg *rest.Config - testEnv *envtest.Environment - k8sClient client.Client - drCConfig *ramen.DRClusterConfig - sc1 *storagev1.StorageClass + ctx context.Context + cancel context.CancelFunc + cfg *rest.Config + testEnv *envtest.Environment + k8sClient client.Client + apiReader client.Reader + drCConfig *ramen.DRClusterConfig + baseSC, sc1, sc2 *storagev1.StorageClass ) BeforeAll(func() { @@ -94,6 +130,8 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { k8sManager, err := ctrl.NewManager(cfg, options) Expect(err).ToNot(HaveOccurred()) + apiReader = k8sManager.GetAPIReader() + Expect(apiReader).ToNot(BeNil()) rateLimiter := 
workqueue.NewTypedMaxOfRateLimiter( workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request]( @@ -121,9 +159,22 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { Spec: ramen.DRClusterConfigSpec{}, } Expect(k8sClient.Create(context.TODO(), drCConfig)).To(Succeed()) + + By("Defining a basic StroageClass") + + baseSC = &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseSC", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Provisioner: "fake.ramen.com", + } }) AfterAll(func() { + By("deleting the DRClusterConfig") Expect(k8sClient.Delete(context.TODO(), drCConfig)).To(Succeed()) Eventually(func() bool { err := k8sClient.Get(context.TODO(), types.NamespacedName{ @@ -133,6 +184,9 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { return errors.IsNotFound(err) }, timeout, interval).Should(BeTrue()) + By("ensuring claim count is 0 post deletion") + ensureClaimCount(apiReader, 0) + cancel() // Stop the reconciler By("tearing down the test environment") err := testEnv.Stop() @@ -145,35 +199,49 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { It("creates a ClusterClaim", func() { By("creating a StorageClass") - sc1 = &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "sc1", - Labels: map[string]string{ - ramencontrollers.StorageIDLabel: "fake", - }, - }, - Provisioner: "fake.ramen.com", - } + sc1 = baseSC.DeepCopy() + sc1.Name = "sc1" + Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + + ensureClusterClaim(apiReader, "storage.class", "sc1") + ensureClaimCount(apiReader, 1) + }) + }) + When("a StorageClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a StorageClass") + + Expect(k8sClient.Delete(context.TODO(), sc1)).To(Succeed()) + + ensureClaimCount(apiReader, 0) + }) + }) + When("there are multiple StorageClass created with required 
labels", func() { + It("creates ClusterClaims", func() { + By("creating a StorageClass") + + sc1 = baseSC.DeepCopy() + sc1.Name = "sc1" Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) - Eventually(func() error { - ccName := types.NamespacedName{ - Name: "storage.class" + "." + "sc1", - } + sc2 = baseSC.DeepCopy() + sc2.Name = "sc2" + Expect(k8sClient.Create(context.TODO(), sc2)).To(Succeed()) - cc := &clusterv1alpha1.ClusterClaim{} - err := k8sClient.Get(context.TODO(), ccName, cc) - if err != nil { - return err - } + ensureClusterClaim(apiReader, "storage.class", "sc1") + ensureClusterClaim(apiReader, "storage.class", "sc2") + ensureClaimCount(apiReader, 2) + }) + }) + When("a StorageClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a StorageClass label") - if cc.Spec.Value != "sc1" { - return fmt.Errorf("mismatched spec.value in Clusterclaim, expected sc1, got %s", - cc.Spec.Value) - } + sc1.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), sc1)).To(Succeed()) - return nil - }, timeout, interval).Should(BeNil()) + ensureClaimCount(apiReader, 1) + ensureClusterClaim(apiReader, "storage.class", "sc2") }) }) }) From e3fe7f6a8470e65fa6f9321883c64a52903db8fa Mon Sep 17 00:00:00 2001 From: Shyamsundar Ranganathan Date: Sun, 14 Jul 2024 11:32:46 -0400 Subject: [PATCH 7/7] Implement CLassClaims for VRClass and VSClass Signed-off-by: Shyamsundar Ranganathan --- .../controller/drclusterconfig_controller.go | 100 +++++++++-- .../drclusterconfig_controller_test.go | 170 ++++++++++++++++-- 2 files changed, 243 insertions(+), 27 deletions(-) diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index a39f0746c..24e7d651c 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -9,14 +9,15 @@ import ( "slices" "time" - clusterv1alpha1 
"open-cluster-management.io/api/cluster/v1alpha1" - + volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" "golang.org/x/time/rate" storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/workqueue" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,7 +40,9 @@ const ( maxReconcileBackoff = 5 * time.Minute // Prefixes for various ClusterClaims - ccSCPrefix = "storage.class" + ccSCPrefix = "storage.class" + ccVSCPrefix = "snapshot.class" + ccVRCPrefix = "replication.class" ) // DRClusterConfigReconciler reconciles a DRClusterConfig object @@ -174,17 +177,13 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( return ctrl.Result{Requeue: true}, fmt.Errorf("failed to add finalizer for DRClusterConfig resource, %w", err) } - allSurvivors := []string{} - - survivors, err := r.createSCClusterClaims(ctx, log) + allSurvivors, err := r.CreateClassClaims(ctx, log) if err != nil { log.Info("Reconcile error", "error", err) return ctrl.Result{Requeue: true}, err } - allSurvivors = append(allSurvivors, survivors...) 
- if err := r.pruneClusterClaims(ctx, log, allSurvivors); err != nil { log.Info("Reconcile error", "error", err) @@ -194,7 +193,35 @@ func (r *DRClusterConfigReconciler) processCreateOrUpdate( return ctrl.Result{}, nil } -// createSCClusterClaims lists all StorageClasses and creates ClusterClaims for them +// CreateClassClaims creates cluster claims for various storage related classes of interest +func (r *DRClusterConfigReconciler) CreateClassClaims(ctx context.Context, log logr.Logger) ([]string, error) { + allSurvivors := []string{} + + survivors, err := r.createSCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + survivors, err = r.createVSCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + survivors, err = r.createVRCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + return allSurvivors, nil +} + +// createSCClusterClaims lists StorageClasses and creates ClusterClaims for ones marked for ramen func (r *DRClusterConfigReconciler) createSCClusterClaims( ctx context.Context, log logr.Logger, ) ([]string, error) { @@ -206,7 +233,6 @@ func (r *DRClusterConfigReconciler) createSCClusterClaims( } for i := range sClasses.Items { - // TODO: If something is labeled later is there an Update event? 
if !util.HasLabel(&sClasses.Items[i], StorageIDLabel) { continue } @@ -221,6 +247,58 @@ func (r *DRClusterConfigReconciler) createSCClusterClaims( return claims, nil } +// createVSCClusterClaims lists VolumeSnapshotClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createVSCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + vsClasses := &snapv1.VolumeSnapshotClassList{} + if err := r.Client.List(ctx, vsClasses); err != nil { + return nil, fmt.Errorf("failed to list VolumeSnapshotClasses, %w", err) + } + + for i := range vsClasses.Items { + if !util.HasLabel(&vsClasses.Items[i], StorageIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccVSCPrefix, vsClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccVSCPrefix, vsClasses.Items[i].GetName())) + } + + return claims, nil +} + +// createVRCClusterClaims lists VolumeReplicationClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createVRCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + vrClasses := &volrep.VolumeReplicationClassList{} + if err := r.Client.List(ctx, vrClasses); err != nil { + return nil, fmt.Errorf("failed to list VolumeReplicationClasses, %w", err) + } + + for i := range vrClasses.Items { + if !util.HasLabel(&vrClasses.Items[i], VolumeReplicationIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccVRCPrefix, vrClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccVRCPrefix, vrClasses.Items[i].GetName())) + } + + return claims, nil +} + // ensureClusterClaim is a generic ClusterClaim creation function, that creates a claim named "prefix.name", with // the passed in name as the ClusterClaim spec.Value func (r *DRClusterConfigReconciler) ensureClusterClaim( @@ -297,5 
+375,7 @@ func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { RateLimiter: rateLimiter, }).For(&ramen.DRClusterConfig{}). Watches(&storagev1.StorageClass{}, drccMapFn, drccPredFn). + Watches(&snapv1.VolumeSnapshotClass{}, drccMapFn, drccPredFn). + Watches(&volrep.VolumeReplicationClass{}, drccMapFn, drccPredFn). Complete(r) } diff --git a/internal/controller/drclusterconfig_controller_test.go b/internal/controller/drclusterconfig_controller_test.go index dbd1abdd4..0c7726616 100644 --- a/internal/controller/drclusterconfig_controller_test.go +++ b/internal/controller/drclusterconfig_controller_test.go @@ -10,8 +10,8 @@ import ( "path/filepath" "time" - clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" - + volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" storagev1 "k8s.io/api/storage/v1" @@ -22,6 +22,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/util/workqueue" config "k8s.io/component-base/config/v1alpha1" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" @@ -45,7 +46,6 @@ func ensureClaimCount(apiReader client.Reader, count int) { }, timeout, interval).Should(BeTrue()) } -//nolint:unparam func ensureClusterClaim(apiReader client.Reader, class, name string) { Eventually(func() error { ccName := types.NamespacedName{ @@ -69,14 +69,17 @@ func ensureClusterClaim(apiReader client.Reader, class, name string) { var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { var ( - ctx context.Context - cancel context.CancelFunc - cfg *rest.Config - testEnv *envtest.Environment - k8sClient client.Client - apiReader client.Reader - drCConfig *ramen.DRClusterConfig - baseSC, sc1, sc2 
*storagev1.StorageClass + ctx context.Context + cancel context.CancelFunc + cfg *rest.Config + testEnv *envtest.Environment + k8sClient client.Client + apiReader client.Reader + drCConfig *ramen.DRClusterConfig + baseSC, sc1, sc2 *storagev1.StorageClass + baseVSC, vsc1, vsc2 *snapv1.VolumeSnapshotClass + baseVRC, vrc1, vrc2 *volrep.VolumeReplicationClass + claimCount int ) BeforeAll(func() { @@ -160,7 +163,7 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { } Expect(k8sClient.Create(context.TODO(), drCConfig)).To(Succeed()) - By("Defining a basic StroageClass") + By("Defining basic Classes") baseSC = &storagev1.StorageClass{ ObjectMeta: metav1.ObjectMeta{ @@ -171,6 +174,29 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { }, Provisioner: "fake.ramen.com", } + + baseVSC = &snapv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseVSC", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Driver: "fake.ramen.com", + DeletionPolicy: snapv1.VolumeSnapshotContentDelete, + } + + baseVRC = &volrep.VolumeReplicationClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseVRC", + Labels: map[string]string{ + ramencontrollers.VolumeReplicationIDLabel: "fake", + }, + }, + Spec: volrep.VolumeReplicationClassSpec{ + Provisioner: "fake.ramen.com", + }, + } }) AfterAll(func() { @@ -193,7 +219,7 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { Expect(err).NotTo(HaveOccurred()) }) - Describe("ClusterClaims-StorageClasses", Ordered, func() { + Describe("ClusterClaims", Ordered, func() { Context("Given DRClusterConfig resource", func() { When("there is a StorageClass created with required labels", func() { It("creates a ClusterClaim", func() { @@ -203,8 +229,9 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { sc1.Name = "sc1" Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + claimCount++ ensureClusterClaim(apiReader, 
"storage.class", "sc1") - ensureClaimCount(apiReader, 1) + ensureClaimCount(apiReader, claimCount) }) }) When("a StorageClass with required labels is deleted", func() { @@ -213,7 +240,8 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { Expect(k8sClient.Delete(context.TODO(), sc1)).To(Succeed()) - ensureClaimCount(apiReader, 0) + claimCount-- + ensureClaimCount(apiReader, claimCount) }) }) When("there are multiple StorageClass created with required labels", func() { @@ -228,9 +256,10 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { sc2.Name = "sc2" Expect(k8sClient.Create(context.TODO(), sc2)).To(Succeed()) + claimCount += 2 ensureClusterClaim(apiReader, "storage.class", "sc1") ensureClusterClaim(apiReader, "storage.class", "sc2") - ensureClaimCount(apiReader, 2) + ensureClaimCount(apiReader, claimCount) }) }) When("a StorageClass label is deleted", func() { @@ -240,10 +269,117 @@ var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { sc1.Labels = map[string]string{} Expect(k8sClient.Update(context.TODO(), sc1)).To(Succeed()) - ensureClaimCount(apiReader, 1) + claimCount-- + ensureClaimCount(apiReader, claimCount) ensureClusterClaim(apiReader, "storage.class", "sc2") }) }) }) + When("there is a SnapshotCLass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a SnapshotClass") + + vsc1 = baseVSC.DeepCopy() + vsc1.Name = "vsc1" + Expect(k8sClient.Create(context.TODO(), vsc1)).To(Succeed()) + + claimCount++ + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a SnapshotClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a SnapshotClass") + + Expect(k8sClient.Delete(context.TODO(), vsc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple SnapshotClass created with 
required labels", func() { + It("creates ClusterClaims", func() { + By("creating a SnapshotClass") + + vsc1 = baseVSC.DeepCopy() + vsc1.Name = "vsc1" + Expect(k8sClient.Create(context.TODO(), vsc1)).To(Succeed()) + + vsc2 = baseVSC.DeepCopy() + vsc2.Name = "vsc2" + Expect(k8sClient.Create(context.TODO(), vsc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + ensureClusterClaim(apiReader, "snapshot.class", "vsc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a SnapshotClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a SnapshotClass label") + + vsc2.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), vsc2)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + }) + }) + When("there is a VolumeReplicationCLass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a VolumeReplicationClass") + + vrc1 = baseVRC.DeepCopy() + vrc1.Name = "vrc1" + Expect(k8sClient.Create(context.TODO(), vrc1)).To(Succeed()) + + claimCount++ + ensureClusterClaim(apiReader, "replication.class", "vrc1") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a VolumeReplicationClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a VolumeReplicationClass") + + Expect(k8sClient.Delete(context.TODO(), vrc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple VolumeReplicationClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a VolumeReplicationClass") + + vrc1 = baseVRC.DeepCopy() + vrc1.Name = "vrc1" + Expect(k8sClient.Create(context.TODO(), vrc1)).To(Succeed()) + + vrc2 = baseVRC.DeepCopy() + vrc2.Name = "vrc2" + Expect(k8sClient.Create(context.TODO(), vrc2)).To(Succeed()) 
+ + claimCount += 2 + ensureClusterClaim(apiReader, "replication.class", "vrc1") + ensureClusterClaim(apiReader, "replication.class", "vrc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a VolumeReplicationClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a VolumeReplicationClass label") + + vrc2.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), vrc2)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "replication.class", "vrc1") + }) + }) }) })