diff --git a/Makefile b/Makefile index 0de1cdf16..e307f3445 100644 --- a/Makefile +++ b/Makefile @@ -176,6 +176,9 @@ test-drcluster: generate manifests envtest ## Run DRCluster tests. test-drpolicy: generate manifests envtest ## Run DRPolicy tests. go test ./internal/controller -coverprofile cover.out -ginkgo.focus DRPolicyController +test-drclusterconfig: generate manifests envtest ## Run DRClusterConfig tests. + go test ./internal/controller -coverprofile cover.out -ginkgo.focus DRClusterConfig + test-util: generate manifests envtest ## Run util tests. go test ./internal/controller/util -coverprofile cover.out diff --git a/cmd/main.go b/cmd/main.go index b8c85fbc0..29370c2ae 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -174,6 +174,7 @@ func setupReconcilersCluster(mgr ctrl.Manager, ramenConfig *ramendrv1alpha1.Rame if err := (&controllers.DRClusterConfigReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DRClusterConfig"), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "DRClusterConfig") os.Exit(1) diff --git a/config/dr-cluster/rbac/role.yaml b/config/dr-cluster/rbac/role.yaml index 47ef42d6a..480bee100 100644 --- a/config/dr-cluster/rbac/role.yaml +++ b/config/dr-cluster/rbac/role.yaml @@ -137,6 +137,17 @@ rules: - patch - update - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - ramendr.openshift.io resources: diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 664648058..3f7cea04f 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -85,6 +85,17 @@ rules: - get - list - watch +- apiGroups: + - cluster.open-cluster-management.io + resources: + - clusterclaims + verbs: + - create + - delete + - get + - list + - update + - watch - apiGroups: - cluster.open-cluster-management.io resources: diff --git a/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml b/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml new file mode 100644 index 000000000..4359dcadb --- /dev/null +++ b/hack/test/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterclaims.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: ClusterClaim + listKind: ClusterClaimList + plural: clusterclaims + singular: clusterclaim + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: |- + ClusterClaim represents cluster information that a managed cluster claims + ClusterClaims with well known names include, + 1. id.k8s.io, it contains a unique identifier for the cluster. + 2. clusterset.k8s.io, it contains an identifier that relates the cluster + to the ClusterSet in which it belongs. + + + ClusterClaims created on a managed cluster will be collected and saved into + the status of the corresponding ManagedCluster on hub. + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of the ClusterClaim. + properties: + value: + description: Value is a claim-dependent string + maxLength: 1024 + minLength: 1 + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/internal/controller/drclusterconfig_controller.go b/internal/controller/drclusterconfig_controller.go index 6cdfb7d32..24e7d651c 100644 --- a/internal/controller/drclusterconfig_controller.go +++ b/internal/controller/drclusterconfig_controller.go @@ -5,45 +5,377 @@ package controllers import ( "context" + "fmt" + "slices" + "time" + volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" + "golang.org/x/time/rate" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/workqueue" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" + ctrlcontroller "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" - ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + "github.com/go-logr/logr" + "github.com/google/uuid" + ramen "github.com/ramendr/ramen/api/v1alpha1" + "github.com/ramendr/ramen/internal/controller/util" +) + +const ( + drCConfigFinalizerName = "drclusterconfigs.ramendr.openshift.io/finalizer" + drCConfigOwnerLabel = "drclusterconfigs.ramendr.openshift.io/owner" + drCConfigOwnerName = "ramen" + + maxReconcileBackoff = 5 * time.Minute + + // Prefixes for various ClusterClaims + ccSCPrefix = "storage.class" + ccVSCPrefix = "snapshot.class" + ccVRCPrefix = "replication.class" ) // DRClusterConfigReconciler reconciles a DRClusterConfig object type DRClusterConfigReconciler struct { client.Client - Scheme *runtime.Scheme -} - -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the DRClusterConfig object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.16.3/pkg/reconcile + Scheme *runtime.Scheme + Log logr.Logger + RateLimiter *workqueue.TypedRateLimiter[reconcile.Request] +} + +//nolint:lll +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=ramendr.openshift.io,resources=drclusterconfigs/finalizers,verbs=update +// +kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=snapshot.storage.k8s.io,resources=volumesnapshotclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=replication.storage.openshift.io,resources=volumereplicationclasses,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.open-cluster-management.io,resources=clusterclaims,verbs=get;list;watch;create;update;delete + func (r *DRClusterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - _ = log.FromContext(ctx) + log := r.Log.WithValues("name", req.NamespacedName.Name, "rid", uuid.New()) + log.Info("reconcile enter") + + defer log.Info("reconcile exit") + + drCConfig := &ramen.DRClusterConfig{} + if err := r.Client.Get(ctx, req.NamespacedName, drCConfig); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{}, client.IgnoreNotFound(fmt.Errorf("get: %w", err)) + } + + // Ensure there is ony one DRClusterConfig for the cluster + if _, err := r.GetDRClusterConfig(ctx); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{}, err + } + + if util.ResourceIsDeleted(drCConfig) { + return r.processDeletion(ctx, log, drCConfig) + } + + return r.processCreateOrUpdate(ctx, log, drCConfig) +} + +func (r *DRClusterConfigReconciler) GetDRClusterConfig(ctx context.Context) (*ramen.DRClusterConfig, error) { + drcConfigs := &ramen.DRClusterConfigList{} + if err := r.Client.List(ctx, drcConfigs); err != nil { + return nil, fmt.Errorf("failed to list DRClusterConfig, %w", err) + } + + if len(drcConfigs.Items) == 0 { + return nil, fmt.Errorf("failed to find DRClusterConfig") + } + + if len(drcConfigs.Items) > 1 { + return nil, fmt.Errorf("multiple DRClusterConfigs found") + } + + return &drcConfigs.Items[0], nil +} + +// processDeletion ensures all cluster claims created by drClusterConfig are deleted, before removing the finalizer on +// the resource itself +func (r *DRClusterConfigReconciler) processDeletion( + ctx context.Context, + log logr.Logger, + drCConfig *ramen.DRClusterConfig, +) (ctrl.Result, error) { + if err := r.pruneClusterClaims(ctx, log, []string{}); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, err + } - // TODO(user): your logic here + if err := util.NewResourceUpdater(drCConfig). + RemoveFinalizer(drCConfigFinalizerName). 
+ Update(ctx, r.Client); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, + fmt.Errorf("failed to remove finalizer for DRClusterConfig resource, %w", err) + } return ctrl.Result{}, nil } +// pruneClusterClaims will prune all ClusterClaims created by drClusterConfig that are not in the +// passed in survivor list +func (r *DRClusterConfigReconciler) pruneClusterClaims(ctx context.Context, log logr.Logger, survivors []string) error { + matchLabels := map[string]string{ + drCConfigOwnerLabel: drCConfigOwnerName, + } + + listOptions := []client.ListOption{ + client.MatchingLabels(matchLabels), + } + + claims := &clusterv1alpha1.ClusterClaimList{} + if err := r.Client.List(ctx, claims, listOptions...); err != nil { + return fmt.Errorf("failed to list ClusterClaims, %w", err) + } + + for idx := range claims.Items { + if slices.Contains(survivors, claims.Items[idx].GetName()) { + continue + } + + if err := r.Client.Delete(ctx, &claims.Items[idx]); err != nil { + return fmt.Errorf("failed to delete ClusterClaim %s, %w", claims.Items[idx].GetName(), err) + } + + log.Info("Pruned ClusterClaim", "claimName", claims.Items[idx].GetName()) + } + + return nil +} + +// processCreateOrUpdate protects the resource with a finalizer and creates ClusterClaims for various storage related +// classes in the cluster. It would finally prune stale ClusterClaims from previous reconciliations. +func (r *DRClusterConfigReconciler) processCreateOrUpdate( + ctx context.Context, + log logr.Logger, + drCConfig *ramen.DRClusterConfig, +) (ctrl.Result, error) { + if err := util.NewResourceUpdater(drCConfig). + AddFinalizer(drCConfigFinalizerName). + Update(ctx, r.Client); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, fmt.Errorf("failed to add finalizer for DRClusterConfig resource, %w", err) + } + + allSurvivors, err := r.CreateClassClaims(ctx, log) + if err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, err + } + + if err := r.pruneClusterClaims(ctx, log, allSurvivors); err != nil { + log.Info("Reconcile error", "error", err) + + return ctrl.Result{Requeue: true}, err + } + + return ctrl.Result{}, nil +} + +// CreateClassClaims creates cluster claims for various storage related classes of interest +func (r *DRClusterConfigReconciler) CreateClassClaims(ctx context.Context, log logr.Logger) ([]string, error) { + allSurvivors := []string{} + + survivors, err := r.createSCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + survivors, err = r.createVSCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) + + survivors, err = r.createVRCClusterClaims(ctx, log) + if err != nil { + return nil, err + } + + allSurvivors = append(allSurvivors, survivors...) 
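+	// The aggregated claim names form the survivor list; pruneClusterClaims later
+	// deletes any ramen-owned ClusterClaim that is not present in this list.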
+ + return allSurvivors, nil +} + +// createSCClusterClaims lists StorageClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createSCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + sClasses := &storagev1.StorageClassList{} + if err := r.Client.List(ctx, sClasses); err != nil { + return nil, fmt.Errorf("failed to list StorageClasses, %w", err) + } + + for i := range sClasses.Items { + if !util.HasLabel(&sClasses.Items[i], StorageIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccSCPrefix, sClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccSCPrefix, sClasses.Items[i].GetName())) + } + + return claims, nil +} + +// createVSCClusterClaims lists VolumeSnapshotClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createVSCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + vsClasses := &snapv1.VolumeSnapshotClassList{} + if err := r.Client.List(ctx, vsClasses); err != nil { + return nil, fmt.Errorf("failed to list VolumeSnapshotClasses, %w", err) + } + + for i := range vsClasses.Items { + if !util.HasLabel(&vsClasses.Items[i], StorageIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccVSCPrefix, vsClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccVSCPrefix, vsClasses.Items[i].GetName())) + } + + return claims, nil +} + +// createVRCClusterClaims lists VolumeReplicationClasses and creates ClusterClaims for ones marked for ramen +func (r *DRClusterConfigReconciler) createVRCClusterClaims( + ctx context.Context, log logr.Logger, +) ([]string, error) { + claims := []string{} + + vrClasses := &volrep.VolumeReplicationClassList{} + if err := r.Client.List(ctx, vrClasses); err != nil { + return nil, fmt.Errorf("failed to list VolumeReplicationClasses, %w", err) + } + + for i := range vrClasses.Items { + if !util.HasLabel(&vrClasses.Items[i], VolumeReplicationIDLabel) { + continue + } + + if err := r.ensureClusterClaim(ctx, log, ccVRCPrefix, vrClasses.Items[i].GetName()); err != nil { + return nil, err + } + + claims = append(claims, claimName(ccVRCPrefix, vrClasses.Items[i].GetName())) + } + + return claims, nil +} + +// ensureClusterClaim is a generic ClusterClaim creation function, that creates a claim named "prefix.name", with +// the passed in name as the ClusterClaim spec.Value +func (r *DRClusterConfigReconciler) ensureClusterClaim( + ctx context.Context, + log logr.Logger, + prefix, name string, +) error { + cc := &clusterv1alpha1.ClusterClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: claimName(prefix, name), + }, + } + + if _, err := ctrl.CreateOrUpdate(ctx, r.Client, cc, func() error { + util.NewResourceUpdater(cc).AddLabel(drCConfigOwnerLabel, drCConfigOwnerName) + + cc.Spec.Value = name + + return nil + }); err != nil { + return fmt.Errorf("failed to create or update ClusterClaim %s, %w", claimName(prefix, name), err) + } + + log.Info("Created ClusterClaim", "claimName", cc.GetName()) + + return nil +} + +func claimName(prefix, name string) string { + return prefix + "." + name +} + // SetupWithManager sets up the controller with the Manager. func (r *DRClusterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&ramendrv1alpha1.DRClusterConfig{}). 
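+	// Map events from the watched storage, snapshot and replication classes back to the
+	// singleton DRClusterConfig, so that class changes retrigger claim reconciliation.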
+ drccMapFn := handler.EnqueueRequestsFromMapFunc(handler.MapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + drcConfig, err := r.GetDRClusterConfig(ctx) + if err != nil { + ctrl.Log.Info(fmt.Sprintf("failed processing DRClusterConfig mapping, %v", err)) + + return []ctrl.Request{} + } + + return []ctrl.Request{ + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: drcConfig.GetName(), + }, + }, + } + }), + ) + + drccPredFn := builder.WithPredicates(predicate.NewPredicateFuncs( + func(object client.Object) bool { + return true + }), + ) + + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request](1*time.Second, maxReconcileBackoff), + // defaults from client-go + //nolint: gomnd + &workqueue.TypedBucketRateLimiter[reconcile.Request]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ) + + if r.RateLimiter != nil { + rateLimiter = *r.RateLimiter + } + + controller := ctrl.NewControllerManagedBy(mgr) + + return controller.WithOptions(ctrlcontroller.Options{ + RateLimiter: rateLimiter, + }).For(&ramen.DRClusterConfig{}). + Watches(&storagev1.StorageClass{}, drccMapFn, drccPredFn). + Watches(&snapv1.VolumeSnapshotClass{}, drccMapFn, drccPredFn). + Watches(&volrep.VolumeReplicationClass{}, drccMapFn, drccPredFn). Complete(r) } diff --git a/internal/controller/drclusterconfig_controller_test.go b/internal/controller/drclusterconfig_controller_test.go index 571d56b26..0c7726616 100644 --- a/internal/controller/drclusterconfig_controller_test.go +++ b/internal/controller/drclusterconfig_controller_test.go @@ -5,68 +5,381 @@ package controllers_test import ( "context" + "fmt" + "os" + "path/filepath" + "time" + volrep "github.com/csi-addons/kubernetes-csi-addons/api/replication.storage/v1alpha1" + snapv1 "github.com/kubernetes-csi/external-snapshotter/client/v7/apis/volumesnapshot/v1" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/util/workqueue" + config "k8s.io/component-base/config/v1alpha1" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - ramendrv1alpha1 "github.com/ramendr/ramen/api/v1alpha1" + ramen "github.com/ramendr/ramen/api/v1alpha1" ramencontrollers "github.com/ramendr/ramen/internal/controller" ) -var _ = Describe("DRClusterConfig Controller", func() { - Context("When reconciling a resource", func() { - const resourceName = "test-resource" +func ensureClaimCount(apiReader client.Reader, count int) { + Eventually(func() bool { + claims := &clusterv1alpha1.ClusterClaimList{} + + err := apiReader.List(context.TODO(), claims) + if err != nil { + return false + } + + return len(claims.Items) == count + }, timeout, interval).Should(BeTrue()) +} + +func ensureClusterClaim(apiReader client.Reader, class, name string) { + Eventually(func() error { + ccName := types.NamespacedName{ + Name: class + "." 
+ name, + } + + cc := &clusterv1alpha1.ClusterClaim{} + err := apiReader.Get(context.TODO(), ccName, cc) + if err != nil { + return err + } + + if cc.Spec.Value != name { + return fmt.Errorf("mismatched spec.value in ClusterClaim, expected %s, got %s", + name, cc.Spec.Value) + } + + return nil + }, timeout, interval).Should(BeNil()) +} + +var _ = Describe("DRClusterConfig-ClusterClaimsTests", Ordered, func() { + var ( + ctx context.Context + cancel context.CancelFunc + cfg *rest.Config + testEnv *envtest.Environment + k8sClient client.Client + apiReader client.Reader + drCConfig *ramen.DRClusterConfig + baseSC, sc1, sc2 *storagev1.StorageClass + baseVSC, vsc1, vsc2 *snapv1.VolumeSnapshotClass + baseVRC, vrc1, vrc2 *volrep.VolumeReplicationClass + claimCount int + ) + + BeforeAll(func() { + By("bootstrapping test environment") + + Expect(os.Setenv("POD_NAMESPACE", ramenNamespace)).To(Succeed()) + + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "hack", "test"), + }, + } + + if testEnv.UseExistingCluster != nil && *testEnv.UseExistingCluster == true { + namespaceDeletionSupported = true + } + + var err error + done := make(chan interface{}) + go func() { + defer GinkgoRecover() + cfg, err = testEnv.Start() + close(done) + }() + Eventually(done).WithTimeout(time.Minute).Should(BeClosed()) + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) - ctx := context.Background() + By("starting the DRClusterConfig reconciler") - typeNamespacedName := types.NamespacedName{ - Name: resourceName, - Namespace: "default", // TODO(user):Modify as needed + ramenConfig := &ramen.RamenConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "RamenConfig", + APIVersion: ramen.GroupVersion.String(), + }, + LeaderElection: &config.LeaderElectionConfiguration{ + LeaderElect: new(bool), + ResourceName: ramencontrollers.HubLeaderElectionResourceName, + }, + Metrics: ramen.ControllerMetrics{ + BindAddress: "0", // Disable metrics + }, } - drclusterconfig := &ramendrv1alpha1.DRClusterConfig{} - - BeforeEach(func() { - By("creating the custom resource for the Kind DRClusterConfig") - err := k8sClient.Get(ctx, typeNamespacedName, drclusterconfig) - if err != nil && errors.IsNotFound(err) { - resource := &ramendrv1alpha1.DRClusterConfig{ - ObjectMeta: metav1.ObjectMeta{ - Name: resourceName, - Namespace: "default", - }, - // TODO(user): Specify other spec details if needed. 
- } - Expect(k8sClient.Create(ctx, resource)).To(Succeed()) - } + + options := manager.Options{Scheme: scheme.Scheme} + ramencontrollers.LoadControllerOptions(&options, ramenConfig) + + k8sManager, err := ctrl.NewManager(cfg, options) + Expect(err).ToNot(HaveOccurred()) + apiReader = k8sManager.GetAPIReader() + Expect(apiReader).ToNot(BeNil()) + + rateLimiter := workqueue.NewTypedMaxOfRateLimiter( + workqueue.NewTypedItemExponentialFailureRateLimiter[reconcile.Request]( + 10*time.Millisecond, + 100*time.Millisecond), + ) + + Expect((&ramencontrollers.DRClusterConfigReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Log: ctrl.Log.WithName("controllers").WithName("DRClusterConfig"), + RateLimiter: &rateLimiter, + }).SetupWithManager(k8sManager)).To(Succeed()) + + ctx, cancel = context.WithCancel(context.TODO()) + go func() { + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred()) + }() + + By("Creating a DClusterConfig") + + drCConfig = &ramen.DRClusterConfig{ + ObjectMeta: metav1.ObjectMeta{Name: "local"}, + Spec: ramen.DRClusterConfigSpec{}, + } + Expect(k8sClient.Create(context.TODO(), drCConfig)).To(Succeed()) + + By("Defining basic Classes") + + baseSC = &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseSC", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Provisioner: "fake.ramen.com", + } + + baseVSC = &snapv1.VolumeSnapshotClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseVSC", + Labels: map[string]string{ + ramencontrollers.StorageIDLabel: "fake", + }, + }, + Driver: "fake.ramen.com", + DeletionPolicy: snapv1.VolumeSnapshotContentDelete, + } + + baseVRC = &volrep.VolumeReplicationClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "baseVRC", + Labels: map[string]string{ + ramencontrollers.VolumeReplicationIDLabel: "fake", + }, + }, + Spec: volrep.VolumeReplicationClassSpec{ + Provisioner: "fake.ramen.com", + }, + } + }) + + AfterAll(func() { + By("deleting the DRClusterConfig") + Expect(k8sClient.Delete(context.TODO(), drCConfig)).To(Succeed()) + Eventually(func() bool { + err := k8sClient.Get(context.TODO(), types.NamespacedName{ + Name: "local", + }, drCConfig) + + return errors.IsNotFound(err) + }, timeout, interval).Should(BeTrue()) + + By("ensuring claim count is 0 post deletion") + ensureClaimCount(apiReader, 0) + + cancel() // Stop the reconciler + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) + }) + + Describe("ClusterClaims", Ordered, func() { + Context("Given DRClusterConfig resource", func() { + When("there is a StorageClass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a StorageClass") + + sc1 = baseSC.DeepCopy() + sc1.Name = "sc1" + Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + + claimCount++ + ensureClusterClaim(apiReader, "storage.class", "sc1") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a StorageClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a StorageClass") + + Expect(k8sClient.Delete(context.TODO(), sc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple StorageClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a StorageClass") + + sc1 = baseSC.DeepCopy() + sc1.Name = "sc1" + Expect(k8sClient.Create(context.TODO(), sc1)).To(Succeed()) + + sc2 = baseSC.DeepCopy() 
+ sc2.Name = "sc2" + Expect(k8sClient.Create(context.TODO(), sc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "storage.class", "sc1") + ensureClusterClaim(apiReader, "storage.class", "sc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a StorageClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a StorageClass label") + + sc1.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), sc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "storage.class", "sc2") + }) + }) }) + When("there is a SnapshotCLass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a SnapshotClass") - AfterEach(func() { - // TODO(user): Cleanup logic after each test, like removing the resource instance. - resource := &ramendrv1alpha1.DRClusterConfig{} - err := apiReader.Get(ctx, typeNamespacedName, resource) - Expect(err).NotTo(HaveOccurred()) + vsc1 = baseVSC.DeepCopy() + vsc1.Name = "vsc1" + Expect(k8sClient.Create(context.TODO(), vsc1)).To(Succeed()) - By("Cleanup the specific resource instance DRClusterConfig") - Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + claimCount++ + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + ensureClaimCount(apiReader, claimCount) + }) }) - It("should successfully reconcile the resource", func() { - By("Reconciling the created resource") - controllerReconciler := &ramencontrollers.DRClusterConfigReconciler{ - Client: k8sClient, - Scheme: k8sClient.Scheme(), - } - - _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ - NamespacedName: typeNamespacedName, + When("a SnapshotClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a SnapshotClass") + + Expect(k8sClient.Delete(context.TODO(), vsc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple SnapshotClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a SnapshotClass") + + vsc1 = baseVSC.DeepCopy() + vsc1.Name = "vsc1" + Expect(k8sClient.Create(context.TODO(), vsc1)).To(Succeed()) + + vsc2 = baseVSC.DeepCopy() + vsc2.Name = "vsc2" + Expect(k8sClient.Create(context.TODO(), vsc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + ensureClusterClaim(apiReader, "snapshot.class", "vsc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a SnapshotClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a SnapshotClass label") + + vsc2.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), vsc2)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "snapshot.class", "vsc1") + }) + }) + When("there is a VolumeReplicationCLass created with required labels", func() { + It("creates a ClusterClaim", func() { + By("creating a VolumeReplicationClass") + + vrc1 = baseVRC.DeepCopy() + vrc1.Name = "vrc1" + Expect(k8sClient.Create(context.TODO(), vrc1)).To(Succeed()) + + claimCount++ + ensureClusterClaim(apiReader, "replication.class", "vrc1") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a VolumeReplicationClass with required labels is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a VolumeReplicationClass") + + 
Expect(k8sClient.Delete(context.TODO(), vrc1)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + }) + }) + When("there are multiple VolumeReplicationClass created with required labels", func() { + It("creates ClusterClaims", func() { + By("creating a VolumeReplicationClass") + + vrc1 = baseVRC.DeepCopy() + vrc1.Name = "vrc1" + Expect(k8sClient.Create(context.TODO(), vrc1)).To(Succeed()) + + vrc2 = baseVRC.DeepCopy() + vrc2.Name = "vrc2" + Expect(k8sClient.Create(context.TODO(), vrc2)).To(Succeed()) + + claimCount += 2 + ensureClusterClaim(apiReader, "replication.class", "vrc1") + ensureClusterClaim(apiReader, "replication.class", "vrc2") + ensureClaimCount(apiReader, claimCount) + }) + }) + When("a VolumeReplicationClass label is deleted", func() { + It("deletes the associated ClusterClaim", func() { + By("deleting a VolumeReplicationClass label") + + vrc2.Labels = map[string]string{} + Expect(k8sClient.Update(context.TODO(), vrc2)).To(Succeed()) + + claimCount-- + ensureClaimCount(apiReader, claimCount) + ensureClusterClaim(apiReader, "replication.class", "vrc1") }) - Expect(err).NotTo(HaveOccurred()) - // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. - // Example: If you expect a certain status condition after reconciliation, verify it here. }) }) }) diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 66ddc461b..a2b0d2eb0 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -48,6 +48,7 @@ import ( Recipe "github.com/ramendr/recipe/api/v1alpha1" velero "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions" + clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" clrapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" // +kubebuilder:scaffold:imports ) @@ -212,6 +213,9 @@ var _ = BeforeSuite(func() { err = clrapiv1beta1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) + err = clusterv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = argocdv1alpha1hack.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred())
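For illustration, a minimal sketch of the ClusterClaim the reconciler is expected to create for a class carrying the required ramen label (the StorageClass name "rook-ceph-block" is only a placeholder): the claim is named "<prefix>.<class-name>", labeled with the owner label used for pruning, and carries the class name in spec.value.

    apiVersion: cluster.open-cluster-management.io/v1alpha1
    kind: ClusterClaim
    metadata:
      # claim name is the class prefix ("storage.class") joined with the class name
      name: storage.class.rook-ceph-block
      labels:
        # owner label that pruneClusterClaims matches on
        drclusterconfigs.ramendr.openshift.io/owner: ramen
    spec:
      # spec.value carries the class name, as set by ensureClusterClaim
      value: rook-ceph-block

As noted in the ClusterClaim CRD description, claims created on a managed cluster are collected into the status of the corresponding ManagedCluster on the hub.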