From 59549dbda196a5ccb250054264b845bd2f557ec6 Mon Sep 17 00:00:00 2001
From: Nir Soffer
Date: Wed, 20 Nov 2024 22:44:27 +0200
Subject: [PATCH] Delete the legacy test environment

The old scripts have not been maintained for a while and were replaced
by drenv and e2e. If someone wants to use the scripts, they can check
out an older version.

Fixes: #1657
Signed-off-by: Nir Soffer
---
 hack/README.md | 79 +-
 hack/cert-manager.sh | 22 -
 hack/curl-install.sh | 10 -
 hack/dev-rook-cluster.yaml | 62 --
 hack/dev-rook-rbdpool.yaml | 16 -
 hack/dev-rook-sc.yaml | 19 -
 hack/docker-client-install.sh | 23 -
 hack/docker-install.sh | 25 -
 hack/docker-uninstall.sh | 27 -
 hack/exit_stack.sh | 27 -
 hack/fuse-overlayfs-install.sh | 34 -
 hack/git-checkout.sh | 41 -
 hack/github-url.sh | 19 -
 hack/go-install.sh | 19 -
 hack/kubectl-install.sh | 21 -
 hack/kustomize-install.sh | 10 -
 hack/minikube-install.sh | 15 -
 hack/minikube-ramen.sh | 120 ---
 hack/minikube-rook-mirror-setup.sh | 99 --
 hack/minikube-rook-mirror-test.sh | 98 --
 hack/minikube-rook-setup.sh | 191 ----
 hack/minikube.sh | 15 -
 hack/minio-deployment.yaml | 143 ---
 hack/ocm-minikube-ramen.sh | 919 ----------------
 hack/ocm-minikube.sh | 983 ------------------
 hack/olm.sh | 41 -
 hack/podman-docker-install.sh | 28 -
 hack/podman-docker-uninstall.sh | 25 -
 hack/podman-install.sh | 21 -
 hack/podman-uninstall.sh | 16 -
 hack/recipe_e2e/README.md | 102 --
 hack/recipe_e2e/config/credentials-velero-minikube | 3 -
 hack/recipe_e2e/config/ramen_config.yaml | 39 -
 hack/recipe_e2e/config/ramen_config_base.yaml | 39 -
 hack/recipe_e2e/config/ramen_secret_minio.yaml | 10 -
 hack/recipe_e2e/config/s3_secret.yaml | 10 -
 hack/recipe_e2e/failback/vrg_busybox_secondary.yaml | 24 -
 hack/recipe_e2e/failover/vrg_busybox_primary.yaml | 23 -
 hack/recipe_e2e/protect/recipe_busybox.yaml | 61 --
 hack/recipe_e2e/protect/vrg_busybox_primary.yaml | 19 -
 hack/recipe_e2e/scripts/cleanup_s3.sh | 12 -
 hack/recipe_e2e/scripts/deploy_primary.sh | 8 -
 hack/recipe_e2e/scripts/failback.sh | 125 ---
 hack/recipe_e2e/scripts/failover.sh | 108 --
 hack/recipe_e2e/scripts/protect.sh | 68 --
 hack/recipe_e2e/scripts/recipe_e2e_functions.sh | 239 -----
 hack/recipe_e2e/scripts/reload_minikube_image.sh | 40 -
 hack/recipe_e2e/scripts/setup.sh | 86 --
 hack/recipe_e2e/scripts/teardown.sh | 68 --
 hack/rook-mirror-secret-template.yaml | 10 -
 hack/shell_option_store_restore.sh | 44 -
 hack/shio-demo.sh | 637 ------------
 hack/true_if_exit_status_and_stderr.sh | 29 -
 hack/uidmap-install.sh | 19 -
 hack/until_true_or_n.sh | 20 -
 hack/velero-install.sh | 31 -
 hack/velero-test.sh | 315 ------
 hack/velero-uninstall.sh | 9 -
 58 files changed, 2 insertions(+), 5364 deletions(-)
 delete mode 100644 hack/cert-manager.sh
 delete mode 100755 hack/curl-install.sh
 delete mode 100644 hack/dev-rook-cluster.yaml
 delete mode 100644 hack/dev-rook-rbdpool.yaml
 delete mode 100644 hack/dev-rook-sc.yaml
 delete mode 100755 hack/docker-client-install.sh
 delete mode 100644 hack/docker-install.sh
 delete mode 100755 hack/docker-uninstall.sh
 delete mode 100644 hack/exit_stack.sh
 delete mode 100755 hack/fuse-overlayfs-install.sh
 delete mode 100644 hack/git-checkout.sh
 delete mode 100644 hack/github-url.sh
 delete mode 100644 hack/go-install.sh
 delete mode 100755 hack/kubectl-install.sh
 delete mode 100755 hack/kustomize-install.sh
 delete mode 100755 hack/minikube-install.sh
 delete mode 100755 hack/minikube-ramen.sh
 delete mode 100755 hack/minikube-rook-mirror-setup.sh
 delete mode 100755 hack/minikube-rook-mirror-test.sh
 delete mode 100755 hack/minikube-rook-setup.sh
 delete mode 100644 hack/minikube.sh
 delete mode 100644 hack/minio-deployment.yaml
 delete mode 100755 hack/ocm-minikube-ramen.sh
 delete mode 100755 hack/ocm-minikube.sh
 delete mode 100644 hack/olm.sh
 delete mode 100755 hack/podman-docker-install.sh
 delete mode 100755 hack/podman-docker-uninstall.sh
 delete mode 100755 hack/podman-install.sh
 delete mode 100755 hack/podman-uninstall.sh
 delete mode 100644 hack/recipe_e2e/README.md
 delete mode 100644 hack/recipe_e2e/config/credentials-velero-minikube
 delete mode 100644 hack/recipe_e2e/config/ramen_config.yaml
 delete mode 100644 hack/recipe_e2e/config/ramen_config_base.yaml
 delete mode 100644 hack/recipe_e2e/config/ramen_secret_minio.yaml
 delete mode 100644 hack/recipe_e2e/config/s3_secret.yaml
 delete mode 100644 hack/recipe_e2e/failback/vrg_busybox_secondary.yaml
 delete mode 100644 hack/recipe_e2e/failover/vrg_busybox_primary.yaml
 delete mode 100644 hack/recipe_e2e/protect/recipe_busybox.yaml
 delete mode 100644 hack/recipe_e2e/protect/vrg_busybox_primary.yaml
 delete mode 100644 hack/recipe_e2e/scripts/cleanup_s3.sh
 delete mode 100644 hack/recipe_e2e/scripts/deploy_primary.sh
 delete mode 100644 hack/recipe_e2e/scripts/failback.sh
 delete mode 100644 hack/recipe_e2e/scripts/failover.sh
 delete mode 100644 hack/recipe_e2e/scripts/protect.sh
 delete mode 100644 hack/recipe_e2e/scripts/recipe_e2e_functions.sh
 delete mode 100644 hack/recipe_e2e/scripts/reload_minikube_image.sh
 delete mode 100644 hack/recipe_e2e/scripts/setup.sh
 delete mode 100644 hack/recipe_e2e/scripts/teardown.sh
 delete mode 100644 hack/rook-mirror-secret-template.yaml
 delete mode 100644 hack/shell_option_store_restore.sh
 delete mode 100755 hack/shio-demo.sh
 delete mode 100644 hack/true_if_exit_status_and_stderr.sh
 delete mode 100755 hack/uidmap-install.sh
 delete mode 100644 hack/until_true_or_n.sh
 delete mode 100755 hack/velero-install.sh
 delete mode 100755 hack/velero-test.sh
 delete mode 100755 hack/velero-uninstall.sh

diff --git a/hack/README.md b/hack/README.md
index d27dfe0c9..db1f38189 100644
--- a/hack/README.md
+++ b/hack/README.md
@@ -3,81 +3,6 @@ SPDX-FileCopyrightText: The RamenDR authors
 SPDX-License-Identifier: Apache-2.0
 -->
 
-# hack/
+# Hacking ramen
 
-## minikube-ramen.sh
-
-Ramen dr-cluster end-to-end test script
-
-- cluster names are specified with the `cluster_names` variable
-  - `cluster1` and `cluster2` by default
-- application sample namespace name is specified with the `application_sample_namespace_name`
-  variable
-  - `default` by default
-- takes a list of functions to execute:
-  - `deploy` (default) deploys the environment including:
-    minikube clusters, rook-ceph, minio s3 stores, ramen
-  - `undeploy` undeploys the things deployed by `deploy`
-  - `manager_redeploy` rebuilds and redeploys ramen manager
-  - `application_sample_deploy` deploys busybox-sample app to specified cluster
-  - `application_sample_undeploy` undeploys busybox-sample app from specified cluster
-  - `application_sample_vrg_deploy` deploys busybox-sample app vrg to 1st cluster
-  - `application_sample_vrg_undeploy` undeploys busybox-sample app vrg from 1st
-    cluster
-
-## ocm-minikube-ramen.sh
-
-open-cluster-management Ramen end-to-end test script
-
-- can be run from any directory; writes temporary files to /tmp
-- installs some dependencies (e.g. minikube, golang, etc), but not necessarily
-  all (e.g. kvm on Linux distributions other than RHEL and Ubuntu)
-- hub cluster name is specified with the `hub_cluster_name` variable
-  - defaults to `hub`
-- managed cluster names are specified with the `spoke_cluster_names` variable
-  - `cluster1` and `hub` by default
-  - a hub may also be a managed cluster
-- takes a list of functions to execute:
-  - `deploy` (default) deploys the environment including:
-    minikube clusters, ocm, rook-ceph, minio s3 stores, olm, ramen
-    - calls `ramen_images_build_and_archive` which builds and deploys ramen
-      from the source rooted from the parent directory of the script
-      - skips the ramen manager image build if the `skip_ramen_build` variable's
-        value is something other than an empty string or `false`
-    - calls `ramen_deploy` which deploys ramen hub operator, crds, drpolicy
-      and samples channel
-  - `application_sample_deploy` deploys the busybox-sample app to 1st managed cluster
-    named
-  - `application_sample_failover` fails over the busybox-sample app to 2nd managed
-    cluster named
-  - `application_sample_relocate` relocates the busybox-sample app to 1st managed
-    cluster named
-  - `application_sample_undeploy` undeploys the busybox-sample app from the cluster
-    it was last deployed to
-  - `undeploy` undeploys the things deployed by `deploy`
-    - calls `ramen_undeploy` which undeploys the things deployed by `ramen_deploy`
-    - calls `rook_ceph_undeploy` which deletes the minikube clusters and rook-ceph
-      virsh volumes and alone can undeploy the environment quickly by skipping
-      component undeployments
-  - see source for several other routines to deploy and undeploy various copmonents
-    individually
-- is designed to be idempotent so that deploy functions can be rerun without having
-  to first undeploy
-  - one exception to this is image deployment
-    - for example, if ramen image changes, it can be deployed by undeploying
-      and redeploying ramen
-  - tip: if an error is encountered leaving something in a state such that an undeployment
-    fails, consider redeploying to get it into a known state
-
-Examples:
-
-```sh
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh application_sample_deploy
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh application_sample_failover
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh application_sample_relocate
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh application_sample_undeploy
-./ocm-minikube-ramen.sh ramen_build_and_archive
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh ramen_undeploy
-spoke_cluster_names=cluster1\ cluster2 ./ocm-minikube-ramen.sh ramen_deploy
-```
+This directory includes various tools for ramen developers.
diff --git a/hack/cert-manager.sh b/hack/cert-manager.sh deleted file mode 100644 index 6c405da2b..000000000 --- a/hack/cert-manager.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 - -cert_manager_kubectl_context() { - kubectl --context $1 $2 -f https://github.com/cert-manager/cert-manager/releases/download/v1.13.1/cert-manager.yaml -} -cert_manager_deploy_context() { - cert_manager_kubectl_context $1 apply -} -cert_manager_undeploy_context() { - cert_manager_kubectl_context $1 delete\ --ignore-not-found -} -cert_manager_unset() { - unset -f cert_manager_unset - unset -f cert_manager_undeploy_context - unset -f cert_manager_deploy_context - unset -f cert_manager_kubectl_context -} diff --git a/hack/curl-install.sh b/hack/curl-install.sh deleted file mode 100755 index 6a76b6de6..000000000 --- a/hack/curl-install.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 -if ! command -v curl; then - wget -O ${1}/curl https://github.com/moparisthebest/static-curl/releases/download/v7.76.0/curl-amd64 - chmod +x ${1}/curl -fi diff --git a/hack/dev-rook-cluster.yaml b/hack/dev-rook-cluster.yaml deleted file mode 100644 index 977ea95cc..000000000 --- a/hack/dev-rook-cluster.yaml +++ /dev/null @@ -1,62 +0,0 @@ -# Copy of: -# github.com/rook/rook/blob/release-1.8/deploy/examples/cluster-test.yaml ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: rook-config-override - namespace: rook-ceph -data: - config: | - [global] - osd_pool_default_size = 1 - mon_warn_on_pool_no_redundancy = false - bdev_flock_retry = 20 - bluefs_buffered_io = false ---- -apiVersion: ceph.rook.io/v1 -kind: CephCluster -metadata: - name: my-cluster - namespace: rook-ceph -spec: - # On Minikube /var/lib/rook is not persisted, but anything under /data is. 
- # https://minikube.sigs.k8s.io/docs/handbook/persistent_volumes/#a-note-on-mounts-persistence-and-minikube-hosts - dataDirHostPath: /data/rook - cephVersion: - image: quay.io/ceph/ceph:v16.2.6 - allowUnsupported: true - mon: - count: 1 - allowMultiplePerNode: true - mgr: - count: 1 - allowMultiplePerNode: true - dashboard: - enabled: true - crashCollector: - disable: true - storage: - useAllNodes: true - useAllDevices: true - healthCheck: - daemonHealth: - mon: - interval: 45s - timeout: 600s - disruptionManagement: - managePodBudgets: true - network: - provider: host ---- -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: device-health-metrics - namespace: rook-ceph -spec: - name: device_health_metrics - failureDomain: host - replicated: - size: 1 - requireSafeReplicaSize: false diff --git a/hack/dev-rook-rbdpool.yaml b/hack/dev-rook-rbdpool.yaml deleted file mode 100644 index 1afe61c54..000000000 --- a/hack/dev-rook-rbdpool.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: ceph.rook.io/v1 -kind: CephBlockPool -metadata: - name: replicapool - namespace: rook-ceph -spec: - replicated: - size: 1 - requireSafeReplicaSize: false - mirroring: - enabled: true - mode: image - snapshotSchedules: - - interval: 2m - startTime: 14:00:00-05:00 diff --git a/hack/dev-rook-sc.yaml b/hack/dev-rook-sc.yaml deleted file mode 100644 index a36ab388d..000000000 --- a/hack/dev-rook-sc.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - name: rook-ceph-block -provisioner: rook-ceph.rbd.csi.ceph.com -parameters: - clusterID: rook-ceph - pool: replicapool - imageFormat: "2" - imageFeatures: layering - csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph - csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner - csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph - csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node - csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph - csi.storage.k8s.io/fstype: ext4 -reclaimPolicy: Delete diff --git a/hack/docker-client-install.sh b/hack/docker-client-install.sh deleted file mode 100755 index e7746ba3c..000000000 --- a/hack/docker-client-install.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck shell=sh disable=2046,2086 -set -x -set -- $1 20.10.6 -if command -v $1/docker; then - IFS=. read -r x y z <<-a - $($1/docker version --format '{{.Client.Version}}' 2>/dev/null) - a - IFS=. read -r x1 y1 z1 <<-a - $2 - a - if test $x -gt $x1 || { test $x -eq $x1 && { test $y -gt $y1 || { test $y -eq $y1 && test $z -ge $z1;};};}; then - unset -v x y z x1 y1 z1 - exit - fi - unset -v x y z x1 y1 z1 -fi -$(dirname $0)/curl-install.sh $1 -curl -L https://download.docker.com/linux/static/stable/x86_64/docker-$2.tgz | tar -xzvC$1 --strip-components 1 docker/docker diff --git a/hack/docker-install.sh b/hack/docker-install.sh deleted file mode 100644 index 1fd110afa..000000000 --- a/hack/docker-install.sh +++ /dev/null @@ -1,25 +0,0 @@ -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck shell=sh disable=2046,2086 -docker_install() -{ - if test -w /var/run/docker.sock; then - DOCKER_HOST=unix:///var/run/docker.sock - return - fi - # https://docs.docker.com/engine/security/rootless/#install - if ! 
command -v dockerd-rootless-setuptool.sh - # TODO or less than version ${2} - then - $(dirname ${0})/curl-install.sh ${1} - version_name=20.10.6 - curl -L https://download.docker.com/linux/static/stable/x86_64/docker-${version_name}.tgz | tar -xzvC${1} --strip-components 1 - curl -L https://download.docker.com/linux/static/stable/x86_64/docker-rootless-extras-${version_name}.tgz | tar -xzvC${1} --strip-components 1 - unset -v version_name - fi - $(dirname ${0})/uidmap-install.sh - dockerd-rootless-setuptool.sh install - # shellcheck disable=2034 - DOCKER_HOST=unix:///run/user/$(id -u)/docker.sock -} diff --git a/hack/docker-uninstall.sh b/hack/docker-uninstall.sh deleted file mode 100755 index 5d3f0ce52..000000000 --- a/hack/docker-uninstall.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -set -x -set -e -if test -x "${1}"/dockerd-rootless-setuptool.sh; then - # https://docs.docker.com/engine/security/rootless/#uninstall - "${1}"/dockerd-rootless-setuptool.sh uninstall - rootlesskit rm -rf ~/.local/share/docker - rm -f\ - "${1}"/containerd\ - "${1}"/containerd-shim\ - "${1}"/containerd-shim-runc-v2\ - "${1}"/ctr\ - "${1}"/docker\ - "${1}"/docker-init\ - "${1}"/docker-proxy\ - "${1}"/dockerd\ - "${1}"/dockerd-rootless-setuptool.sh\ - "${1}"/dockerd-rootless.sh\ - "${1}"/rootlesskit\ - "${1}"/rootlesskit-docker-proxy\ - "${1}"/runc\ - "${1}"/vpnkit -fi diff --git a/hack/exit_stack.sh b/hack/exit_stack.sh deleted file mode 100644 index 2222bb217..000000000 --- a/hack/exit_stack.sh +++ /dev/null @@ -1,27 +0,0 @@ -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck shell=sh -trap 'set -- $?; trap - EXIT; eval $exit_stack; echo exit status: $1' EXIT -trap '{ set +x; } 2>/dev/null; trap - EXIT; eval $exit_stack' EXIT -trap 'trap - ABRT' ABRT -trap 'trap - QUIT' QUIT -trap 'trap - TERM' TERM -trap 'trap - INT' INT -trap 'trap - HUP' HUP -exit_stack_push() -{ - exit_stack=$*\;$exit_stack -} 2>/dev/null -exit_stack_push unset -v exit_stack -exit_stack_push unset -f exit_stack_push -exit_stack_pop() -{ - { set +x; } 2>/dev/null - IFS=\; read -r x exit_stack <<-a - $exit_stack - a - eval set -x; $x - { unset -v x; } 2>/dev/null -} -exit_stack_push unset -f exit_stack_pop diff --git a/hack/fuse-overlayfs-install.sh b/hack/fuse-overlayfs-install.sh deleted file mode 100755 index 9d617f29c..000000000 --- a/hack/fuse-overlayfs-install.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 -set -x -# https://github.com/containers/fuse-overlayfs -if ! command -v fuse-overlayfs; then - # shellcheck disable=1091 - . /etc/os-release - case $NAME in - Ubuntu) - IFS=. 
read -r year month <<-a - $VERSION_ID - a - # sudo apt-get update - if test $year -gt 19 || { test $year -eq 19 && test $month -ge 04; } - then - sudo apt-get -y install fuse-overlayfs - else - sudo apt-get -y install buildah - sudo apt-get -y install libfuse2 - ls /dev/fuse - fuse_overlayfs_directory_path_name=/tmp/fuse-overlayfs - git clone https://github.com/containers/fuse-overlayfs $fuse_overlayfs_directory_path_name - buildah bud -v $fuse_overlayfs_directory_path_name:/build/fuse-overlayfs -t fuse-overlayfs -f $fuse_overlayfs_directory_path_name/Containerfile.static.ubuntu $fuse_overlayfs_directory_path_name - sudo cp $fuse_overlayfs_directory_path_name/fuse-overlayfs /usr/bin - unset -v fuse_overlayfs_directory_path_name - sudo rm -rf ~/.local/share/containers - fi - ;; - esac -fi diff --git a/hack/git-checkout.sh b/hack/git-checkout.sh deleted file mode 100644 index 316f1d0e5..000000000 --- a/hack/git-checkout.sh +++ /dev/null @@ -1,41 +0,0 @@ -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck shell=sh disable=2086 -git_checkout() -{ - git --git-dir $1/.git --work-tree $1 checkout $2 -} -git_checkout_undo() -{ - git_checkout $1 - -} -git_clone_and_checkout() -{ - set +e - git clone $1/$2 - # fatal: destination path '$2' already exists and is not an empty directory. - set -e - git --git-dir $2/.git fetch $1/$2 $3 - git_checkout $2 $4 -} -git_branch_delete() -{ - set +e - git --git-dir $1/.git branch --delete $3 $2 - # error: branch '' not found. - set -e -} -git_branch_delete_force() -{ - git_branch_delete $1 $2 --force -} -git_checkout_unset() -{ - unset -f git_checkout_unset - unset -f git_branch_delete_force - unset -f git_branch_delete - unset -f git_clone_and_checkout - unset -f git_checkout_undo - unset -f git_checkout -} diff --git a/hack/github-url.sh b/hack/github-url.sh deleted file mode 100644 index 035557c67..000000000 --- a/hack/github-url.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -github_url_file() -{ - echo https://raw.githubusercontent.com/"$1"/"$3"/"$2" -} -github_url_directory() -{ - echo https://github.com/"$1"/"$2"?ref="$3" -} -github_url_unset() -{ - unset -f github_url_file - unset -f github_url_directory - unset -f github_url_unset -} diff --git a/hack/go-install.sh b/hack/go-install.sh deleted file mode 100644 index 44811d0e5..000000000 --- a/hack/go-install.sh +++ /dev/null @@ -1,19 +0,0 @@ -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck shell=sh disable=2046,2086 -go_install() -{ - if ! command -v go - # TODO or version less than ${2} - #|| $(go version | { read _ _ v _; echo ${v#go}; }) - then - PATH=${1}/go/bin:${PATH} - if ! command -v go - then - mkdir -p ${1}/bin - $(dirname ${0})/curl-install.sh ${1}/bin - curl -L https://golang.org/dl/go1.16.2.linux-amd64.tar.gz | tar -C${1} -xz - fi - fi -} diff --git a/hack/kubectl-install.sh b/hack/kubectl-install.sh deleted file mode 100755 index 42f452507..000000000 --- a/hack/kubectl-install.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2046,2086 -if ! command -v kubectl\ -||\ - test $(kubectl version --client --short|cut -dv -f2|cut -d. -f1) -lt ${2}\ -||\ -{ - test $(kubectl version --client --short|cut -dv -f2|cut -d. -f1) -eq ${2}\ - &&\ - test $(kubectl version --client --short|cut -dv -f2|cut -d. 
-f2) -lt ${3} -} -then - # https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#install-kubectl-binary-with-curl-on-linux - $(dirname ${0})/curl-install.sh ${1} - curl -LRo ${1}/kubectl https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl - chmod +x ${1}/kubectl -fi diff --git a/hack/kustomize-install.sh b/hack/kustomize-install.sh deleted file mode 100755 index 355801cd3..000000000 --- a/hack/kustomize-install.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2046,2086 -if ! command -v kustomize; then - $(dirname ${0})/curl-install.sh ${1} - curl -L https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv4.0.5/kustomize_v4.0.5_linux_amd64.tar.gz | tar -C${1} -xz -fi diff --git a/hack/minikube-install.sh b/hack/minikube-install.sh deleted file mode 100755 index 8515482cf..000000000 --- a/hack/minikube-install.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2046,2086 -if ! command -v minikube; then - # https://minikube.sigs.k8s.io/docs/start/ - $(dirname ${0})/curl-install.sh ${1} - minikube_version=latest - minikube_version=v1.24.0 - curl -LRo ${1}/minikube https://storage.googleapis.com/minikube/releases/${minikube_version}/minikube-linux-amd64 - unset -v minikube_version - chmod +x ${1}/minikube -fi diff --git a/hack/minikube-ramen.sh b/hack/minikube-ramen.sh deleted file mode 100755 index 9a6334e7d..000000000 --- a/hack/minikube-ramen.sh +++ /dev/null @@ -1,120 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 -set -e -ramen_hack_directory_path_name=$(dirname $0) -cluster_names=${cluster_names:-cluster1\ cluster2} -deploy() { - spoke_cluster_names=$cluster_names $ramen_hack_directory_path_name/ocm-minikube.sh minikube_start_spokes - hub_cluser_name=""\ - spoke_cluster_names=$cluster_names $ramen_hack_directory_path_name/ocm-minikube-ramen.sh\ - rook_ceph_deploy\ - cert_manager_deploy\ - minio_deploy_spokes\ - ramen_manager_image_build_and_archive\ - ramen_deploy_spokes\ - -} -undeploy() { - hub_cluser_name=""\ - spoke_cluster_names=$cluster_names $ramen_hack_directory_path_name/ocm-minikube-ramen.sh\ - ramen_undeploy_spokes\ - minio_undeploy_spokes\ - cert_manager_undeploy\ - rook_ceph_undeploy\ - -} -manager_image_build() { - $ramen_hack_directory_path_name/ocm-minikube-ramen.sh ramen_manager_image_build_and_archive -} -manager_image_deployed() { - for cluster_name in $cluster_names; do - minikube -p $cluster_name ssh -- docker images\|grep ramen - done; unset -v cluster_name -} -manager_deploy() { - spoke_cluster_names=$cluster_names $ramen_hack_directory_path_name/ocm-minikube-ramen.sh ramen_deploy_spokes - manager_image_deployed -} -manager_undeploy() { - spoke_cluster_names=$cluster_names $ramen_hack_directory_path_name/ocm-minikube-ramen.sh ramen_undeploy_spokes -} -manager_redeploy() { - manager_undeploy& - manager_image_build& - wait - manager_deploy -} -manager_log() { - kubectl --context "$1" -nramen-system logs deploy/ramen-dr-cluster-operator manager -} -application_sample_namespace_name=${application_sample_namespace_name:-default} -application_sample_namespace_deploy() { - kubectl create namespace $application_sample_namespace_name --dry-run=client -oyaml|kubectl --context $1 apply -f- -} 
-application_sample_namespace_undeploy() { - kubectl --context "$1" delete namespace $application_sample_namespace_name -} -application_sample_yaml() { - kubectl create --dry-run=client -oyaml --namespace "$application_sample_namespace_name" -k https://github.com/RamenDR/ocm-ramen-samples/busybox -} -application_sample_deploy() { - application_sample_yaml|kubectl --context "$1" apply -f - -} -application_sample_undeploy() { - application_sample_yaml|kubectl --context "$1" delete -f - --ignore-not-found -} -application_sample_vrg_yaml() { - cat <<-a - --- - apiVersion: ramendr.openshift.io/v1alpha1 - kind: VolumeReplicationGroup - metadata: - name: bb - namespace: $2 - labels: - $3 - spec: - async: - replicationClassSelector: {} - schedulingInterval: 1m - pvcSelector: - matchLabels: - appname: busybox - replicationState: $1 - s3Profiles: -$(for cluster_name in $cluster_names; do echo \ \ -\ minio-on-$cluster_name; done; unset -v cluster_name)${vrg_appendix-} - a -} -application_sample_vrg_deploy() { - application_sample_vrg_yaml primary "$2" "$3"|kubectl --context "$1" apply -f - -} -application_sample_vrg_deploy_sec() { - application_sample_vrg_yaml secondary "$2" "$3"|kubectl --context "$1" apply -f - -} -application_sample_vrg_undeploy() { - application_sample_vrg_yaml primary "$2" "$3"|kubectl --context "$1" delete --ignore-not-found -f - -} -"${@:-deploy}" -unset -f application_sample_vrg_undeploy -unset -f application_sample_vrg_deploy -unset -f application_sample_vrg_kubectl -unset -f application_sample_undeploy -unset -f application_sample_deploy -unset -f application_sample_kubectl -unset -f application_sample_namespace_undeploy -unset -f application_sample_namespace_deploy -unset -v application_sample_namespace_name -unset -f manager_log -unset -f manager_redeploy -unset -f manager_undeploy -unset -f manager_deploy -unset -f manager_image_deployed -unset -f manager_image_build -unset -f undeploy -unset -f deploy -unset -v cluster_names -unset -v ramen_hack_directory_path_name diff --git a/hack/minikube-rook-mirror-setup.sh b/hack/minikube-rook-mirror-setup.sh deleted file mode 100755 index 5263e1b26..000000000 --- a/hack/minikube-rook-mirror-setup.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -set -x -set -e -o pipefail -scriptdir="$(dirname "$(realpath "$0")")" - -## Variables -PRIMARY_CLUSTER="${PRIMARY_CLUSTER:-hub}" -SECONDARY_CLUSTER="${SECONDARY_CLUSTER:-cluster1}" -POOL_NAME="replicapool" - -## Usage -usage() -{ - set +x - echo "Usage:" - echo " $0" - echo " Available environment variables:" - echo " minikube primary cluster PRIMARY_CLUSTER ${PRIMARY_CLUSTER}" - echo " minikube secondary cluster SECONDARY_CLUSTER ${SECONDARY_CLUSTER}" - exit 1 -} - -function wait_for_condition() { - local count=61 - local condition=${1} - local result - shift - - while ((count > 0)); do - result=$("${@}") - if [[ "$result" == "$condition" ]]; then - return 0 - fi - count=$((count - 1)) - sleep 5 - done - - echo "Failed to meet $condition for command $*" - exit 1 -} - -SECONDARY_CLUSTER_PEER_TOKEN_SECRET_NAME=$(kubectl get cephblockpools.ceph.rook.io "${POOL_NAME}" --context="${SECONDARY_CLUSTER}" -nrook-ceph -o jsonpath='{.status.info.rbdMirrorBootstrapPeerSecretName}') -SECONDARY_CLUSTER_SECRET=$(kubectl get secret -n rook-ceph "${SECONDARY_CLUSTER_PEER_TOKEN_SECRET_NAME}" --context="${SECONDARY_CLUSTER}" -o jsonpath='{.data.token}'| base64 -d) -SECONDARY_CLUSTER_SITE_NAME=$(kubectl get 
cephblockpools.ceph.rook.io "${POOL_NAME}" --context="${SECONDARY_CLUSTER}" -nrook-ceph -o jsonpath='{.status.mirroringInfo.site_name}') - -echo SECONDARY_CLUSTER_PEER_TOKEN_SECRET_NAME is "$SECONDARY_CLUSTER_PEER_TOKEN_SECRET_NAME" -echo Token for the secondary cluster is "$SECONDARY_CLUSTER_SECRET" -echo SECONDARY_CLUSTER_SITE_NAME is "${SECONDARY_CLUSTER_SITE_NAME}" - -MIRROR_SECRET_YAML=$(mktemp --suffix .yaml) -cp "${scriptdir}/rook-mirror-secret-template.yaml" "${MIRROR_SECRET_YAML}" -sed -e "s,,${SECONDARY_CLUSTER_SITE_NAME}," -i "${MIRROR_SECRET_YAML}" -sed -e "s,,${POOL_NAME}," -i "${MIRROR_SECRET_YAML}" -sed -e "s,,${SECONDARY_CLUSTER_SECRET}," -i "${MIRROR_SECRET_YAML}" -kubectl apply -f "${MIRROR_SECRET_YAML}" --context="${PRIMARY_CLUSTER}" -rm -f "${MIRROR_SECRET_YAML}" - -rbd_pool_patch="{\"spec\":{\"mirroring\":{\"peers\":{\"secretNames\":[\"${SECONDARY_CLUSTER_SITE_NAME}\"]}}}}" -kubectl patch CephBlockPool ${POOL_NAME} -n rook-ceph --type merge --patch "${rbd_pool_patch}" --context "$PRIMARY_CLUSTER" - -cat < 0)); do - result=$("${@}") - if [[ "$result" == "$condition" ]]; then - return 0 - fi - count=$((count - 1)) - sleep 5 - done - - echo "Failed to meet $condition for command $*" - exit 1 -} -# shellcheck source=./until_true_or_n.sh disable=1091 -. "$(dirname "$0")"/until_true_or_n.sh - -## Usage -usage() -{ - set +x - echo "Usage:" - echo " $0" - echo " Available environment variables:" - echo " minikube primary cluster PRIMARY_CLUSTER ${PRIMARY_CLUSTER}" - echo " minikube secondary cluster SECONDARY_CLUSTER ${SECONDARY_CLUSTER}" - exit 1 -} - -cat < 0)); do - result=$("${@}") - if [[ "$result" == "$condition" ]]; then - return 0 - fi - count=$((count - 1)) - sleep 5 - done - - echo "Failed to meet $condition for command $*" - exit 1 -} - -set +x -echo "Using environment:" -echo " minikube PROFILE ${PROFILE}" -echo " minikube kvm2 image dir IMAGE_DIR ${IMAGE_DIR}" -echo " rook source ROOK_SRC ${ROOK_SRC}" -set -x - -if [[ $1 == "delete" ]] -then - minikube delete --profile="${PROFILE}" - virsh vol-delete --pool "${POOL_NAME}" "${IMAGE_NAME}-${PROFILE}" - exit 0 -fi - -if [[ $1 == "stop" ]]; then - minikube stop --profile="${PROFILE}" - exit 0 -fi - -if [[ $1 == "start" ]]; then - minikube start --profile="${PROFILE}" - exit 0 -fi - -if [[ $1 == "create" ]] -then - ### $1 == "create" - # TODO: Check if already created and bail out! - - ## Create and attach an OSD disk for Ceph ## - set +e - pool=$(virsh pool-dumpxml $POOL_NAME) - # error: failed to get pool 'minikube' - # error: Storage pool not found: no storage pool with matching name 'minikube' - set -e - pool_target=${pool#*} - pool_target=${pool_target%*} - pool_target_path=${pool_target#*} - pool_target_path=${pool_target_path%*} - pool_target_path_set() - { - echo "${pool%*}"\"${pool_target%*}"\"$IMAGE_DIR"\"${pool_target#*}"\"${pool#*}" >/tmp/$$ - virsh pool-define /tmp/$$ - rm -f /tmp/$$ - } - pool_target_path_set_unqualified() - { - test 'Pool '$POOL_NAME' XML configuration edited.' = "$(\ - EDITOR=sed\ -i\ \''s,.*,'$IMAGE_DIR','\' virsh pool-edit $POOL_NAME \ - )" - } - case $pool_target_path in - "$IMAGE_DIR") - ;; - ?*) - pool_target_path_set - virsh pool-destroy $POOL_NAME - virsh pool-start $POOL_NAME - ;; - *) - virsh pool-create-as --name "${POOL_NAME}" --type dir --target "${IMAGE_DIR}" - ;; - esac - unset -f pool_target_path_set pool_target_path_set_unqualified - unset -v pool pool_target pool_target_path - if ! 
virsh vol-info --pool "$POOL_NAME" "${IMAGE_NAME}-${PROFILE}"; then - virsh vol-create-as --pool "${POOL_NAME}" --name "${IMAGE_NAME}-${PROFILE}" --capacity 32G --format qcow2 - fi - if ! virsh domblkinfo --domain "$PROFILE" --device vdb; then - sudo virsh attach-disk --domain "${PROFILE}" --source "${IMAGE_DIR}/${IMAGE_NAME}-${PROFILE}" --target vdb --persistent --driver qemu --subdriver qcow2 --targetbus virtio - fi - set +e - minikube ssh 'echo 1 | sudo tee /sys/bus/pci/rescan > /dev/null ; dmesg | grep virtio_blk' --profile="${PROFILE}" - # ssh: Process exited with status 1 - set -e - - ## Install rook-ceph ## - kubectl apply -f "${ROOK_SRC}/common.yaml" --context="${PROFILE}" - kubectl apply -f "${ROOK_SRC}/crds.yaml" --context="${PROFILE}" - - # Enable CSI addons (for volume replication) - kubectl apply -f "${CSIADDON_SRC}/crds.yaml" --context="${PROFILE}" - kubectl apply -f "${CSIADDON_SRC}/rbac.yaml" --context="${PROFILE}" - kubectl apply -f "${CSIADDON_SRC}/setup-controller.yaml" --context="${PROFILE}" - - # Applicable from rook 1.10 onwards - set -- "$(mktemp --directory)" - cat <"$1"/kustomization.yaml -resources: - - ${ROOK_SRC}/operator.yaml -patchesJson6902: - - target: - kind: ConfigMap - name: rook-ceph-operator-config - namespace: rook-ceph - patch: |- - - op: add - path: /data/CSI_ENABLE_CSIADDONS - value: 'true' - - op: add - path: /data/ROOK_CSIADDONS_IMAGE - value: quay.io/csiaddons/k8s-sidecar:latest - - op: add - path: /data/CSI_ENABLE_OMAP_GENERATOR - value: 'true' - - op: add - path: /data/ROOK_CSI_ALLOW_UNSUPPORTED_VERSION - value: 'true' - - op: add - path: /data/ROOK_CSI_CEPH_IMAGE - value: quay.io/cephcsi/cephcsi:canary -a - kubectl --context "$PROFILE" apply -k "$1" - rm -rf "$1" - set -- - # Create a dev ceph cluster - kubectl apply -f "${scriptdir}/dev-rook-cluster.yaml" --context="${PROFILE}" - kubectl apply -f "${ROOK_SRC}/toolbox.yaml" --context="${PROFILE}" - - # Create a mirroring enabled RBD pool - kubectl apply -f "${scriptdir}/dev-rook-rbdpool.yaml" --context="${PROFILE}" - - # Create a StorageClass - kubectl apply -f "${scriptdir}"/dev-rook-sc.yaml --context="${PROFILE}" - - # Ensure the pool is created and ready - wait_for_condition "Ready" kubectl get cephblockpool -n rook-ceph replicapool -o jsonpath='{.status.phase}' --context="${PROFILE}" - wait_for_condition "pool-peer-token-replicapool" kubectl get cephblockpool -n rook-ceph replicapool -o jsonpath='{.status.info.rbdMirrorBootstrapPeerSecretName}' --context="${PROFILE}" - echo Setup succesful! - exit 0 -fi - -usage diff --git a/hack/minikube.sh b/hack/minikube.sh deleted file mode 100644 index 54948d7d3..000000000 --- a/hack/minikube.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 -minikube_minio_url() -{ - minikube --profile $1 -n minio service --url minio -} -minikube_unset() -{ - unset -f minikube_unset - unset -f minikube_minio_url -} diff --git a/hack/minio-deployment.yaml b/hack/minio-deployment.yaml deleted file mode 100644 index 847345704..000000000 --- a/hack/minio-deployment.yaml +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2017 the Velero contributors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. ---- -apiVersion: v1 -kind: Namespace -metadata: - name: minio ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: minio - name: minio-config-pvc - labels: - component: minio -spec: - accessModes: ["ReadWriteOnce"] - storageClassName: "rook-ceph-block" - resources: - requests: - storage: 10Gi ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - namespace: minio - name: minio-storage-pvc - labels: - component: minio -spec: - accessModes: ["ReadWriteOnce"] - storageClassName: "rook-ceph-block" - resources: - requests: - storage: 10Gi ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - namespace: minio - name: minio - labels: - component: minio -spec: - strategy: - type: Recreate - selector: - matchLabels: - component: minio - template: - metadata: - labels: - component: minio - spec: - volumes: - - name: storage - persistentVolumeClaim: - claimName: minio-storage-pvc - readOnly: false - - name: config - persistentVolumeClaim: - claimName: minio-config-pvc - readOnly: false - containers: - - name: minio - image: quay.io/minio/minio:latest - imagePullPolicy: IfNotPresent - args: - - server - - /storage - - --config-dir=/config - env: - - name: MINIO_ACCESS_KEY - value: "minio" - - name: MINIO_SECRET_KEY - value: "minio123" - ports: - - containerPort: 9000 - hostPort: 9000 - volumeMounts: - - name: storage - mountPath: "/storage" - - name: config - mountPath: "/config" ---- -apiVersion: v1 -kind: Service -metadata: - namespace: minio - name: minio - labels: - component: minio -spec: - type: NodePort - ports: - - port: 9000 - targetPort: 9000 - protocol: TCP - nodePort: 30000 - selector: - component: minio ---- -apiVersion: batch/v1 -kind: Job -metadata: - namespace: minio - name: minio-setup - labels: - component: minio -spec: - template: - metadata: - name: minio-setup - spec: - restartPolicy: OnFailure - volumes: - - name: config - persistentVolumeClaim: - claimName: minio-config-pvc - readOnly: false - containers: - - name: mc - image: quay.io/minio/mc:latest - imagePullPolicy: IfNotPresent - command: - - /bin/sh - - -c - - "mc --config-dir=/config config host add ramen http://minio:9000 - minio minio123 && mc --config-dir=/config mb -p ramen/bucket" - volumeMounts: - - name: config - mountPath: "/config" diff --git a/hack/ocm-minikube-ramen.sh b/hack/ocm-minikube-ramen.sh deleted file mode 100755 index 3fc217fb1..000000000 --- a/hack/ocm-minikube-ramen.sh +++ /dev/null @@ -1,919 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=1090,2046,2086,1091 -set -e -ramen_hack_directory_path_name=$(dirname $0) -. $ramen_hack_directory_path_name/exit_stack.sh -. $ramen_hack_directory_path_name/true_if_exit_status_and_stderr.sh -exit_stack_push unset -f true_if_exit_status_and_stderr -. $ramen_hack_directory_path_name/until_true_or_n.sh -exit_stack_push unset -f until_true_or_n -. $ramen_hack_directory_path_name/olm.sh -exit_stack_push olm_unset -. $ramen_hack_directory_path_name/minikube.sh -exit_stack_push minikube_unset -. 
$ramen_hack_directory_path_name/cert-manager.sh; exit_stack_push cert_manager_unset -exit_stack_push unset -v ramen_hack_directory_path_name -rook_ceph_deploy_spoke() -{ - PROFILE=$1 $ramen_hack_directory_path_name/minikube-rook-setup.sh create -} -exit_stack_push unset -f rook_ceph_deploy_spoke -rook_ceph_mirrors_deploy() -{ - PRIMARY_CLUSTER=$1 SECONDARY_CLUSTER=$2 $ramen_hack_directory_path_name/minikube-rook-mirror-setup.sh - PRIMARY_CLUSTER=$2 SECONDARY_CLUSTER=$1 $ramen_hack_directory_path_name/minikube-rook-mirror-setup.sh - PRIMARY_CLUSTER=$1 SECONDARY_CLUSTER=$2 $ramen_hack_directory_path_name/minikube-rook-mirror-test.sh - PRIMARY_CLUSTER=$2 SECONDARY_CLUSTER=$1 $ramen_hack_directory_path_name/minikube-rook-mirror-test.sh -} -exit_stack_push unset -f rook_ceph_mirrors_deploy -rook_ceph_undeploy_spoke() -{ - PROFILE=$1 $ramen_hack_directory_path_name/minikube-rook-setup.sh delete -} -exit_stack_push unset -f rook_ceph_undeploy_spoke -minio_deploy() -{ - kubectl --context $1 apply -f $ramen_hack_directory_path_name/minio-deployment.yaml - date - kubectl --context $1 -n minio wait deployments/minio --for condition=available --timeout 60s - date -} -exit_stack_push unset -f minio_deploy -minio_undeploy() -{ - kubectl --context $1 delete -f $ramen_hack_directory_path_name/minio-deployment.yaml -} -exit_stack_push unset -f minio_undeploy -minio_deploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do minio_deploy $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f minio_deploy_spokes -minio_undeploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do minio_undeploy $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f minio_undeploy_spokes -image_registry_port_number=5000 -exit_stack_push unset -v image_registry_port_number -image_registry_address=localhost:$image_registry_port_number -exit_stack_push unset -v image_registry_address -image_registry_deployment_name=myregistry -exit_stack_push unset -v image_registry_deployment_name -image_registry_container_image_reference=docker.io/library/registry:2 -exit_stack_push unset -v image_registry_container_image_reference -image_registry_container_deploy_command="docker run -d --name $image_registry_deployment_name -p $image_registry_port_number:$image_registry_port_number $image_registry_container_image_reference" -exit_stack_push unset -v image_registry_container_deploy_command -image_registry_container_undeploy_command="docker container stop $image_registry_deployment_name;docker container rm -v $image_registry_deployment_name" -exit_stack_push unset -v image_registry_container_undeploy_command -image_registry_container_deploy_localhost() -{ - $image_registry_container_deploy_command -} -exit_stack_push unset -f image_registry_container_deploy_localhost -image_registry_container_undeploy_localhost() -{ - eval $image_registry_container_undeploy_command -} -exit_stack_push unset -f image_registry_container_undeploy_localhost -image_registry_container_deploy_cluster() -{ - minikube -p $1 ssh -- "$image_registry_container_deploy_command" -} -exit_stack_push unset -f image_registry_container_deploy_cluster -image_registry_container_undeploy_cluster() -{ - minikube -p $1 ssh -- "$image_registry_container_undeploy_command" -} -exit_stack_push unset -f image_registry_container_undeploy_cluster -image_registry_addon_deploy_cluster() -{ - minikube -p $1 addons enable registry - date - kubectl --context $1 -n kube-system -l kubernetes.io/minikube-addons=registry wait --for condition=ready pods - 
date - # Get http://localhost:5000/v2/: read tcp 127.0.0.1:36378->127.0.0.1:5000: read: connection reset by peer - until_true_or_n 30 minikube -p $1 ssh -- curl http://$image_registry_address/v2/ -} -exit_stack_push unset -f image_registry_addon_deploy_cluster -image_registry_addon_undeploy_cluster() -{ - minikube -p $1 addons disable registry - date - kubectl --context $1 -n kube-system -l kubernetes.io/minikube-addons=registry wait --for delete all --timeout 2m - date -} -exit_stack_push unset -f image_registry_addon_undeploy_cluster -image_registry_deployment_deploy_cluster() -{ - kubectl create --dry-run=client -o yaml deployment $image_registry_deployment_name --image $image_registry_container_image_reference --port $image_registry_port_number|kubectl --context $1 apply -f - - kubectl --context $1 wait deployment/$image_registry_deployment_name --for condition=available -} -exit_stack_push unset -f image_registry_deployment_deploy_cluster -image_registry_deployment_address() -{ - kubectl --context $1 get $(kubectl --context $1 get pod -l app=$image_registry_deployment_name -o name) --template='{{.status.podIP}}':$image_registry_port_number -} -exit_stack_push unset -f image_registry_deployment_address -image_registry_deployment_undeploy_cluster() -{ - kubectl --context $1 delete deployment/$image_registry_deployment_name -} -exit_stack_push unset -f image_registry_deployment_undeploy_cluster -image_registry_deploy_cluster_method=addon -exit_stack_push unset -v image_registry_deploy_cluster_method -image_registry_deploy_cluster() -{ - image_registry_${image_registry_deploy_cluster_method}_deploy_cluster $1 -} -exit_stack_push unset -f image_registry_deploy_cluster -image_registry_undeploy_cluster() -{ - image_registry_${image_registry_deploy_cluster_method}_undeploy_cluster $1 -} -exit_stack_push unset -f image_registry_undeploy_cluster -image_registry_deploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do image_registry_deploy_cluster $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f image_registry_deploy_spokes -image_registry_undeploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do image_registry_undeploy_cluster $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f image_registry_undeploy_spokes -image_archive() -{ - set -- $1 $(echo $1|tr : _) - set -- $1 $HOME/.minikube/cache/images/amd64/$(dirname $2) $(basename $2) - mkdir -p $2 - set -- $1 $2/$3 - # docker-archive doesn't support modifying existing images - rm -f $2 - DOCKER_HOST=$DOCKER_HOST\ - docker image save $1 -o $2 -} -exit_stack_push unset -f image_archive -image_load_cluster() -{ - minikube -p $1 image load $2 -} -exit_stack_push unset -f image_load_cluster -image_and_containers_exited_using_remove_cluster() -{ - minikube -p $1 ssh -- docker container rm \$\(docker container ls --all --filter ancestor=$2 --filter status=exited --quiet\)\;docker image rm $2 -} -exit_stack_push unset -f image_and_containers_exited_using_remove_cluster -image_remove_cluster() -{ - minikube -p $1 ssh -- docker image rm $2 -} -exit_stack_push unset -f image_remove_cluster -image_push_cluster() -{ - minikube -p $1 ssh -- docker image push $2 -} -exit_stack_push unset -f image_push_cluster -ramen_image_directory_name=${ramen_image_directory_name-ramendr} -exit_stack_push unset -v ramen_image_directory_name -ramen_image_name_prefix=ramen -exit_stack_push unset -v ramen_image_name_prefix -ramen_image_tag=${ramen_image_tag-canary} -exit_stack_push unset -v ramen_image_tag 
-ramen_image_reference() -{ - echo ${1:+$1/}${ramen_image_directory_name:+$ramen_image_directory_name/}$ramen_image_name_prefix-$2:$ramen_image_tag -} -exit_stack_push unset -f ramen_image_reference -ramen_image_reference_registry_local() -{ - ramen_image_reference $image_registry_address $1 -} -exit_stack_push unset -f ramen_image_reference_registry_local -ramen_manager_image_reference=$(ramen_image_reference "${ramen_manager_image_registry_address-localhost}" operator) -exit_stack_push unset -v ramen_manager_image_reference -ramen_manager_image_build() -{ -# ENV variable to skip building ramen -# - expects docker image named: -# [$ramen_manager_image_registry_address/][$ramen_image_directory_name/]ramen-operator:$ramen_image_tag - if test "${skip_ramen_build:-false}" != false; then - return - fi - if true; then - . ${ramen_hack_directory_path_name}/docker-install.sh; docker_install ${HOME}/.local/bin; unset -f docker_install - DOCKERCMD=docker - else - ${ramen_hack_directory_path_name}/docker-uninstall.sh ${HOME}/.local/bin - . ${ramen_hack_directory_path_name}/podman-docker-install.sh - DOCKERCMD=podman - fi - . ${ramen_hack_directory_path_name}/go-install.sh; go_install ${HOME}/.local; unset -f go_install - make -C $ramen_directory_path_name docker-build IMG=$ramen_manager_image_reference DOCKERCMD=$DOCKERCMD DOCKER_HOST=$DOCKER_HOST - unset -v DOCKERCMD -} -exit_stack_push unset -f ramen_manager_image_build -ramen_manager_image_archive() -{ - image_archive $ramen_manager_image_reference -} -exit_stack_push unset -f ramen_manager_image_archive -ramen_manager_image_build_and_archive() -{ - ramen_manager_image_build - ramen_manager_image_archive -} -exit_stack_push unset -f ramen_manager_image_build_and_archive -ramen_manager_image_load_cluster() -{ - image_load_cluster $1 $ramen_manager_image_reference -} -exit_stack_push unset -f ramen_manager_image_load_cluster -ramen_manager_image_remove_cluster() -{ - image_remove_cluster $1 $ramen_manager_image_reference -} -exit_stack_push unset -f ramen_manager_image_remove_cluster -ramen_bundle_image_reference() -{ - ramen_image_reference_registry_local $1-operator-bundle -} -exit_stack_push unset -f ramen_bundle_image_reference -ramen_bundle_image_spoke_reference=$(ramen_bundle_image_reference dr-cluster) -exit_stack_push unset -v ramen_bundle_image_spoke_reference -ramen_bundle_image_build() -{ - make -C $ramen_directory_path_name bundle-$1-build\ - IMG=$ramen_manager_image_reference\ - BUNDLE_IMG_DRCLUSTER=$ramen_bundle_image_spoke_reference\ - IMAGE_TAG=$ramen_image_tag\ - -} -exit_stack_push unset -f ramen_bundle_image_build -ramen_bundle_image_spoke_build() -{ - ramen_bundle_image_build dr-cluster -} -exit_stack_push unset -f ramen_bundle_image_spoke_build -ramen_bundle_image_spoke_push() -{ - podman push --tls-verify=false $ramen_bundle_image_spoke_reference -} -exit_stack_push unset -f ramen_bundle_image_spoke_push -ramen_bundle_image_spoke_archive() -{ - image_archive $ramen_bundle_image_spoke_reference -} -exit_stack_push unset -f ramen_bundle_image_spoke_archive -ramen_bundle_image_spoke_load_cluster() -{ - image_load_cluster $1 $ramen_bundle_image_spoke_reference -} -exit_stack_push unset -f ramen_bundle_image_spoke_load_cluster -ramen_bundle_image_spoke_remove_cluster() -{ - image_and_containers_exited_using_remove_cluster $1 $ramen_bundle_image_spoke_reference -} -exit_stack_push unset -f ramen_bundle_image_spoke_remove_cluster -ramen_bundle_image_spoke_push_cluster() -{ - image_push_cluster $1 $ramen_bundle_image_spoke_reference 
-} -exit_stack_push unset -f ramen_bundle_image_spoke_push_cluster -ramen_catalog_image_reference=$(ramen_image_reference_registry_local operator-catalog) -exit_stack_push unset -v ramen_catalog_image_reference -ramen_catalog_image_build() -{ - make -C $ramen_directory_path_name catalog-build\ - BUNDLE_IMGS=$1\ - BUNDLE_PULL_TOOL=none\ --skip-tls\ - CATALOG_IMG=$ramen_catalog_image_reference\ - -} -exit_stack_push unset -f ramen_catalog_image_build -ramen_catalog_image_spoke_build() -{ - ramen_catalog_image_build $ramen_bundle_image_spoke_reference -} -exit_stack_push unset -f ramen_catalog_image_spoke_build -ramen_catalog_image_archive() -{ - image_archive $ramen_catalog_image_reference -} -exit_stack_push unset -f ramen_catalog_image_archive -ramen_catalog_image_load_cluster() -{ - image_load_cluster $1 $ramen_catalog_image_reference -} -exit_stack_push unset -f ramen_catalog_image_load_cluster -ramen_catalog_image_remove_cluster() -{ - image_remove_cluster $1 $ramen_catalog_image_reference -} -exit_stack_push unset -f ramen_catalog_image_remove_cluster -ramen_catalog_image_push_cluster() -{ - image_push_cluster $1 $ramen_catalog_image_reference -} -exit_stack_push unset -f ramen_catalog_image_push_cluster -ramen_images_build() -{ - ramen_manager_image_build - ramen_bundle_image_spoke_build - image_registry_container_deploy_localhost - exit_stack_push image_registry_container_undeploy_localhost - ramen_bundle_image_spoke_push - ramen_catalog_image_spoke_build - exit_stack_pop -} -exit_stack_push unset -f ramen_images_build -ramen_images_archive() -{ - ramen_manager_image_archive - ramen_bundle_image_spoke_archive - ramen_catalog_image_archive -} -exit_stack_push unset -f ramen_images_archive -ramen_images_build_and_archive() -{ - ramen_images_build - ramen_images_archive -} -exit_stack_push unset -f ramen_images_build_and_archive -ramen_images_load_spoke() -{ - ramen_manager_image_load_cluster $1 - ramen_bundle_image_spoke_load_cluster $1 - ramen_catalog_image_load_cluster $1 -} -exit_stack_push unset -f ramen_images_load_spoke -ramen_images_push_spoke() -{ - ramen_bundle_image_spoke_push_cluster $1 - ramen_catalog_image_push_cluster $1 -} -exit_stack_push unset -f ramen_images_push_spoke -ramen_images_deploy_spoke() -{ - ramen_images_load_spoke $1 - image_registry_deploy_cluster $1 - ramen_images_push_spoke $1 -} -exit_stack_push unset -f ramen_images_deploy_spoke -ramen_images_undeploy_spoke_common() -{ - image_registry_undeploy_cluster $1 - ramen_catalog_image_remove_cluster $1 - ramen_bundle_image_spoke_remove_cluster $1 -} -exit_stack_push unset -f ramen_images_undeploy_spoke_common -ramen_images_undeploy_spoke_nonhub() -{ - ramen_images_undeploy_spoke_common $1 - ramen_manager_image_remove_cluster $1 -} -exit_stack_push unset -f ramen_images_undeploy_spoke_nonhub -ramen_images_undeploy_spoke_hub() -{ - ramen_images_undeploy_spoke_common $1 -} -exit_stack_push unset -f ramen_images_undeploy_spoke_hub -ramen_images_deploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do ramen_images_deploy_spoke $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f ramen_images_deploy_spokes -ramen_images_undeploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do ramen_images_undeploy_spoke $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f ramen_images_undeploy_spokes -ramen_catalog_kubectl() -{ - cat <<-a | kubectl --context $1 $2 -f - - kind: CatalogSource - apiVersion: operators.coreos.com/v1alpha1 - metadata: - name: ramen-catalog - 
namespace: ramen-system - spec: - sourceType: grpc - image: $ramen_catalog_image_reference - displayName: "Ramen Operators" - a -} -exit_stack_push unset -f ramen_catalog_deploy_cluster -ramen_catalog_deploy_cluster() -{ - ramen_catalog_kubectl $1 apply - until_true_or_n 30 eval test \"\$\(kubectl --context $1 -n ramen-system get catalogsources.operators.coreos.com/ramen-catalog -ojsonpath='{.status.connectionState.lastObservedState}'\)\" = READY -} -exit_stack_push unset -f ramen_catalog_deploy_cluster -ramen_catalog_undeploy_cluster() -{ - ramen_catalog_kubectl $1 delete - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $1 -n ramen-system wait catalogsources.operators.coreos.com/ramen-catalog --for delete -} -exit_stack_push unset -f ramen_catalog_undeploy_cluster -kube_context_set() -{ - exit_stack_push kubectl config use-context $(kubectl config current-context) - kubectl config use-context ${1} -} -exit_stack_push unset -f kube_context_set -kube_context_set_undo() -{ - exit_stack_pop -} -exit_stack_push unset -f kube_context_set_undo -ramen_deploy_hub_or_spoke() -{ - ramen_manager_image_load_cluster $1 - . $ramen_hack_directory_path_name/go-install.sh; go_install $HOME/.local; unset -f go_install - kube_context_set $1 - make -C $ramen_directory_path_name deploy-$2 IMG=$ramen_manager_image_reference - kube_context_set_undo - kubectl --context $1 -n ramen-system wait deployments --all --for condition=available --timeout 2m - ramen_config_deploy_hub_or_spoke $1 $2 -} -exit_stack_push unset -f ramen_deploy_hub_or_spoke -ramen_s3_secret_kubectl_cluster() -{ - cat <<-EOF | kubectl --context $1 $2 -f - - apiVersion: v1 - kind: Secret - metadata: - name: s3secret - namespace: ramen-system - stringData: - AWS_ACCESS_KEY_ID: minio - AWS_SECRET_ACCESS_KEY: minio123 - EOF -} -exit_stack_push unset -f ramen_s3_secret_kubectl_cluster -ramen_s3_secret_deploy_cluster() -{ - ramen_s3_secret_kubectl_cluster $1 apply -} -exit_stack_push unset -f ramen_s3_secret_deploy_cluster -ramen_s3_secret_undeploy_cluster() -{ - ramen_s3_secret_kubectl_cluster $1 delete\ --ignore-not-found -} -exit_stack_push unset -f ramen_s3_secret_undeploy_cluster -ramen_s3_secret_distribution_enabled=${ramen_s3_secret_distribution_enabled-true} -exit_stack_push unset -v ramen_s3_secret_distribution_enabled -ramen_s3_secret_deploy_cluster_wait() -{ - until_true_or_n 30 kubectl --context $1 -n ramen-system get secret/s3secret -} -exit_stack_push unset -f ramen_s3_secret_deploy_cluster_wait -ramen_s3_secret_undeploy_cluster_wait() -{ - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $1 -n ramen-system wait secret/s3secret --for delete -} -exit_stack_push unset -f ramen_s3_secret_undeploy_cluster_wait -if test ramen_s3_secret_distribution_enabled = true; then - secret_function_name_suffix=_wait -else - secret_function_name_suffix= -fi -exit_stack_push unset -v secret_function_name_suffix -ramen_config_map_name() -{ - echo ramen-$1-operator-config -} -exit_stack_push unset -f ramen_config_map_name -ramen_config_file_path_name() -{ - echo $ramen_directory_path_name/config/$1/manager/ramen_manager_config.yaml -} -exit_stack_push unset -f ramen_config_file_path_name -ramen_config_replace_hub_or_spoke() -{ - kubectl create configmap $(ramen_config_map_name $2) --from-file=$3 -o yaml --dry-run=client |\ - kubectl --context $1 -n ramen-system replace -f - -} -exit_stack_push unset -f ramen_config_replace_hub_or_spoke -ramen_config_deploy_hub_or_spoke() 
-{ - ramen_s3_secret_deploy_cluster $1 - until_true_or_n 90 kubectl --context $1 -n ramen-system get configmap $(ramen_config_map_name $2) - set -- $1 $2 /tmp/$USER/ramen/$2 - mkdir -p $3 - set -- $1 $2 $3/ramen_manager_config.yaml - cat $(ramen_config_file_path_name $2) - <<-EOF >$3 - s3StoreProfiles: - $(for cluster_name in $spoke_cluster_names; do cat <<-b - - s3ProfileName: minio-on-$cluster_name - s3Bucket: bucket - s3CompatibleEndpoint: $(minikube_minio_url $cluster_name) - s3Region: us-east-1 - s3SecretRef: - name: s3secret - namespace: ramen-system - veleroNamespaceSecretKeyRef: - name: s3secret - key: aws - b - done;unset -v cluster_name) - drClusterOperator: - deploymentAutomationEnabled: true - s3SecretDistributionEnabled: $ramen_s3_secret_distribution_enabled - kubeObjectProtection: - extraVrgNamespacesFeatureEnabled: true - EOF - ramen_config_replace_hub_or_spoke $1 $2 $3 -} -exit_stack_push unset -f ramen_config_deploy_hub_or_spoke -ramen_config_undeploy_hub_or_spoke() -{ - ramen_config_replace_hub_or_spoke $1 $2 $(ramen_config_file_path_name $2) - ramen_s3_secret_undeploy_cluster $1 -} -exit_stack_push unset -f ramen_config_undeploy_hub_or_spoke -ramen_deploy_hub() -{ - ramen_deploy_hub_or_spoke $hub_cluster_name hub - ramen_samples_channel_and_drpolicy_deploy -} -exit_stack_push unset -f ramen_deploy_hub -ramen_deploy_spoke() -{ - volsync_crds_deploy $1 - ramen_recipe_crd_deploy $1 - ramen_deploy_hub_or_spoke $1 dr-cluster -} -exit_stack_push unset -f ramen_deploy_spoke -ramen_undeploy_hub_or_spoke() -{ - ramen_config_undeploy_hub_or_spoke $1 $2 - kube_context_set $1 - make -C $ramen_directory_path_name undeploy-$2 - kube_context_set_undo - ramen_manager_image_remove_cluster $1 -} -exit_stack_push unset -f ramen_undeploy_hub_or_spoke -ramen_undeploy_hub() -{ - ramen_samples_channel_and_drpolicy_undeploy - ramen_undeploy_hub_or_spoke $hub_cluster_name hub -} -exit_stack_push unset -f ramen_undeploy_hub -ramen_undeploy_spoke() -{ - ramen_undeploy_hub_or_spoke $1 dr-cluster - ramen_recipe_crd_undeploy $1 - volsync_crds_undeploy $1 -} -exit_stack_push unset -f ramen_undeploy_spoke -ramen_deploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do ramen_deploy_spoke $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f ramen_deploy_spokes -ramen_undeploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do ramen_undeploy_spoke $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f ramen_undeploy_spokes -volsync_crds_deploy() { - volsync_crds_kubectl $1 apply -}; exit_stack_push unset -f volsync_crds_deploy -volsync_crds_undeploy() { - volsync_crds_kubectl $1 delete\ --ignore-not-found -}; exit_stack_push unset -f volsync_crds_undeploy -volsync_crds_kubectl() { - kubectl --context $1 $2\ - -f https://raw.githubusercontent.com/backube/volsync/main/config/crd/bases/volsync.backube_replicationdestinations.yaml\ - -f https://raw.githubusercontent.com/backube/volsync/main/config/crd/bases/volsync.backube_replicationsources.yaml\ - -}; exit_stack_push unset -f volsync_crds_kubectl -ramen_recipe_crd_deploy() { - ramen_recipe_crd_kubectl $1 apply -}; exit_stack_push unset -f ramen_recipe_crd_deploy -ramen_recipe_crd_undeploy() { - ramen_recipe_crd_kubectl $1 delete\ --ignore-not-found -}; exit_stack_push unset -f ramen_recipe_crd_undeploy -ramen_recipe_crd_kubectl() { - kubectl --context $1 $2 -f https://raw.githubusercontent.com/RamenDR/recipe/main/config/crd/bases/ramendr.openshift.io_recipes.yaml -}; exit_stack_push unset -f 
ramen_recipe_crd_kubectl -olm_deploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do olm_deploy $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f olm_deploy_spokes -olm_undeploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do olm_undeploy $cluster_name; done; unset -v cluster_name -} -exit_stack_push unset -f olm_undeploy_spokes -ocm_ramen_samples_git_ref=${ocm_ramen_samples_git_ref-main} -ocm_ramen_samples_git_path=${ocm_ramen_samples_git_path-ramendr} -exit_stack_push unset -v ocm_ramen_samples_git_ref -exit_stack_push unset -v ocm_ramen_samples_git_path -ramen_samples_channel_and_drpolicy_deploy() -{ - for cluster_name in $spoke_cluster_names; do - ramen_images_deploy_spoke $cluster_name - done; unset -v cluster_name - set -- ocm-ramen-samples/subscriptions - set -- /tmp/$USER/$1 $1 $spoke_cluster_names - mkdir -p $1 - cat <<-a >$1/kustomization.yaml - resources: - - https://github.com/$ocm_ramen_samples_git_path/$2?ref=$ocm_ramen_samples_git_ref - patchesJson6902: - - target: - group: ramendr.openshift.io - version: v1alpha1 - kind: DRCluster - name: hub - patch: |- - - op: add - path: /spec/region - value: $4 - - op: replace - path: /spec/s3ProfileName - value: minio-on-$4 - - op: replace - path: /metadata/name - value: $4 - - target: - group: ramendr.openshift.io - version: v1alpha1 - kind: DRCluster - name: cluster1 - patch: |- - - op: add - path: /spec/region - value: $3 - - op: replace - path: /spec/s3ProfileName - value: minio-on-$3 - - op: replace - path: /metadata/name - value: $3 - - target: - group: ramendr.openshift.io - version: v1alpha1 - kind: DRPolicy - name: dr-policy - patch: |- - - op: replace - path: /spec/drClusters - value: - - $3 - - $4 - a - kubectl --context $hub_cluster_name apply -k $1 - for cluster_name in $spoke_cluster_names; do - until_true_or_n 300 kubectl --context $cluster_name get namespaces/ramen-system - ramen_catalog_deploy_cluster $cluster_name - until_true_or_n 300 kubectl --context $cluster_name -n ramen-system wait deployments ramen-dr-cluster-operator --for condition=available --timeout 0 - ramen_s3_secret_deploy_cluster$secret_function_name_suffix $cluster_name - done; unset -v cluster_name - kubectl --context $hub_cluster_name -n ramen-samples get channels/ramen-gitops -} -exit_stack_push unset -f ramen_samples_channel_and_drpolicy_deploy -ramen_samples_channel_and_drpolicy_undeploy() -{ - set -- - for cluster_name in $spoke_cluster_names; do -#apiVersion: operators.coreos.com/v1alpha1 -#kind: Subscription -#metadata: -# name: ramen-dr-cluster-subscription -# namespace: ramen-system -#spec: -# channel: alpha -# installPlanApproval: Automatic -# name: ramen-dr-cluster-operator -# source: ramen-catalog -# sourceNamespace: ramen-system -# startingCSV: ramen-dr-cluster-operator.v0.0.1 -#status -# conditions: -# - message: 'constraints not satisfiable: subscription ramen-dr-cluster-subscription -# requires ramen-catalog/ramen-system/alpha/ramen-dr-cluster-operator.v0.0.1, -# subscription ramen-dr-cluster-subscription exists, clusterserviceversion ramen-dr-cluster-operator.v0.0.1 -# exists and is not referenced by a subscription, @existing/ramen-system//ramen-dr-cluster-operator.v0.0.1 -# and ramen-catalog/ramen-system/alpha/ramen-dr-cluster-operator.v0.0.1 provide -# VolumeReplicationGroup (ramendr.openshift.io/v1alpha1)' -# reason: ConstraintsNotSatisfiable -# status: "True" -# type: ResolutionFailed -#status.installedCSV\ - set -- $# "$@" $(kubectl --context $cluster_name -n ramen-system get 
subscriptions.operators.coreos.com/ramen-dr-cluster-subscription -ojsonpath=\{.\ -spec.startingCSV\ -\}); test $(($1+2)) -eq $#; shift - ramen_catalog_undeploy_cluster $cluster_name - done; unset -v cluster_name - date - kubectl --context $hub_cluster_name delete -k https://github.com/$ocm_ramen_samples_git_path/ocm-ramen-samples/subscriptions?ref=$ocm_ramen_samples_git_ref - date - for cluster_name in $spoke_cluster_names; do - date - kubectl --context $cluster_name -n ramen-system delete clusterserviceversions.operators.coreos.com/$1 --ignore-not-found - shift - date - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $cluster_name -n ramen-system wait deployments ramen-dr-cluster-operator --for delete - date - # TODO remove once drpolicy controller does this - kubectl --context $cluster_name delete customresourcedefinitions.apiextensions.k8s.io/volumereplicationgroups.ramendr.openshift.io - date - done; unset -v cluster_name - for cluster_name in $spoke_cluster_names_nonhub; do - date - ramen_s3_secret_undeploy_cluster$secret_function_name_suffix $cluster_name - date - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $cluster_name wait namespaces/ramen-system --for delete --timeout 2m - date - ramen_images_undeploy_spoke_nonhub $cluster_name - done; unset -v cluster_name - for cluster_name in $spoke_cluster_names_hub; do - ramen_images_undeploy_spoke_hub $cluster_name - done; unset -v cluster_name -} -exit_stack_push unset -f ramen_samples_channel_and_drpolicy_undeploy -application_sample_place() -{ - set -- $1 "$2" $3 $4 "$5" $6 ocm-ramen-samples subscriptions/busybox - set -- $1 "$2" $3 https://$4/$ocm_ramen_samples_git_path/$7$5/$8$6 /tmp/$USER/$7/$8 - mkdir -p $5 - cat <<-a >$5/kustomization.yaml - resources: - - $4 - namespace: busybox-sample - patchesJson6902: - - target: - group: ramendr.openshift.io - version: v1alpha1 - kind: DRPlacementControl - name: busybox-drpc - patch: |- - - op: add - path: /spec/action - value: $2 - - op: add - path: /spec/$3Cluster - value: $1 - a - kubectl --context $hub_cluster_name apply -k $5 - until_true_or_n 90 eval test \"\$\(kubectl --context ${hub_cluster_name} -n busybox-sample get subscriptions/busybox-sub -ojsonpath='{.status.phase}'\)\" = Propagated - until_true_or_n 120 eval test \"\$\(kubectl --context $hub_cluster_name -n busybox-sample get placementrules/busybox-placement -ojsonpath='{.status.decisions[].clusterName}'\)\" = $1 - if test ${1} = ${hub_cluster_name}; then - subscription_name_suffix=-local - else - unset -v subscription_name_suffix - fi - until_true_or_n 30 eval test \"\$\(kubectl --context ${1} -n busybox-sample get subscriptions/busybox-sub${subscription_name_suffix} -ojsonpath='{.status.phase}'\)\" = Subscribed - unset -v subscription_name_suffix - until_true_or_n 120 kubectl --context $1 -n busybox-sample wait pods/busybox --for condition=ready --timeout 0 - until_true_or_n 30 eval test \"\$\(kubectl --context ${1} -n busybox-sample get persistentvolumeclaims/busybox-pvc -ojsonpath='{.status.phase}'\)\" = Bound - date - until_true_or_n 90 kubectl --context ${1} -n busybox-sample get volumereplicationgroups/busybox-drpc - date -} -exit_stack_push unset -f application_sample_place -application_sample_undeploy_wait_and_namespace_undeploy() -{ - date - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context ${1} -n busybox-sample wait pods/busybox --for delete --timeout 2m - date - 
true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $1 -n busybox-sample wait volumereplicationgroups/busybox-drpc --for delete --timeout 2m - date - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $1 -n busybox-sample wait persistentvolumeclaims/busybox-pvc --for delete --timeout 2m - # TODO remove once drplacement controller does this - kubectl --context $hub_cluster_name -n $1 delete manifestworks/busybox-drpc-busybox-sample-ns-mw #--ignore-not-found - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $1 wait namespace/busybox-sample --for delete -} -exit_stack_push unset -f application_sample_undeploy_wait_and_namespace_undeploy -application_sample_deploy() -{ - set -- $spoke_cluster_names - application_sample_place $1 '' preferred github.com '' \?ref=$ocm_ramen_samples_git_ref -} -exit_stack_push unset -f application_sample_deploy -application_sample_failover() -{ - set -- $spoke_cluster_names - application_sample_place $2 Failover failover raw.githubusercontent.com /$ocm_ramen_samples_git_ref /drpc.yaml - application_sample_undeploy_wait_and_namespace_undeploy $1 -} -exit_stack_push unset -f application_sample_failover -application_sample_relocate() -{ - set -- $spoke_cluster_names - application_sample_place $1 Relocate preferred raw.githubusercontent.com /$ocm_ramen_samples_git_ref /drpc.yaml - application_sample_undeploy_wait_and_namespace_undeploy $2 -} -exit_stack_push unset -f application_sample_relocate -application_sample_undeploy() -{ - set -- $(kubectl --context ${hub_cluster_name} -n busybox-sample get placementrules/busybox-placement -ojsonpath='{.status.decisions[].clusterName}') - kubectl --context $hub_cluster_name delete -k https://github.com/$ocm_ramen_samples_git_path/ocm-ramen-samples/subscriptions/busybox?ref=$ocm_ramen_samples_git_ref - application_sample_undeploy_wait_and_namespace_undeploy $1 -} -exit_stack_push unset -f application_sample_undeploy -ramen_directory_path_name=${ramen_hack_directory_path_name}/.. 
-exit_stack_push unset -v ramen_directory_path_name -hub_cluster_name=${hub_cluster_name-hub} -exit_stack_push unset -v hub_cluster_name -spoke_cluster_names=${spoke_cluster_names-cluster1\ $hub_cluster_name} -exit_stack_push unset -v spoke_cluster_names -for cluster_name in $spoke_cluster_names; do - if test $cluster_name = $hub_cluster_name; then - spoke_cluster_names_hub=$spoke_cluster_names_hub\ $cluster_name - else - spoke_cluster_names_nonhub=$spoke_cluster_names_nonhub\ $cluster_name - fi -done; unset -v cluster_name -exit_stack_push unset -v spoke_cluster_names_hub -exit_stack_push unset -v spoke_cluster_names_nonhub -rook_ceph_deploy() -{ - # volumes required: mirror sources, mirror targets, minio backend - for cluster_name in $spoke_cluster_names; do - rook_ceph_deploy_spoke $cluster_name - done; unset -v cluster_name - rook_ceph_mirrors_deploy $spoke_cluster_names -} -exit_stack_push unset -f rook_ceph_deploy -rook_ceph_undeploy() -{ - for cluster_name in $spoke_cluster_names; do - rook_ceph_undeploy_spoke $cluster_name - done; unset -v cluster_name -} -exit_stack_push unset -f rook_ceph_undeploy -rook_ceph_csi_image_canary_deploy() -{ - for cluster_name in $spoke_cluster_names; do - minikube -p $cluster_name ssh -- docker image pull quay.io/cephcsi/cephcsi:canary - kubectl --context $cluster_name -n rook-ceph rollout restart deploy/csi-rbdplugin-provisioner - done; unset -v cluster_name -} -exit_stack_push unset -f rook_ceph_csi_image_canary_deploy -rook_ceph_volume_replication_image_latest_deploy() -{ - for cluster_name in $spoke_cluster_names; do - minikube -p $cluster_name ssh -- docker image pull quay.io/csiaddons/volumereplication-operator:latest - kubectl --context $cluster_name -n rook-ceph rollout restart deploy/csi-rbdplugin-provisioner - done; unset -v cluster_name -} -exit_stack_push unset -f rook_ceph_volume_replication_image_latest_deploy -cert_manager_deploy() { - for cluster_name in $hub_cluster_name $spoke_cluster_names_nonhub; do cert_manager_deploy_context $cluster_name; done; unset -v cluster_name -}; exit_stack_push unset -f cert_manager_deploy -cert_manager_undeploy() { - for cluster_name in $spoke_cluster_names_nonhub $hub_cluster_name; do cert_manager_undeploy_context $cluster_name; done; unset -v cluster_name -}; exit_stack_push unset -f cert_manager_undeploy -ramen_deploy() -{ - ramen_deploy_hub -} -exit_stack_push unset -f ramen_deploy -ramen_undeploy() -{ - ramen_undeploy_hub -} -exit_stack_push unset -f ramen_undeploy -deploy() -{ - hub_cluster_name=$hub_cluster_name spoke_cluster_names=$spoke_cluster_names $ramen_hack_directory_path_name/ocm-minikube.sh - rook_ceph_deploy - cert_manager_deploy - minio_deploy_spokes - ramen_images_build_and_archive - olm_deploy_spokes - ramen_deploy -} -exit_stack_push unset -f deploy -undeploy() -{ - ramen_undeploy - olm_undeploy_spokes - minio_undeploy_spokes - cert_manager_undeploy - rook_ceph_undeploy -} -exit_stack_push unset -f undeploy -exit_stack_push unset -v command -for command in "${@:-deploy}"; do - set -x - $command - { set +x;} 2>/dev/null -done diff --git a/hack/ocm-minikube.sh b/hack/ocm-minikube.sh deleted file mode 100755 index f9b8865c9..000000000 --- a/hack/ocm-minikube.sh +++ /dev/null @@ -1,983 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=1090,1091,2046,2086 - -# open cluster management (ocm) hub and managed minikube kvm amd64 clusters deploy -# https://github.com/ShyamsundarR/ocm-minikube/README.md - -set -x 
-set -e -ramen_hack_directory_path_name=$(dirname $0) -. $ramen_hack_directory_path_name/exit_stack.sh -. $ramen_hack_directory_path_name/true_if_exit_status_and_stderr.sh -exit_stack_push unset -f true_if_exit_status_and_stderr - -mkdir -p ${HOME}/.local/bin -PATH=${HOME}/.local/bin:${PATH} - -${ramen_hack_directory_path_name}/minikube-install.sh ${HOME}/.local/bin -# 1.11 wait support -# 1.19 certificates.k8s.io/v1 https://github.com/kubernetes/kubernetes/pull/91685 -# 1.21 kustomize v4.0.5 https://github.com/kubernetes-sigs/kustomize#kubectl-integration -${ramen_hack_directory_path_name}/kubectl-install.sh ${HOME}/.local/bin 1 21 -${ramen_hack_directory_path_name}/kustomize-install.sh ${HOME}/.local/bin -. ${ramen_hack_directory_path_name}/until_true_or_n.sh -# TODO registration-operator go version minimum determine programatically -. ${ramen_hack_directory_path_name}/go-install.sh; go_install ${HOME}/.local 1.15.2; unset -f go_install -. $ramen_hack_directory_path_name/git-checkout.sh -exit_stack_push git_checkout_unset -. $ramen_hack_directory_path_name/github-url.sh -exit_stack_push github_url_unset -unset -v ramen_hack_directory_path_name - -ensure_libvirt_default_network_exists() -{ - if ! virsh net-dumpxml default >/dev/null 2>&1 ; then - echo 'libvirt network default is required as it is used as the common network for all the minikube instances' - exit 1 - fi -} -exit_stack_push unset -f ensure_libvirt_default_network_exists -minikube_validate() -{ - if ! command -v virsh; then - # https://minikube.sigs.k8s.io/docs/drivers/kvm2/ - . /etc/os-release # NAME - case ${NAME} in - "Red Hat Enterprise Linux Server") - # https://access.redhat.com/articles/1344173#Q_how-install-virtualization-packages - sudo yum install libvirt -y - ;; - "Ubuntu") - # https://help.ubuntu.com/community/KVM/Installation - sudo apt-get update - if false # test ${VERSION_ID} -ge "18.10" - then - sudo apt-get install qemu-kvm libvirt-daemon-system libvirt-clients bridge-utils -y - else - sudo apt-get install qemu-kvm libvirt-bin ubuntu-vm-builder bridge-utils -y - fi - # shellcheck disable=SC2012 - sudo adduser ${LOGNAME} $(ls -l /var/run/libvirt/libvirt-sock|cut -d\ -f4) - echo 'relogin for permission to access /var/run/libvirt/libvirt-sock, then rerun' - false; exit - ;; - esac - fi - ensure_libvirt_default_network_exists -} -exit_stack_push unset -f minikube_validate -minikube_start() -{ - minikube_validate - minikube start --driver=kvm2 --network=default --profile=$1 $2 -} -exit_stack_push unset -f minikube_start -minikube_start_hub() -{ - minikube_start $hub_cluster_name --cpus=4 - hub_kubeconfig_file_create -} -exit_stack_push unset -f minikube_start_hub -minikube_start_spokes() -{ - for cluster_name in $spoke_cluster_names_nonhub; do - minikube_start $cluster_name - done; unset -v cluster_name -} -exit_stack_push unset -f minikube_start_spokes -minikubes_start() -{ - minikube_start_hub - minikube_start_spokes -} -exit_stack_push unset -f minikubes_start -minikubes_delete() -{ - for cluster_name in $spoke_cluster_names_nonhub $hub_cluster_name; do - minikube delete -p $cluster_name - done; unset -v cluster_name -} -exit_stack_push unset -f minikubes_delete -json6902_test_and_replace_yaml() -{ - printf ' - - op: test - path: %s - value: %s - - op: replace - path: %s - value: %s' $1 "$2" $1 "$3" -} -exit_stack_push unset -f json6902_test_and_replace_yaml -registration_operator_git_ref=c723e190c454110797d89e67bcb33250b35e1fd7 -exit_stack_push unset -v registration_operator_git_ref 
-registration_operator_image_tag=0.1.0-$registration_operator_git_ref -exit_stack_push unset -v registration_operator_image_tag -registration_operator_file_path_names_cluster_manager=" -crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml -" -registration_operator_file_path_names_klusterlet=" -crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml -" -registration_operator_file_path_names_common=" -rbac/cluster_role.yaml -rbac/cluster_role_binding.yaml -operator/namespace.yaml -operator/service_account.yaml -operator/operator.yaml -" -exit_stack_push unset -v registration_operator_file_path_names_clustermanager -exit_stack_push unset -v registration_operator_file_path_names_klusterlet -exit_stack_push unset -v registration_operator_file_path_names_common -v0_y_0() { echo v0.$1.0; } -y=6 -registration_image_tag=$(v0_y_0 $y) -work_image_tag=$registration_image_tag -placement_image_tag=$(v0_y_0 $((y-3))) -unset -v y -unset -f v0_y_0 -exit_stack_push unset -v placement_image_tag -exit_stack_push unset -v work_image_tag -exit_stack_push unset -v registration_image_tag -registration_operator_file_url() -{ - github_url_file stolostron/registration-operator $1 $registration_operator_git_ref -} -exit_stack_push unset -f registration_operator_file_url -registration_operator_image_spec() -{ - set -- $1 quay.io/open-cluster-management/$1 $2 - json6902_test_and_replace_yaml /spec/$1ImagePullSpec $2 $2:$3 -} -exit_stack_push unset -f registration_operator_image_spec -registration_operator_kubectl() -{ - set -- $1 $2 $3 "$4" "$5"\ - $(registration_operator_kustomization_directory_path_name $2 $1)\ - quay.io/open-cluster-management/registration-operator:\ - - mkdir -p $6 - cat <<-a >$6/kustomization.yaml - resources: - $(for file_path_name in $4 $registration_operator_file_path_names_common; do - echo \ \ -\ $(registration_operator_file_url deploy/$2/config/$file_path_name) - done; unset -v file_path_name) - patchesJson6902: - - target: - group: apps - version: v1 - kind: Deployment - name: $2 - namespace: open-cluster-management - patch: |-\ -$(json6902_test_and_replace_yaml /spec/replicas 3 1)\ -$(json6902_test_and_replace_yaml /spec/template/spec/containers/0/image\ - $7latest\ - $7$registration_operator_image_tag\ -) - a - kubectl --context $1 $5 -k $6 -} -exit_stack_push unset -f registration_operator_kubectl -registration_operator_kustomization_directory_path_name() -{ - echo /tmp/$USER/open-cluster-management/registration/$1/$2 -} -exit_stack_push unset -f registration_operator_kustomization_directory_path_name -registration_operator_cr_kubectl() -{ - set -- $1 $2 $3 $4 "$5" "$6" $(registration_operator_kustomization_directory_path_name $3 $1/cr) - mkdir -p $7 - cat <<-a >$7/kustomization.yaml - resources: - - $(registration_operator_file_url deploy/$3/config/samples/operator_open-cluster-management_$4s.cr.yaml) - patchesJson6902: - - target: - group: operator.open-cluster-management.io - version: v1 - kind: $2 - name: $3 - patch: |-\ -$(registration_operator_image_spec registration $registration_image_tag)\ -$(registration_operator_image_spec work $work_image_tag)\ -$5 - a - kubectl --context $1 $6 -k $7 -} -exit_stack_push unset -f registration_operator_cr_kubectl -registration_operator_deploy_hub_or_spoke() -{ - set -- $1 $2 $3 $4 "$5" "$6" apply - registration_operator_kubectl $1 $3 $4 "$5" $7 - registration_operator_cr_kubectl $1 $2 $3 $4 "$6" $7 -} -exit_stack_push unset -f registration_operator_deploy_hub_or_spoke 
-registration_operator_undeploy_hub_or_spoke() -{ - set -- $1 $2 $3 $4 "$5" "$6" delete\ --ignore-not-found - registration_operator_cr_kubectl $1 $2 $3 $4 "$6" "$7" - registration_operator_kubectl $1 $3 $4 "$5" "$7" -} -exit_stack_push unset -f registration_operator_undeploy_hub_or_spoke -registration_operator_hub() -{ - registration_operator_$2_hub_or_spoke $1 ClusterManager cluster-manager clustermanager\ - "$registration_operator_file_path_names_cluster_manager"\ - "$(registration_operator_image_spec placement $placement_image_tag)" -} -exit_stack_push unset -f registration_operator_hub -registration_operator_spoke() -{ - registration_operator_$2_hub_or_spoke $1 Klusterlet klusterlet klusterlet\ - "$registration_operator_file_path_names_klusterlet"\ - "$(json6902_test_and_replace_yaml /spec/clusterName cluster1 $1)" -} -exit_stack_push unset -f registration_operator_spoke -registration_operator_deploy_hub() -{ - set -- $hub_cluster_name - registration_operator_hub $1 deploy - date - kubectl --context $1 -n open-cluster-management wait deployments/cluster-manager --for condition=available - date - # https://github.com/kubernetes/kubernetes/issues/83242 - until_true_or_n 120 kubectl --context $1 -n open-cluster-management-hub wait deployments --all --for condition=available --timeout 0 -} -exit_stack_push unset -f registration_operator_deploy_hub -registration_operator_undeploy_hub() -{ - set -- $hub_cluster_name - registration_operator_hub $1 undeploy - date - true_if_exit_status_and_stderr 1 'error: no matching resources found' \ - kubectl --context $1 wait --for delete --timeout 90s namespaces open-cluster-management-hub - date -} -exit_stack_push unset -f registration_operator_undeploy_hub -application_sample_0_deploy() -{ - mkdir -p /tmp/$USER/ocm-minikube - cp -R ocm-minikube/examples /tmp/$USER/ocm-minikube - sed -e "s,KIND_CLUSTER,${1}," -i /tmp/$USER/ocm-minikube/examples/kustomization.yaml - kubectl --context ${hub_cluster_name} apply -k /tmp/$USER/ocm-minikube/examples - condition=ready - condition=initialized - # Failed to pull image "busybox": rpc error: code = Unknown desc = Error response from daemon: toomanyrequests: You have reached your pull rate limit. 
You may increase the limit by authenticating and upgrading: https://www.docker.com/increase-rate-limit - until_true_or_n 150 kubectl --context ${1} wait pods/hello --for condition=${condition} --timeout 0 - unset -v condition -} -application_sample_0_undeploy() -{ - # delete may exceed 30 seconds - date - kubectl --context ${hub_cluster_name} delete -k /tmp/$USER/ocm-minikube/examples #--wait=false - date - # sed -e "s,${1},KIND_CLUSTER," -i /tmp/$USER/ocm-minikube/examples/kustomization.yaml - set +e - kubectl --context ${1} wait pods/hello --for delete --timeout 0 - # --wait=true error: no matching resources found - # --wait=false error: timed out waiting for the condition on pods/hello - set -e - date -} -application_sample_0_test() -{ - application_sample_0_deploy ${1} - application_sample_0_undeploy ${1} -} -registration_operator_bootstrap_kubectl() -{ - kubectl create namespace open-cluster-management-agent --dry-run=client -o yaml | kubectl --context $1 $2 -f- - set -- $1 "$2" $(registration_operator_kustomization_directory_path_name klusterlet $1)/bootstrap - mkdir -p $3 - cat <<-a >$3/kustomization.yaml - secretGenerator: - - name: bootstrap-hub-kubeconfig - namespace: open-cluster-management-agent - files: - - kubeconfig=hub-kubeconfig - type: Opaque - generatorOptions: - disableNameSuffixHash: true - a - cp -f $hub_kubeconfig_file_path_name $3/hub-kubeconfig - kubectl --context $1 $2 -k $3 -} -exit_stack_push unset -f registration_operator_bootstrap_kubectl -registration_operator_deploy_spoke() -{ - registration_operator_bootstrap_kubectl $1 apply - registration_operator_spoke $1 deploy - date - kubectl --context $1 -n open-cluster-management wait deployments/klusterlet --for condition=available --timeout 60s - date - # https://github.com/kubernetes/kubernetes/issues/83242 - until_true_or_n 90 kubectl --context ${1} -n open-cluster-management-agent wait deployments/klusterlet-registration-agent --for condition=available --timeout 0 - # hub register managed cluster - until_true_or_n 30 kubectl --context ${hub_cluster_name} get managedclusters/${1} - set +e - kubectl --context ${hub_cluster_name} certificate approve $(kubectl --context ${hub_cluster_name} get csr --field-selector spec.signerName=kubernetes.io/kube-apiserver-client --selector open-cluster-management.io/cluster-name=${1} -oname) - # error: one or more CSRs must be specified as or -f - kubectl --context ${hub_cluster_name} patch managedclusters/${1} -p '{"spec":{"hubAcceptsClient":true}}' --type=merge - # Error from server (InternalError): Internal error occurred: failed calling webhook "managedclustermutators.admission.cluster.open-cluster-management.io": the server is currently unable to handle the request - set -e - date - kubectl --context ${hub_cluster_name} wait managedclusters/${1} --for condition=ManagedClusterConditionAvailable - date - kubectl --context $1 -n open-cluster-management-agent wait deployments/klusterlet-work-agent --for condition=available --timeout 90s - date - #application_sample_0_test ${1} -} -exit_stack_push unset -f registration_operator_deploy_spoke -registration_operator_undeploy_spoke() -{ - kubectl --context $hub_cluster_name delete --ignore-not-found managedclusters/$1 namespaces/$1 - registration_operator_spoke $1 undeploy - registration_operator_bootstrap_kubectl $1 delete\ --ignore-not-found -} -exit_stack_push unset -f registration_operator_undeploy_spoke -registration_operator_deploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do - registration_operator_deploy_spoke 
$cluster_name - done; unset -v cluster_name -} -exit_stack_push unset -f registration_operator_deploy_spokes -registration_operator_undeploy_spokes() -{ - for cluster_name in $spoke_cluster_names; do - registration_operator_undeploy_spoke $cluster_name - done; unset -v cluster_name -} -exit_stack_push unset -f registration_operator_undeploy_spokes -registration_operator_deploy() -{ - registration_operator_deploy_hub - registration_operator_deploy_spokes -} -exit_stack_push unset -f registration_operator_deploy -registration_operator_undeploy() -{ - for cluster_name in $spoke_cluster_names; do - registration_operator_cr_undeploy_spoke $cluster_name - done; unset -v cluster_name - registration_operator_cr_undeploy_hub - for cluster_name in $spoke_cluster_names; do - registration_operator_undeploy_spoke $cluster_name - done; unset -v cluster_name - registration_operator_undeploy_hub -} -registration_operator_undeploy() -{ - registration_operator_undeploy_spokes - test -n spoke_cluster_names_hub && registration_operator_deploy_hub - registration_operator_undeploy_hub -} -exit_stack_push unset -f registration_operator_undeploy -foundation_operator_git_ref=dc43ec703e62594e3942c7f06d38d1897550ffea -exit_stack_push unset -v foundation_operator_git_ref -foundation_operator_image_tag=2.4.0-$foundation_operator_git_ref -exit_stack_push unset -v foundation_operator_image_tag -foundation_operator_file_path_names_hub=" -crds/action.open-cluster-management.io_managedclusteractions.yaml -crds/internal.open-cluster-management.io_managedclusterinfos.yaml -crds/imageregistry.open-cluster-management.io_managedclusterimageregistries.yaml -crds/inventory.open-cluster-management.io_baremetalassets.yaml -crds/view.open-cluster-management.io_managedclusterviews.yaml -crds/hive.openshift.io_syncsets.yaml -crds/hive.openshift.io_clusterdeployments.yaml -crds/hiveinternal.openshift.io_clustersyncs.yaml -crds/hive.openshift.io_clusterclaims.yaml -crds/hive.openshift.io_clusterpools.yaml -clusterrole.yaml -agent-clusterrole.yaml -controller.yaml -" -foundation_operator_file_path_names_hub_excluded=" -proxyserver-apiservice.yaml -proxyserver-service.yaml -proxyserver.yaml -clusterviewv1-apiservice.yaml -clusterviewv1alpha1-apiservice.yaml -" : -foundation_operator_file_path_names_klusterlet=" -agent-addon.yaml -" -exit_stack_push unset -v foundation_operator_file_path_names_hub -exit_stack_push unset -v foundation_operator_file_path_names_klusterlet -foundation_operator_kubectl() -{ - set -- $1 /tmp/$USER/open-cluster-management/foundation/$1/$2 $3 "$4" "$5" - mkdir -p $2 - cat <<-a >$2/kustomization.yaml - resources: - $(for file_path_name in $4; do - echo \ \ -\ $(github_url_file stolostron/multicloud-operators-foundation\ - deploy/foundation/$1/resources/$file_path_name\ - $foundation_operator_git_ref\ - ) - done; unset -v file_path_name) - $5 - a - kubectl --context $hub_cluster_name $3 -k $2 -} -exit_stack_push unset -f foundation_operator_kubectl -foundation_operator_kubectl_hub() -{ - foundation_operator_kubectl hub $hub_cluster_name $1\ - "$foundation_operator_file_path_names_hub"\ - "\ -patchesJson6902: - - target: - group: apps - version: v1 - kind: Deployment - name: ocm-controller - namespace: open-cluster-management - patch: |-\ -$(json6902_test_and_replace_yaml\ - /spec/template/spec/containers/0/image\ - ocm-controller\ - quay.io/open-cluster-management/multicloud-manager:$foundation_operator_image_tag\ - )\ -" -} -exit_stack_push unset -f foundation_operator_kubectl_hub 
-foundation_operator_kubectl_spoke() -{ - foundation_operator_kubectl klusterlet $1 $2\ - "$foundation_operator_file_path_names_klusterlet"\ - "namespace: $1" -} -exit_stack_push unset -f foundation_operator_kubectl_spoke -foundation_operator_deploy_hub() -{ - foundation_operator_kubectl_hub apply - kubectl --context $hub_cluster_name -n open-cluster-management wait deployments/ocm-controller --for condition=available -} -exit_stack_push unset -f foundation_operator_deploy_hub -foundation_operator_undeploy_hub() -{ - set +e - foundation_operator_kubectl_hub delete - # Error from server (NotFound): error when deleting "/tmp/$USER/open-cluster-management/foundation/hub/hub": services "ocm-proxyserver" not found - # Error from server (NotFound): error when deleting "/tmp/$USER/open-cluster-management/foundation/hub/hub": deployments.apps "ocm-proxyserver" not found - # Error from server (NotFound): error when deleting "/tmp/$USER/open-cluster-management/foundation/hub/hub": apiservices.apiregistration.k8s.io "v1.clusterview.open-cluster-management.io" not found - # Error from server (NotFound): error when deleting "/tmp/$USER/open-cluster-management/foundation/hub/hub": apiservices.apiregistration.k8s.io "v1alpha1.clusterview.open-cluster-management.io" not found - # Error from server (NotFound): error when deleting "/tmp/$USER/open-cluster-management/foundation/hub/hub": apiservices.apiregistration.k8s.io "v1beta1.proxy.open-cluster-management.io" not found - set -e - set +e - kubectl --context $hub_cluster_name -n open-cluster-management wait deployments/ocm-controller --for delete - # error: no matching resources found - set -e -} -exit_stack_push unset -f foundation_operator_undeploy_hub -foundation_operator_deploy_spoke() -{ - foundation_operator_kubectl_spoke $1 apply - until_true_or_n 300 kubectl --context $1 -n open-cluster-management-agent wait deployments/klusterlet-addon-workmgr --for condition=available --timeout 0 -} -exit_stack_push unset -f foundation_operator_deploy_spoke -foundation_operator_undeploy_spoke() -{ - foundation_operator_kubectl_spoke $1 delete - set +e - kubectl --context $1 -n open-cluster-management-agent wait deployments/klusterlet-addon-workmgr --for delete - # error: no matching resources found - set -e -} -exit_stack_push unset -f foundation_operator_undeploy_spoke -foundation_operator_deploy_spokes() -{ - for_each "$spoke_cluster_names" foundation_operator_deploy_spoke -} -exit_stack_push unset -f foundation_operator_deploy_spokes -foundation_operator_undeploy_spokes() -{ - for_each "$spoke_cluster_names" foundation_operator_undeploy_spoke -} -exit_stack_push unset -f foundation_operator_undeploy_spokes -foundation_operator_deploy() -{ - foundation_operator_deploy_hub - foundation_operator_deploy_spokes -} -exit_stack_push unset -f foundation_operator_deploy -foundation_operator_undeploy() -{ - foundation_operator_undeploy_spokes - foundation_operator_undeploy_hub -} -exit_stack_push unset -f foundation_operator_undeploy -subscription_operator_release_name=2.3 -exit_stack_push unset -v subscription_operator_release_name -subscription_operator_git_ref=c48c55969bc4385bc694acd8bc92e5bf4e0181d3 -exit_stack_push unset -v subscription_operator_git_ref -subscription_operator_file_names_deploy_common=" -apps.open-cluster-management.io_channels_crd.yaml -apps.open-cluster-management.io_deployables_crd.yaml -apps.open-cluster-management.io_helmreleases_crd.yaml -apps.open-cluster-management.io_placementrules_crd.yaml 
-apps.open-cluster-management.io_subscriptions.yaml -clusterrole.yaml -clusterrole_binding.yaml -namespace.yaml -service.yaml -service_account.yaml -" -subscription_operator_file_names_deploy_hub=" -application-operator.yaml -operator.yaml -" -subscription_operator_file_names_deploy_managed=" -operator.yaml -" -subscription_operator_file_names_examples_helmrepo_hub_channel=" -00-namespace.yaml -01-channel.yaml -02-placement.yaml -02-subscription.yaml -" -exit_stack_push unset -v subscription_operator_file_names_deploy_common -exit_stack_push unset -v subscription_operator_file_names_deploy_hub -exit_stack_push unset -v subscription_operator_file_names_deploy_managed -exit_stack_push unset -v subscription_operator_file_names_examples_helmrepo_hub_channel -subscription_operator_file_url() -{ - github_url_file stolostron/multicloud-operators-subscription $1 $subscription_operator_git_ref -} -exit_stack_push unset -f subscription_operator_file_url -subscription_operator_file_urls() -{ - for file_name in $1; do - echo "$3"$(subscription_operator_file_url $2$file_name) - done; unset -v file_name -} -exit_stack_push unset -f subscription_operator_file_urls -subscription_operator_file_urls_kubectl() -{ - kubectl --context $1 $2 $(subscription_operator_file_urls "$3" $4 '-f ') -} -exit_stack_push unset -f subscription_operator_file_urls_kubectl -subscription_operator_file_urls_kustomization() -{ - subscription_operator_file_urls "$1" $2 ' - ' -} -exit_stack_push unset -f subscription_operator_file_urls_kustomization -subscription_operator_kubectl_common() -{ - subscription_operator_file_urls_kubectl $1 $2 "$subscription_operator_file_names_deploy_common" deploy/common/ -} -exit_stack_push unset -f subscription_operator_kubectl_common -subscription_operator_deploy_common() -{ - subscription_operator_kubectl_common $1 apply -} -exit_stack_push unset -f subscription_operator_deploy_common -subscription_operator_undeploy_common() -{ - subscription_operator_kubectl_common $1 delete -} -exit_stack_push unset -f subscription_operator_undeploy_common -subscription_operator_kubectl() -{ - set -- $1 "$2" $3 "$4" "$5" "$6" open-cluster-management multicluster-operators subscription - set -- $1 "$2" $3 "$4" "$5" "$6" $7 $8 $9 /tmp/$USER/$7/$9/$1/$2 - mkdir -p ${10} - cat <<-a >${10}/kustomization.yaml - resources: - $(subscription_operator_file_urls_kustomization "$6" deploy/$1/) - patchesJson6902: - - target: - group: apps - version: v1 - kind: Deployment - name: $8-$9 - namespace: $8 - patch: |-\ -$(json6902_test_and_replace_yaml /metadata/name $8-$9 $8-$9$4)\ -$(json6902_test_and_replace_yaml /spec/template/spec/containers/0/image\ - quay.io/$7/$8-$9:latest\ - quay.io/$7/$8-$9:$subscription_operator_release_name-$subscription_operator_git_ref\ -)\ - $5 - a - kubectl --context $2 $3 -k ${10} -} -exit_stack_push unset -f subscription_operator_kubectl -subscription_operator_kubectl_hub() -{ - subscription_operator_kubectl hub $hub_cluster_name $1 '' '' "$subscription_operator_file_names_deploy_hub" -} -exit_stack_push unset -f subscription_operator_kubectl_hub -subscription_operator_kubectl_spoke() -{ - subscription_operator_kubectl managed $1 $2 "$3" "$4" "$subscription_operator_file_names_deploy_managed" -} -exit_stack_push unset -f subscription_operator_kubectl_spoke -subscription_operator_deploy_hub() -{ - subscription_operator_kubectl_hub apply - date - kubectl --context ${hub_cluster_name} -n multicluster-operators wait deployments --all --for condition=available --timeout 2m - date -} -exit_stack_push 
unset -f subscription_operator_deploy_hub -subscription_operator_undeploy_hub() -{ - subscription_operator_kubectl_hub delete -} -exit_stack_push unset -f subscription_operator_undeploy_hub -subscription_operator_deploy_spoke() -{ - # https://github.com/open-cluster-management-io/multicloud-operators-subscription/issues/16 - kubectl --context $hub_cluster_name label managedclusters/$1 name=$1 --overwrite - cp -f $hub_kubeconfig_file_path_name /tmp/$USER/kubeconfig - kubectl create secret generic appmgr-hub-kubeconfig --from-file=kubeconfig=/tmp/$USER/kubeconfig --dry-run=client -oyaml|kubectl --context $1 -n multicluster-operators apply -f - - subscription_operator_kubectl_spoke $1 apply "$2" "$(\ - json6902_test_and_replace_yaml /spec/template/spec/containers/0/command/2 --cluster-name='' --cluster-name=$1)$(\ - json6902_test_and_replace_yaml /spec/template/spec/containers/0/command/3 --cluster-namespace='' --cluster-namespace=$1)" - date - kubectl --context $1 -n multicluster-operators wait deployments --all --for condition=available --timeout 1m - date -} -exit_stack_push unset -f subscription_operator_deploy_spoke -subscription_operator_deploy_spoke_hub() -{ - subscription_operator_deploy_spoke $1 -mc - kubectl --context $1 label managedclusters/$1 local-cluster=true --overwrite -} -exit_stack_push unset -f subscription_operator_deploy_spoke_hub -subscription_operator_deploy_spoke_nonhub() -{ - subscription_operator_deploy_spoke $1 '' -} -exit_stack_push unset -f subscription_operator_deploy_spoke_nonhub -subscription_operator_undeploy_spoke() -{ - subscription_operator_kubectl_spoke $1 delete $2 - kubectl --context $1 -n multicluster-operators delete secret appmgr-hub-kubeconfig - kubectl --context $hub_cluster_name label managedclusters/$1 name- -} -exit_stack_push unset -f subscription_operator_undeploy_spoke -subscription_operator_undeploy_spoke_hub() -{ - kubectl --context $1 label managedclusters/$1 local-cluster- - subscription_operator_undeploy_spoke $1 -mc -} -exit_stack_push unset -f subscription_operator_undeploy_spoke_hub -subscription_operator_undeploy_spoke_nonhub() -{ - subscription_operator_undeploy_spoke $1 '' -} -exit_stack_push unset -f subscription_operator_undeploy_spoke_nonhub -subscription_operator_test_kubectl() -{ - set -- $1 $2 /tmp/$USER/open-cluster-management/subscription-test - mkdir -p $3 - cat <<-a >$3/kustomization.yaml - resources: - $(subscription_operator_file_urls_kustomization "$subscription_operator_file_names_examples_helmrepo_hub_channel" examples/helmrepo-hub-channel/) - patchesJson6902: - - target: - group: apps.open-cluster-management.io - version: v1 - kind: PlacementRule - name: nginx-pr - patch: |- - - op: add - path: /spec/clusterSelector - value: {} - - op: add - path: /spec/clusterSelector/matchLabels - value: - name: $1 - a - kubectl --context $hub_cluster_name $2 -k $3 -} -exit_stack_push unset -f subscription_operator_test_kubectl -subscription_operator_test_deploy() -{ - subscription_operator_test_kubectl $1 apply - # https://github.com/kubernetes/kubernetes/issues/83242 - until_true_or_n 90 kubectl --context $1 wait deployments --selector app=nginx-ingress --for condition=available --timeout 0 -} -exit_stack_push unset -f subscription_operator_test_deploy -subscription_operator_test_undeploy() -{ - subscription_operator_test_kubectl $1 delete - set +e - kubectl --context ${1} wait deployments --selector app=nginx-ingress --for delete --timeout 1m - # error: no matching resources found - set -e -} -exit_stack_push unset -f 
subscription_operator_test_undeploy -subscription_operator_test() -{ - set -- $spoke_cluster_names - subscription_operator_test_deploy $1 - subscription_operator_test_undeploy $1 -} -exit_stack_push unset -f subscription_operator_test -subscription_operator_deploy() -{ - for_each "$hub_cluster_name $spoke_cluster_names_nonhub" subscription_operator_deploy_common - subscription_operator_deploy_hub - for_each "$spoke_cluster_names_hub" subscription_operator_deploy_spoke_hub - for_each "$spoke_cluster_names_nonhub" subscription_operator_deploy_spoke_nonhub -} -exit_stack_push unset -f subscription_operator_deploy -subscription_operator_undeploy() -{ - for_each "$spoke_cluster_names_nonhub" subscription_operator_undeploy_spoke_nonhub - for_each "$spoke_cluster_names_hub" subscription_operator_undeploy_spoke_hub - subscription_operator_undeploy_hub - for_each "$spoke_cluster_names_nonhub $hub_cluster_name" subscription_operator_undeploy_common -} -exit_stack_push unset -f subscription_operator_undeploy -policy_operator_deploy_hub() -{ - # Create the namespace - policy_hub_ns="open-cluster-management" - kubectl --context ${1} get ns ${policy_hub_ns} - if [ $(kubectl --context ${1} get ns ${policy_hub_ns} | grep -c ${policy_hub_ns}) -ne 1 ]; then - kubectl --context ${1} create ns ${policy_hub_ns}; - fi - - # Apply the CRDs - policy_git_path="https://raw.githubusercontent.com/open-cluster-management-io/governance-policy-propagator/main/deploy" - kubectl --context ${1} apply -f ${policy_git_path}/crds/policy.open-cluster-management.io_policies.yaml - kubectl --context ${1} apply -f ${policy_git_path}/crds/policy.open-cluster-management.io_placementbindings.yaml - kubectl --context ${1} apply -f ${policy_git_path}/crds/policy.open-cluster-management.io_policyautomations.yaml - kubectl --context ${1} apply -f ${policy_git_path}/crds/policy.open-cluster-management.io_policysets.yaml - - # Deploy the policy-propagator - kubectl --context ${1} apply -f ${policy_git_path}/operator.yaml -n ${policy_hub_ns} - - # Ensure operator is running - until_true_or_n 300 kubectl --context ${1} -n ${policy_hub_ns} wait deployments/governance-policy-propagator --for condition=available --timeout 0 -} -exit_stack_push unset -f policy_operator_deploy_hub -policy_operator_undeploy_hub() -{ - echo "TODO: Undeploy policy framework on hub!!!" 
-} -exit_stack_push unset -f policy_operator_undeploy_hub -policy_operator_deploy_spoke() -{ - # Create the namespace - policy_mc_ns="open-cluster-management-agent-addon" - if [ $(kubectl --context ${1} get ns ${policy_mc_ns} | grep -c ${policy_mc_ns}) -ne 1 ]; then - kubectl --context ${1} create ns ${policy_mc_ns}; - fi - - # Create the secret to authenticate with the hub - kubectl --context ${1} -n ${policy_mc_ns} create secret generic hub-kubeconfig --from-file=kubeconfig=${hub_kubeconfig_file_path_name} --dry-run=client -oyaml|kubectl --context ${1} -n ${policy_mc_ns} apply -f - - - # Apply the policy CRD - ocm_git_path="https://raw.githubusercontent.com/open-cluster-management-io" - kubectl --context ${1} apply -f ${ocm_git_path}/governance-policy-propagator/main/deploy/crds/policy.open-cluster-management.io_policies.yaml - - # TODO: Should loop through components and invoke a common routine here - # Deploy the spec synchronization component - policy_component="governance-policy-spec-sync" - kubectl --context ${1} apply -f ${ocm_git_path}/${policy_component}/main/deploy/operator.yaml -n ${policy_mc_ns} - kubectl --context ${1} set env deployment/${policy_component} -n ${policy_mc_ns} --containers="${policy_component}" WATCH_NAMESPACE=${1} - # Ensure operator is running - until_true_or_n 300 kubectl --context ${1} -n ${policy_mc_ns} wait deployments/${policy_component} --for condition=available --timeout 0 - - # Deploy the status synchronization component - policy_component="governance-policy-status-sync" - kubectl --context ${1} apply -f ${ocm_git_path}/${policy_component}/main/deploy/operator.yaml -n ${policy_mc_ns} - kubectl --context ${1} set env deployment/${policy_component} -n ${policy_mc_ns} --containers="${policy_component}" WATCH_NAMESPACE=${1} - # Ensure operator is running - until_true_or_n 300 kubectl --context ${1} -n ${policy_mc_ns} wait deployments/${policy_component} --for condition=available --timeout 0 - - # Deploy the template synchronization component - policy_component="governance-policy-template-sync" - kubectl --context ${1} apply -f ${ocm_git_path}/${policy_component}/main/deploy/operator.yaml -n ${policy_mc_ns} - kubectl --context ${1} set env deployment/${policy_component} -n ${policy_mc_ns} --containers="${policy_component}" WATCH_NAMESPACE=${1} - # Ensure operator is running - until_true_or_n 300 kubectl --context ${1} -n ${policy_mc_ns} wait deployments/${policy_component} --for condition=available --timeout 0 - - # Apply the configuration policy CRD - policy_component="config-policy-controller" - kubectl --context ${1} apply -f ${ocm_git_path}/${policy_component}/main/deploy/crds/policy.open-cluster-management.io_configurationpolicies.yaml - - # Deploy the configuration controller - kubectl --context ${1} apply -f ${ocm_git_path}/${policy_component}/main/deploy/operator.yaml -n ${policy_mc_ns} - kubectl --context ${1} set env deployment/${policy_component} -n ${policy_mc_ns} --containers=${policy_component} WATCH_NAMESPACE=${1} - # Ensure operator is running - until_true_or_n 300 kubectl --context ${1} -n ${policy_mc_ns} wait deployments/${policy_component} --for condition=available --timeout 0 -} -exit_stack_push unset -f policy_operator_deploy_spoke -policy_operator_undeploy_spoke() -{ - echo "TODO: Undeploy policy framework on spoke!!!" 
-} -exit_stack_push unset -f policy_operator_undeploy_spoke -policy_operator_deploy() -{ - for_each "$hub_cluster_name" policy_operator_deploy_hub - for_each "$spoke_cluster_names" policy_operator_deploy_spoke -} -exit_stack_push unset -f policy_operator_deploy -ocm_application_samples_patch_old_undo() -{ - git_checkout $1 --\ subscriptions/book-import/placementrule.yaml\ subscriptions/book-import/subscription.yaml -} -exit_stack_push unset -f ocm_application_samples_patch_old_undo -ocm_application_samples_patch_apply() -{ - cat <<'a' | git apply --directory $1 $2 - -diff --git a/subscriptions/book-import/kustomization.yaml b/subscriptions/book-import/kustomization.yaml -index 8d35d3a..11d0760 100644 ---- a/subscriptions/book-import/kustomization.yaml -+++ b/subscriptions/book-import/kustomization.yaml -@@ -1,5 +1,4 @@ - resources: - - namespace.yaml --- application.yaml - - placementrule.yaml --- subscription.yaml -\ No newline at end of file -+- subscription.yaml -diff --git a/subscriptions/book-import/placementrule.yaml b/subscriptions/book-import/placementrule.yaml -index ec72faf..293aae9 100644 ---- a/subscriptions/book-import/placementrule.yaml -+++ b/subscriptions/book-import/placementrule.yaml -@@ -9,7 +9,7 @@ metadata: - spec: - clusterSelector: - matchLabels: -- 'usage': 'development' -+ 'usage': 'test' - clusterConditions: - - type: ManagedClusterConditionAvailable - status: "True" -\ No newline at end of file -diff --git a/subscriptions/book-import/subscription.yaml b/subscriptions/book-import/subscription.yaml -index 69fcb6f..affcc9c 100644 ---- a/subscriptions/book-import/subscription.yaml -+++ b/subscriptions/book-import/subscription.yaml -@@ -3,8 +3,8 @@ apiVersion: apps.open-cluster-management.io/v1 - kind: Subscription - metadata: - annotations: -- apps.open-cluster-management.io/git-branch: master -- apps.open-cluster-management.io/git-path: book-import/app -+ apps.open-cluster-management.io/github-branch: main -+ apps.open-cluster-management.io/github-path: book-import - labels: - app: book-import - name: book-import -a -} -exit_stack_push unset -f ocm_application_samples_patch_apply -ocm_application_samples_checkout() -{ - set -- application-samples - git_clone_and_checkout https://github.com/stolostron $1 main 65853af - exit_stack_push git_checkout_undo $1 - ocm_application_samples_patch_old_undo $1 - ocm_application_samples_patch_apply $1 - exit_stack_push ocm_application_samples_patch_apply $1 --reverse -} -exit_stack_push unset -f ocm_application_samples_checkout -ocm_application_samples_checkout_undo() -{ - exit_stack_pop - exit_stack_pop -} -exit_stack_push unset -f ocm_application_samples_checkout_undo -application_sample_deploy() -{ - kubectl --context ${hub_cluster_name} apply -k application-samples/subscriptions/channel - kubectl --context ${hub_cluster_name} label managedclusters/${1} usage=test --overwrite - kubectl --context ${hub_cluster_name} apply -k application-samples/subscriptions/book-import - # https://github.com/kubernetes/kubernetes/issues/83242 - until_true_or_n 30 kubectl --context ${1} -n book-import wait deployments --all --for condition=available --timeout 0 -} -application_sample_undeploy() -{ - kubectl --context ${hub_cluster_name} delete -k application-samples/subscriptions/book-import - date - set +e - kubectl --context ${1} -n book-import wait pods --all --for delete --timeout 1m - # error: no matching resources found - set -e - date - kubectl --context ${1} delete namespaces/book-import - date - kubectl --context ${hub_cluster_name} 
label managedclusters/${1} usage- - kubectl --context ${hub_cluster_name} delete -k application-samples/subscriptions/channel -} -application_sample_test() -{ - set -- $spoke_cluster_names_nonhub - ocm_application_samples_checkout - application_sample_deploy ${1} - application_sample_undeploy ${1} - ocm_application_samples_checkout_undo -} -hub_cluster_name=${hub_cluster_name:-hub} -spoke_cluster_names=${spoke_cluster_names:-${hub_cluster_name}\ cluster1} -for cluster_name in ${spoke_cluster_names}; do - if test ${cluster_name} = ${hub_cluster_name}; then - spoke_cluster_names_hub=${spoke_cluster_names_hub}\ ${cluster_name} - else - spoke_cluster_names_nonhub=${spoke_cluster_names_nonhub}\ ${cluster_name} - fi -done -for_each() -{ - for x in $1; do - eval $2 $x - done; unset -v x -} -exit_stack_push unset -f for_each -hub_kubeconfig_file_path_name=/tmp/$USER/$hub_cluster_name-config -exit_stack_push unset -v hub_kubeconfig_file_path_name -hub_kubeconfig_file_create() -{ - mkdir -p $(dirname $hub_kubeconfig_file_path_name) - kubectl --context $hub_cluster_name config view --flatten --minify >$hub_kubeconfig_file_path_name -} -exit_stack_push unset -f hub_kubeconfig_file_create -deploy() -{ - minikubes_start - registration_operator_deploy - foundation_operator_deploy - subscription_operator_deploy - policy_operator_deploy -} -exit_stack_push unset -f deploy -undeploy() -{ - policy_operator_undeploy - subscription_operator_undeploy - foundation_operator_undeploy - registration_operator_undeploy - minikubes_delete -} -exit_stack_push unset -f undeploy -exit_stack_push unset -v command -for command in "${@:-deploy}"; do - $command -done -unset -v spoke_cluster_names_nonhub -unset -v spoke_cluster_names_hub -unset -v spoke_cluster_names -unset -v hub_cluster_name -unset -f application_sample_test -unset -f application_sample_undeploy -unset -f application_sample_deploy -unset -f application_sample_0_test -unset -f application_sample_0_undeploy -unset -f application_sample_0_deploy -unset -f until_true_or_n diff --git a/hack/olm.sh b/hack/olm.sh deleted file mode 100644 index 1b4b46b89..000000000 --- a/hack/olm.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=1090,2086,1091 - -OLM_BASE_URL="https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.19.1" - -. 
"$(dirname $0)"/until_true_or_n.sh - -olm_deploy() -{ - kubectl --context $1 apply -f $OLM_BASE_URL/crds.yaml - kubectl --context $1 wait --for condition=established -f $OLM_BASE_URL/crds.yaml - - kubectl --context $1 apply -f $OLM_BASE_URL/olm.yaml - kubectl --context $1 rollout status -w -n olm deployment/olm-operator - kubectl --context $1 rollout status -w -n olm deployment/catalog-operator - - until_true_or_n 300 eval test \"\$\(kubectl --context $1 get -n olm csv/packageserver -o jsonpath='{.status.phase}'\)\" = Succeeded - - kubectl --context $1 rollout status -w -n olm deployment/packageserver - - kubectl --context $1 delete -n olm catalogsources.operators.coreos.com/operatorhubio-catalog -} - -olm_undeploy() -{ - kubectl --context $1 delete -n olm csv/packageserver #apiservices.apiregistration.k8s.io/v1.packages.operators.coreos.com - kubectl --context $1 delete --ignore-not-found -f $OLM_BASE_URL/olm.yaml - kubectl --context $1 delete -f $OLM_BASE_URL/crds.yaml -} - -olm_unset() -{ -# unset -f until_true_or_n - unset -f olm_unset - unset -f olm_undeploy - unset -f olm_deploy -} diff --git a/hack/podman-docker-install.sh b/hack/podman-docker-install.sh deleted file mode 100755 index 8607f8751..000000000 --- a/hack/podman-docker-install.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2046,2086 -if ! command -v docker; then - $(dirname ${0})/podman-install.sh - # shellcheck disable=1091 - . /etc/os-release - case ${NAME} in - Ubuntu) - IFS=. read -r year month <<-a - ${VERSION_ID} - a - # https://github.com/containers/podman/issues/1553#issuecomment-435984922 - # https://packages.ubuntu.com/search?suite=all&arch=amd64&keywords=podman-docker - if test ${year} -gt 21 || { test ${year} -eq 21 && test ${month} -ge 10; } - then - sudo apt-get -y install podman-docker - else - sudo ln -s /usr/bin/podman /usr/bin/docker - fi - unset -v year month - docker --version - ;; - esac -fi diff --git a/hack/podman-docker-uninstall.sh b/hack/podman-docker-uninstall.sh deleted file mode 100755 index 889c9d9f5..000000000 --- a/hack/podman-docker-uninstall.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 -if test "$(file -h /usr/bin/docker)" = '/usr/bin/docker: symbolic link to /usr/bin/podman' -then - # shellcheck disable=1091 - . /etc/os-release - case ${NAME} in - Ubuntu) - IFS=. read -r year month <<-a - ${VERSION_ID} - a - if test ${year} -gt 21 || { test ${year} -eq 21 && test ${month} -ge 10; } - then - sudo apt -y remove podman-docker - else - sudo rm -f /usr/bin/docker - fi - unset -v year month - ;; - esac -fi diff --git a/hack/podman-install.sh b/hack/podman-install.sh deleted file mode 100755 index c0092f6ba..000000000 --- a/hack/podman-install.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2086 -if ! command -v podman; then - # https://podman.io/getting-started/installation#linux-distributions - # shellcheck disable=1091 - . 
/etc/os-release - case ${NAME} in - Ubuntu) - echo 'deb https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_'${VERSION_ID}'/ /' | sudo tee /etc/apt/sources.list.d/podman.list >/dev/null - curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_${VERSION_ID}/Release.key | sudo apt-key add - - sudo apt-get update - #sudo apt-get -y upgrade - sudo apt-get -y install podman - podman --version - ;; - esac -fi diff --git a/hack/podman-uninstall.sh b/hack/podman-uninstall.sh deleted file mode 100755 index a43228ab9..000000000 --- a/hack/podman-uninstall.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=2046,2086 -if command -v podman; then - $(dirname ${0})/podman-docker-uninstall.sh - # shellcheck disable=1091 - . /etc/os-release - case ${NAME} in - Ubuntu) - sudo apt -y remove podman - ;; - esac -fi diff --git a/hack/recipe_e2e/README.md b/hack/recipe_e2e/README.md deleted file mode 100644 index 2000f8f00..000000000 --- a/hack/recipe_e2e/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# Recipe End to End Test Scripts - -This directory contains scripts for testing Recipes with two Minikube clusters. -Currently, these scripts are limited to Asynchronous/Regional DR without ACM/OCM. - -## Setup - -Two Minikube clusters are required. The easiest way to deploy these is with the -`minikube-ramen.sh` scripts: `bash hack/minikube-ramen.sh deploy`. This will create -two clusters with async Rook/Ceph mirroring in KVM storage pools, named `cluster1` -and `cluster2`. Minio will be installed on each cluster, and this will be used for -the Ramen S3 store. - -Once this setup is successful, proceed with the instructions below. - -## Testing - -### First time setup - -Velero and several CRDs are required to use Recipes. Run `setup.sh` on both Minikube -clusters to install and configure these requirements. - -```bash -# working directory: ramen/hack/recipe_e2e - -# setup cluster2 -kubectl config use-context cluster2 -bash scripts/setup.sh - -# setup cluster1 -kubectl config use-context cluster1 -bash scripts/setup.sh -``` - -### E2E Tests - -#### Busybox Application - -The application under test is Busybox. A Busybox application consists of: - -1. Deployment -1. PVC - -These are created in Namespace `recipe-test`. A PV will be created on-demand for -use with the PVC. - -#### Protection of Application - -"Protection" means that Ramen has backed up the Kubernetes resources used by the -Application, as defined in the Recipe used by the VRG. The VRG used for Protection -can be found [here](protect/vrg_busybox_primary.yaml), and the Recipe [here](protect/recipe_busybox.yaml). - -```bash -# deploy and protect application -bash scripts/protect.sh -``` - -#### Failover from Cluster1 to Cluster2 - -At the beginning of this step, the application runs on Cluster1, but not Cluster2. -Cluster1 is "fenced" by setting its VRG to Secondary, then Cluster2 deploys a VRG -as Primary and restores the application as defined in the Recipe Restore Workflow. -The final test is the application running on Cluster2 and not Cluster1. Restore -objects will be present in the S3 store from this sequence. - -Detailed steps can be found -[here](../../docs/vrg-usage.md#failover-application-from-cluster1-to-cluster2). 
- -```bash -# failover from cluster1 to cluster2 -bash scripts/failover.sh -``` - -#### Failback from Cluster2 to Cluster1 - -At the beginning of this step, the application runs on Cluster2, but not Cluster1. -VRGs will be present on both clusters at the beginning; Cluster2's VRG as Primary -and Cluster1's as Secondary. Cluster1's VRG will be recreated as Secondary, and -Cluster2's VRG will be demoted to Secondary to enable a final sync of replicated -data, then Cluster1's VRG will be promoted to Primary and the application will be -restored. - -Detailed steps can be found -[here](../../docs/vrg-usage.md#failbackrelocate-application-from-cluster2-to-cluster1) - -```bash -# bash failback from cluster2 to cluster1 -bash scripts/failback.sh -``` - -#### Resource Teardown - -After the testing is complete, it may be desirable to reset the clusters to a -pre-failover/pre-failback state. This can be done with the `teardown.sh` script. - -```bash -# teardown cluster1 resources and clear minio-cluster1 s3 contents -bash scripts/teardown.sh cluster1 minio-cluster1 - -# teardown cluster2 resources and clear minio-cluster2 s3 contents -bash scripts/teardown.sh cluster2 minio-cluster2 -``` diff --git a/hack/recipe_e2e/config/credentials-velero-minikube b/hack/recipe_e2e/config/credentials-velero-minikube deleted file mode 100644 index 735edc7c1..000000000 --- a/hack/recipe_e2e/config/credentials-velero-minikube +++ /dev/null @@ -1,3 +0,0 @@ -[default] -aws_access_key_id = minio -aws_secret_access_key = minio123 diff --git a/hack/recipe_e2e/config/ramen_config.yaml b/hack/recipe_e2e/config/ramen_config.yaml deleted file mode 100644 index 794b38539..000000000 --- a/hack/recipe_e2e/config/ramen_config.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ramen-dr-cluster-operator-config - namespace: ramen-system -data: - ramen_manager_config.yaml: | - s3StoreProfiles: - - s3ProfileName: minio-cluster1 - s3Bucket: velero - s3CompatibleEndpoint: http://192.168.39.173:30000 - s3Region: us-east-1 - s3SecretRef: - name: minio-s3 - namespace: ramen-system - VeleroNamespaceSecretKeyRef: - key: cloud - name: cloud-credentials - - s3ProfileName: minio-cluster2 - s3Bucket: velero - s3CompatibleEndpoint: http://192.168.39.200:30000 - s3Region: us-east-1 - s3SecretRef: - name: minio-s3 - namespace: ramen-system - VeleroNamespaceSecretKeyRef: - key: cloud - name: cloud-credentials - drClusterOperator: - deploymentAutomationEnabled: true - s3SecretDistributionEnabled: true - channelName: alpha - packageName: ramen-dr-cluster-operator - namespaceName: ramen-system - catalogSourceName: ramen-catalog - catalogSourceNamespaceName: ramen-system - clusterServiceVersionName: ramen-dr-cluster-operator.v0.0.1 - veleroNamespaceName: "velero" diff --git a/hack/recipe_e2e/config/ramen_config_base.yaml b/hack/recipe_e2e/config/ramen_config_base.yaml deleted file mode 100644 index 2e9f5b741..000000000 --- a/hack/recipe_e2e/config/ramen_config_base.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: ramen-dr-cluster-operator-config - namespace: ramen-system -data: - ramen_manager_config.yaml: | - s3StoreProfiles: - - s3ProfileName: minio-cluster1 - s3Bucket: velero - s3CompatibleEndpoint: http://minikube-ip-cluster1:30000 - s3Region: us-east-1 - s3SecretRef: - name: minio-s3 - namespace: ramen-system - VeleroNamespaceSecretKeyRef: - key: cloud - name: cloud-credentials - - s3ProfileName: minio-cluster2 - s3Bucket: velero - s3CompatibleEndpoint: 
http://minikube-ip-cluster2:30000 - s3Region: us-east-1 - s3SecretRef: - name: minio-s3 - namespace: ramen-system - VeleroNamespaceSecretKeyRef: - key: cloud - name: cloud-credentials - drClusterOperator: - deploymentAutomationEnabled: true - s3SecretDistributionEnabled: true - channelName: alpha - packageName: ramen-dr-cluster-operator - namespaceName: ramen-system - catalogSourceName: ramen-catalog - catalogSourceNamespaceName: ramen-system - clusterServiceVersionName: ramen-dr-cluster-operator.v0.0.1 - veleroNamespaceName: "velero" diff --git a/hack/recipe_e2e/config/ramen_secret_minio.yaml b/hack/recipe_e2e/config/ramen_secret_minio.yaml deleted file mode 100644 index c2b6ced5d..000000000 --- a/hack/recipe_e2e/config/ramen_secret_minio.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: minio-s3 - namespace: ramen-system -type: Opaque -data: - AWS_ACCESS_KEY_ID: bWluaW8= - AWS_SECRET_ACCESS_KEY: bWluaW8xMjM= diff --git a/hack/recipe_e2e/config/s3_secret.yaml b/hack/recipe_e2e/config/s3_secret.yaml deleted file mode 100644 index 8758bc8dd..000000000 --- a/hack/recipe_e2e/config/s3_secret.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: eyewall4-s3 - namespace: ramen-system -type: Opaque -data: - AWS_ACCESS_KEY_ID: bWluaW9hZG1pbg== - AWS_SECRET_ACCESS_KEY: bWluaW9hZG1pbg== diff --git a/hack/recipe_e2e/failback/vrg_busybox_secondary.yaml b/hack/recipe_e2e/failback/vrg_busybox_secondary.yaml deleted file mode 100644 index 51c39d5a7..000000000 --- a/hack/recipe_e2e/failback/vrg_busybox_secondary.yaml +++ /dev/null @@ -1,24 +0,0 @@ ---- -apiVersion: ramendr.openshift.io/v1alpha1 -kind: VolumeReplicationGroup -metadata: - name: bb - namespace: recipe-test -spec: - action: Relocate - async: - schedulingInterval: 1m - pvcSelector: - matchLabels: - appname: busybox - replicationState: secondary - s3Profiles: - - minio-cluster1 - - minio-cluster2 - kubeObjectProtection: - recipe: - name: recipe-busybox - workflow: - captureName: capture - recoverName: recover - volumeGroupName: volumes diff --git a/hack/recipe_e2e/failover/vrg_busybox_primary.yaml b/hack/recipe_e2e/failover/vrg_busybox_primary.yaml deleted file mode 100644 index 2654c6893..000000000 --- a/hack/recipe_e2e/failover/vrg_busybox_primary.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -apiVersion: ramendr.openshift.io/v1alpha1 -kind: VolumeReplicationGroup -metadata: - name: bb - namespace: recipe-test -spec: - async: - schedulingInterval: 1m - pvcSelector: - matchLabels: - appname: busybox - replicationState: primary - s3Profiles: - - minio-cluster1 - - minio-cluster2 - kubeObjectProtection: - recipe: - name: recipe-busybox - workflow: - captureName: capture - recoverName: recover - volumeGroupName: volumes diff --git a/hack/recipe_e2e/protect/recipe_busybox.yaml b/hack/recipe_e2e/protect/recipe_busybox.yaml deleted file mode 100644 index 8ca1fe047..000000000 --- a/hack/recipe_e2e/protect/recipe_busybox.yaml +++ /dev/null @@ -1,61 +0,0 @@ ---- -apiVersion: ramendr.openshift.io/v1alpha1 -kind: Recipe -metadata: - name: recipe-busybox - namespace: recipe-test -spec: - appType: busybox - hooks: - - name: service-hooks - labelSelector: - matchLabels: - appname: busybox - type: exec - ops: - - name: pre-backup - container: busybox - timeout: 10m - # Command note: "/bin/sh -c" required when using non-executable files - command: - - "/bin/sh" - - "-c" - - "date" - - name: pre-restore - container: busybox - timeout: 5m - command: - - "/bin/sh" - - "-c" - - "date" - 
groups: - - name: volumes - type: volume - labelSelector: - matchLabels: - appname: busybox - - name: instance-resources - backupRef: instance-resources - type: resource - includedResourceTypes: - - deployment - - pvc - - name: deployments - backupRef: instance-resources - type: resource - includedResourceTypes: - - deployment - - name: pvcs - backupRef: instance-resources - type: resource - includedResourceTypes: - - pvc - captureWorkflow: - sequence: - - hook: service-hooks/pre-backup - - group: instance-resources - recoverWorkflow: - sequence: - - hook: service-hooks/pre-restore - - group: pvcs - - group: deployments diff --git a/hack/recipe_e2e/protect/vrg_busybox_primary.yaml b/hack/recipe_e2e/protect/vrg_busybox_primary.yaml deleted file mode 100644 index 6659ecbc8..000000000 --- a/hack/recipe_e2e/protect/vrg_busybox_primary.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: ramendr.openshift.io/v1alpha1 -kind: VolumeReplicationGroup -metadata: - name: bb - namespace: recipe-test -spec: - async: - schedulingInterval: 1m - pvcSelector: - matchLabels: - appname: busybox - replicationState: primary - s3Profiles: - - minio-cluster1 - - minio-cluster2 - kubeObjectProtection: - recipeRef: - name: recipe-busybox diff --git a/hack/recipe_e2e/scripts/cleanup_s3.sh b/hack/recipe_e2e/scripts/cleanup_s3.sh deleted file mode 100644 index b9f93f8c0..000000000 --- a/hack/recipe_e2e/scripts/cleanup_s3.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -S3_STORE_ALIAS=${1:-minio-cluster1} -NAMESPACE=${2:-recipe-test} -BUCKET=${3:-velero} - -echo "cleanup_s3.sh: S3_STORE_ALIAS: '$S3_STORE_ALIAS' NAMESPACE: '$NAMESPACE' BUCKET: '$BUCKET'" -S3_PATH=$S3_STORE_ALIAS/$BUCKET/$NAMESPACE - -if [[ $(mc ls "$S3_PATH" | wc -l) -gt 0 ]]; then - echo "removing all contents from $S3_PATH" - mc rm -r --force "$S3_PATH" -fi diff --git a/hack/recipe_e2e/scripts/deploy_primary.sh b/hack/recipe_e2e/scripts/deploy_primary.sh deleted file mode 100644 index 05245b359..000000000 --- a/hack/recipe_e2e/scripts/deploy_primary.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash -kubectl apply -f config/ramen_secret_minio.yaml - -kubectl apply -f config/ramen_config.yaml - -kubectl apply -f protect/vrg_busybox_primary.yaml - -kubectl apply -f protect/recipe_busybox.yaml diff --git a/hack/recipe_e2e/scripts/failback.sh b/hack/recipe_e2e/scripts/failback.sh deleted file mode 100644 index de04f47ca..000000000 --- a/hack/recipe_e2e/scripts/failback.sh +++ /dev/null @@ -1,125 +0,0 @@ -#!/bin/bash -# Recipe e2e failback script - -set -x -set -e - -VRG_NAME=bb -VR_NAME=busybox-pvc -PVC_NAME=busybox-pvc -DEPLOYMENT_NAME=busybox -#BUCKET_NAME=velero -NAMESPACE=recipe-test -CLUSTER_TO=cluster1 -CLUSTER_FROM=cluster2 -#MINIO_PROFILE=minio-cluster1 - -INDEX_DATA_READY=0 -#INDEX_DATA_PROTECTED=1 -INDEX_CLUSTER_DATA_READY=2 -INDEX_CLUSTER_DATA_PROTECTED=3 - -# shellcheck source=hack/recipe_e2e/scripts/recipe_e2e_functions.sh -source "scripts/recipe_e2e_functions.sh" -export -f is_restore_hook_successful -export -f get_restore_index -export -f wait_for_vrg_state - -# scenario: cluster1 vrg=secondary, cluster2 vrg=primary - -# verify original states: cluster1 VRG=Secondary, cluster2 VRG=Primary -if [[ $(kubectl get vrg/$VRG_NAME -n $NAMESPACE -o yaml --context $CLUSTER_FROM | grep state: | awk '{print $2}') != "Primary" ]]; then - echo "$CLUSTER_FROM VRG $VRG should be in Primary state to begin Failback. Exiting." 
- exit 1 -fi - -# 1) set cluster1 VRG to secondary (should already be done) -if [[ $(kubectl get vrg/$VRG_NAME -n $NAMESPACE -o yaml --context $CLUSTER_TO | grep state: | awk '{print $2}') != "Secondary" ]]; then - echo "$CLUSTER_TO VRG $VRG should be in Secondary state to begin Failback. Exiting." - exit 1 -fi - -# 2) undeploy application on cluster1 (should already be done) -wait_for_resource_deletion all -n $NAMESPACE --context $CLUSTER_TO - -# 3) Delete the VRG on cluster1 here. Sequence: 1) VR, 2) VRG, 3) PVC (+PV) -# VRG deletion sequence part 1/3: delete VR -if [[ $(kubectl get vr/$VR_NAME -n $NAMESPACE --context $CLUSTER_TO --no-headers | wc -l ) -gt 0 ]]; then - kubectl delete vr/$VR_NAME -n $NAMESPACE --context $CLUSTER_TO -fi - -# VRG deletion sequence part 2/3: delete VRG -if [[ $(kubectl get vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO --no-headers | wc -l) -gt 0 ]]; then - kubectl delete vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO -fi - -# PVC RetainPolicy may change from Retain to Delete after VRG is deleted. Keep Retain policy. -PV_NAME=$(kubectl get pv --context $CLUSTER_TO | grep $NAMESPACE/$PVC_NAME| awk '{print $1}') -RECLAIM_POLICY=$(kubectl get pv/"$PV_NAME" --context $CLUSTER_TO -o=jsonpath="{.spec.persistentVolumeReclaimPolicy}") -if [[ "$RECLAIM_POLICY" != "Retain" ]]; then - echo "changing PV reclaim policy from $RECLAIM_POLICY to Retain" - KUBE_EDITOR="sed -i 's/persistentVolumeReclaimPolicy: $RECLAIM_POLICY/persistentVolumeReclaimPolicy: Retain/g'" kubectl edit pv/"$PV_NAME" --context $CLUSTER_TO -fi - -# VRG deletion sequence part 3/3: delete PVC -if [[ $(kubectl get pvc/$PVC_NAME -n $NAMESPACE --context $CLUSTER_TO | wc -l) -gt 0 ]]; then - # delete PVC - kubectl delete pvc/$PVC_NAME -n $NAMESPACE --context $CLUSTER_TO -fi - -# 4) unfence VRGs; not necessary - -# 5) unfence from VRG kube objects volume data - nothing to be done here - -# 6) wait for cluster2 VRG to have condition ClusterDataProtected=True -wait_for_vrg_condition_status $INDEX_CLUSTER_DATA_PROTECTED True vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM - -# 7) change cluster2 vrg from Primary to Secondary, notify VRG of Failback procedure -KUBE_EDITOR="sed -i 's/replicationState: primary/replicationState: secondary/g'" kubectl edit vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM -kubectl patch vrg/$VRG_NAME --type json -p '[{"op": add, "path":/spec/action, "value": Relocate}]' -n $NAMESPACE --context $CLUSTER_FROM - -# 8) undeploy cluster2 application and wait for it to be deleted -kubectl delete deployment/$DEPLOYMENT_NAME -n $NAMESPACE --context $CLUSTER_FROM - -wait_for_resource_deletion all -n $NAMESPACE --context $CLUSTER_FROM - -# 9a) setup cluster1 VRG again, but start as Secondary -kubectl apply -f failback/vrg_busybox_secondary.yaml --context $CLUSTER_TO - -# 9b) wait for cluster1's VRG to have DataReady status -wait_for_vrg_condition_status $INDEX_DATA_READY True vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO - -# 10) Delete PVCs on cluster2 so VRG can transition to Secondary. 
Sequence: 1) VR, 2) PVC (+PV) -# VRG deletion sequence part 1/2: delete VR -if [[ $(kubectl get vr/$VR_NAME -n $NAMESPACE --context $CLUSTER_FROM --no-headers | wc -l ) -gt 0 ]]; then - kubectl delete vr/$VR_NAME -n $NAMESPACE --context $CLUSTER_FROM -fi - -# VRG deletion sequence part 2/2: delete PVC -if [[ $(kubectl get pvc/$PVC_NAME -n $NAMESPACE --context $CLUSTER_FROM | wc -l) -gt 0 ]]; then - # delete PVC - kubectl delete pvc/$PVC_NAME -n $NAMESPACE --context $CLUSTER_FROM -fi - -# wait for cluster2 VRG to transition to Secondary: -wait_for_vrg_state "Secondary" vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM - -# 11) change cluster1 vrg to Primary -KUBE_EDITOR="sed -i 's/replicationState: secondary/replicationState: primary/g'" kubectl edit vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO -kubectl patch vrg/$VRG_NAME --type json -p '[{"op": add, "path":/spec/action, "value": Relocate}]' -n $NAMESPACE --context $CLUSTER_TO - -# 12) wait for cluster1's VRG to have condition ClusterReadyStatus=True -wait_for_vrg_condition_status $INDEX_CLUSTER_DATA_READY True vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO - -# 13) wait for cluster1's VRG to have condition DataReady=True -wait_for_vrg_condition_status $INDEX_DATA_READY True vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO - -# wait for cluster1 to transition to Primary -wait_for_vrg_state "Primary" vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO - -# wait for new application to come online -kubectl wait deployments/$DEPLOYMENT_NAME -n $NAMESPACE --for condition=available --timeout=60s --context=$CLUSTER_TO - -# TODO: check backups/restores here - -echo "failback successful" diff --git a/hack/recipe_e2e/scripts/failover.sh b/hack/recipe_e2e/scripts/failover.sh deleted file mode 100644 index 11e4ba1f2..000000000 --- a/hack/recipe_e2e/scripts/failover.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash -# Recipe e2e failover script - -set -e - -VRG_NAME=bb -BUCKET_NAME=velero -NAMESPACE=recipe-test -CLUSTER_FROM=cluster1 -CLUSTER_TO=cluster2 -MINIO_PROFILE=minio-cluster1 -DEPLOYMENT_NAME=busybox -WORKING_DIRECTORY=$(pwd) - -# shellcheck source=hack/recipe_e2e/scripts/recipe_e2e_functions.sh -source "scripts/recipe_e2e_functions.sh" -export -f is_restore_hook_successful -export -f get_restore_index -export -f wait_for_vrg_state -export -f wait_for_pv_unbound -export -f wait_for_resource_deletion -export -f wait_for_resource_creation - -set -x - -# ensure cluster is ready for failover -if [[ $(kubectl get vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM --no-headers | wc -l) -eq 0 ]]; then - echo "VRG $VRG_NAME not found in '$NAMESPACE' namespace. Exiting." - exit 1 -fi - -if [[ $(kubectl get vrg/$VRG_NAME -n $NAMESPACE -o yaml --context $CLUSTER_FROM | grep state: | awk '{print $2}') != "Primary" ]]; then - echo "VRG $VRG should be in Primary state to begin Failover. Exiting." 
- exit 1 -fi - -# ensure VRG is in Primary state -wait_for_vrg_state "Primary" vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM - -# create fence on cluster1: change cluster1 vrg from Primary to Secondary -KUBE_EDITOR="sed -i 's/replicationState: primary/replicationState: secondary/g'" kubectl edit vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM -kubectl patch vrg/$VRG_NAME --type json -p '[{"op": add, "path":/spec/action, "value": Failover}]' -n $NAMESPACE --context $CLUSTER_FROM - -# undeploy cluster1 application -kubectl delete deployment/$DEPLOYMENT_NAME -n $NAMESPACE --context $CLUSTER_FROM - -# wait for application to be deleted -wait_for_resource_deletion all -n $NAMESPACE --context $CLUSTER_FROM - -# cluster1 VRG should be Secondary -wait_for_vrg_state "Secondary" vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_FROM - -# switch to cluster2 -kubectl config use-context $CLUSTER_TO - -# change to Ramen directory -cd ../.. - -# deploy Ramen on cluster2 -make deploy-dr-cluster - -# wait for ramen-system to run -kubectl wait deployment.apps/ramen-dr-cluster-operator -n ramen-system --context $CLUSTER_TO --for condition=available --timeout 60s - -# optionally create namespace on Failover cluster -if [[ $(kubectl get namespace $NAMESPACE --no-headers --context $CLUSTER_TO | wc -l) -eq 0 ]]; then - kubectl create namespace $NAMESPACE --context $CLUSTER_TO -fi - -# configure Ramen; this creates Primary VRG -cd "$WORKING_DIRECTORY" -bash scripts/deploy_primary.sh - -# wait for deployment to exist -wait_for_resource_creation deployment/$DEPLOYMENT_NAME -n $NAMESPACE --context $CLUSTER_TO - -# wait for new application to come online -kubectl wait deployment/$DEPLOYMENT_NAME -n $NAMESPACE --for condition=available --timeout=60s --context=$CLUSTER_TO - -# application is ready. 
Verify restore contents and backup hook (order of Workflow matters) -RESTORE_RESULTS_FILE=restore-$NAMESPACE--$VRG_NAME--$RESTORE_NUMBER-logs.gz -RESTORE_INDEX=$(get_restore_index $MINIO_PROFILE/$BUCKET_NAME/$NAMESPACE/$VRG_NAME/kube-objects) - -# verify hook: should run as Backup, not Restore -is_restore_hook_successful $MINIO_PROFILE/$BUCKET_NAME/$NAMESPACE/$VRG_NAME/kube-objects/0/$BUCKET_NAME/backups/$NAMESPACE--$VRG_NAME--0--use-backup-not-restore-restore-0--$MINIO_PROFILE/velero-backup.json - -RESTORE_NUMBER=1 -RESTORE_RESULTS_FILE=restore-$NAMESPACE--$VRG_NAME--$RESTORE_NUMBER-logs.gz -RESTORE_GROUP_1=$MINIO_PROFILE/$BUCKET_NAME/$NAMESPACE/$VRG_NAME/kube-objects/$RESTORE_INDEX/velero/restores/recipe-test--bb--$RESTORE_NUMBER/$RESTORE_RESULTS_FILE -verify_restore_success "$RESTORE_GROUP_1" - -RESTORE_NUMBER=2 -RESTORE_RESULTS_FILE=restore-$NAMESPACE--$VRG_NAME--$RESTORE_NUMBER-logs.gz -RESTORE_GROUP_2=$MINIO_PROFILE/$BUCKET_NAME/$NAMESPACE/$VRG_NAME/kube-objects/$RESTORE_INDEX/velero/restores/recipe-test--bb--$RESTORE_NUMBER/$RESTORE_RESULTS_FILE -verify_restore_success "$RESTORE_GROUP_2" - - # cluster2 vrg should transition to Primary -wait_for_vrg_state "Primary" vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER_TO - -# remove Failover action from old Primary -kubectl patch vrg/$VRG_NAME --type json -p '[{"op": remove, "path":/spec/action}]' -n $NAMESPACE --context $CLUSTER_FROM - -# show S3 contents -mc du -r $MINIO_PROFILE - -# wait for conditions DataReady, ClusterDataReady on cluster2 -echo "failover successful" - diff --git a/hack/recipe_e2e/scripts/protect.sh b/hack/recipe_e2e/scripts/protect.sh deleted file mode 100644 index cb3e64cc7..000000000 --- a/hack/recipe_e2e/scripts/protect.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash -CLUSTER=cluster1 -MINIO_PROFILE=minio-cluster1 -BUCKET_NAME=velero -NAMESPACE=recipe-test -VRG_NAME=bb -BACKUP_GROUP_NAME=instance-resources -BACKUP_HOOK_NAME=service-hooks-pre-backup -BACKUP_START_INDEX=1 -WORKING_DIRECTORY=$(pwd) - -set -x -set -e - -# shellcheck source=hack/recipe_e2e/scripts/recipe_e2e_functions.sh -source "scripts/recipe_e2e_functions.sh" -export -f wait_for_and_check_backup_success -export -f wait_for_vrg_state - -# set context to cluster1 -kubectl config use-context $CLUSTER - -# change to base ramen directory -cd ../.. 
- -# deploy ramen to cluster1 -make deploy-dr-cluster - -# wait for ramen deployment to become available -kubectl wait deployment.apps/ramen-dr-cluster-operator -n ramen-system --context $CLUSTER --for condition=available --timeout=60s - -# optionally create namespace -if [[ $(kubectl get namespace $NAMESPACE --no-headers --context $CLUSTER | wc -l) -eq 0 ]]; then - kubectl create namespace $NAMESPACE --context $CLUSTER -fi - -# cluster1: setup app -application_sample_namespace_name=$NAMESPACE bash hack/minikube-ramen.sh application_sample_deploy $CLUSTER - -cd "$WORKING_DIRECTORY" - -# wait for deployment to become ready -kubectl wait deployments.apps/busybox -n $NAMESPACE --for condition=available --timeout=60s --context=$CLUSTER - -# setup s3 -kubectl apply -f config/ramen_secret_minio.yaml -kubectl apply -f config/ramen_config.yaml - -# create VRG -kubectl apply -f protect/vrg_busybox_primary.yaml -kubectl apply -f protect/recipe_busybox.yaml - -# wait for VRG to become Primary -wait_for_vrg_state "Primary" vrg/$VRG_NAME -n $NAMESPACE --context $CLUSTER - -# check s3 for backups -mc du -r $MINIO_PROFILE - -# wait for backups to be created according to Recipe spec -# backups should be successful (totalItems > 0); 5m replication period by default, starts at index 1 -BACKUP_HOOK=$MINIO_PROFILE/$BUCKET_NAME/$NAMESPACE/$VRG_NAME/kube-objects/$BACKUP_START_INDEX/velero/backups/$NAMESPACE--$VRG_NAME--$BACKUP_START_INDEX--$BACKUP_HOOK_NAME--$MINIO_PROFILE/velero-backup.json -wait_for_and_check_backup_success $BACKUP_HOOK - -BACKUP_RESOURCES=$MINIO_PROFILE/$BUCKET_NAME/$NAMESPACE/$VRG_NAME/kube-objects/$BACKUP_START_INDEX/velero/backups/$NAMESPACE--$VRG_NAME--$BACKUP_START_INDEX--$BACKUP_GROUP_NAME--$MINIO_PROFILE/velero-backup.json -wait_for_and_check_backup_success $BACKUP_RESOURCES - -echo "protection successful" -exit 0 diff --git a/hack/recipe_e2e/scripts/recipe_e2e_functions.sh b/hack/recipe_e2e/scripts/recipe_e2e_functions.sh deleted file mode 100644 index dcee4891d..000000000 --- a/hack/recipe_e2e/scripts/recipe_e2e_functions.sh +++ /dev/null @@ -1,239 +0,0 @@ -#!/bin/bash -# functions used across recipe e2e scripts - -# wait for a file to exist in minio s3 storage, by input path -function wait_for_mc_file() { - local TIMEOUT_MAX=120 - local TIMEOUT=$TIMEOUT_MAX - local INTERVAL=5 - local FILE=${*:3} # usage: 'mc find FILE' - - set +e - - while ((TIMEOUT > 0)); do - if [[ $("${@}" | wc -l) -gt 0 ]]; then - echo "file '$FILE' exists" - set -e - return 0 - fi - echo "file '$FILE' does not yet exist. Waiting $INTERVAL seconds..." 
- sleep $INTERVAL - TIMEOUT=$((TIMEOUT - INTERVAL)) - done - - set -e - echo "file '$FILE' was not created within $TIMEOUT_MAX seconds" - exit 1 -} - -# is_backup_successful FILE (full path to json object) -function is_backup_successful() { - FILE=$1 - - # shellcheck disable=SC2086 - RESULT=$(mc cat $FILE | grep '"phase":' | awk '{print $2}') - if [[ "$RESULT" == '"Completed",' ]]; then - COMPLETED=1 - else - COMPLETED=0 - fi - - # test for backup objects > 0 - BACKUP_INFO=$(mc cat "$FILE" | grep itemsBackedUp) - ITEMS=$(echo "$BACKUP_INFO" | awk '{print $2}') - - if [[ $COMPLETED -eq 1 && $ITEMS -gt 0 ]]; then - echo 1 - else - echo 0 - exit 1 - fi -} - -# is_hook_successful FILE (full path to json object) -function is_restore_hook_successful() { - FILE=$1 - - # shellcheck disable=SC2086 - RESULT=$(mc cat $FILE | grep '"phase":' | awk '{print $2}') - if [[ "$RESULT" == '"Completed",' ]]; then - echo "1" - return 0 - else - echo "0" - echo "restore hook was not successful" - return 1 - fi -} - -function wait_for_and_check_backup_success() { - wait_for_mc_file mc find "$1" - is_backup_successful "$1" - - echo "$1 was created and backed up items successfully" -} - -function wait_for_vrg_state() { - local STATE=$1 - local RESOURCE=${*:2} - - local TIMEOUT_MAX=300 - local TIMEOUT=$TIMEOUT_MAX - local INTERVAL=5 - - while ((TIMEOUT > 0)); do - # shellcheck disable=SC2086 - RESULT=$(kubectl get $RESOURCE -o=jsonpath='{.status.state}') - - if [[ "$RESULT" == "$STATE" ]]; then - echo "'$RESOURCE' found in $STATE state" - return 0 - fi - echo "$RESOURCE not in $STATE state. Waiting $INTERVAL seconds..." - sleep $INTERVAL - TIMEOUT=$((TIMEOUT - INTERVAL)) - done - - echo "$RESOURCE' did not gain $STATE state within $TIMEOUT_MAX seconds" - exit 1 -} - -function wait_for_pv_unbound() { - local RESOURCE=${*} - - local TIMEOUT_MAX=60 - local TIMEOUT=$TIMEOUT_MAX - local INTERVAL=5 - - while ((TIMEOUT > 0)); do - STATE=$(kubectl get pv/"$RESOURCE" --no-headers | awk '{print $5}') - - if [[ "$STATE" == "Available" || "$STATE" == "Released" ]]; then - echo "'$RESOURCE' found in $STATE state" - return 0 - fi - echo "$RESOURCE in $STATE state. Waiting $INTERVAL seconds..." - sleep $INTERVAL - TIMEOUT=$((TIMEOUT - INTERVAL)) - done - - echo "$RESOURCE' did not become Available or Released within $TIMEOUT_MAX seconds" - exit 1 -} - -function get_restore_index() { - if [[ $(mc ls "$1"/0/velero/restores | wc -l) -gt 0 ]]; then - echo "0" - exit 0 - fi - - if [[ $(mc ls "$1"/1/velero/restores | wc -l) -gt 0 ]]; then - echo "1" - exit 0 - fi - - echo "couldn't find restores" - exit 1 -} - -function verify_restore_success() { - FILE=$1 - - # clear existing results - if [[ -e /tmp/$FILE ]]; then - rm /tmp/"$FILE" - fi - - # log files are compressed; unzip them - mc cp "$FILE" /tmp/"$FILE".gz - gunzip /tmp/"$FILE".gz - - # grep for "grep -e "Restored [0-9]* items" -o | awk '{print $2}'" and check > 0 - RESTORED_ITEMS=$(grep /tmp/restore_file -e "Restored [0-9]* items" -o | awk '{print $2}') - - if [[ $RESTORED_ITEMS -gt 0 ]]; then - echo "$FILE restore success: found $RESTORED_ITEMS restored items" - else - echo "$FILE restore failed. Could not find restored items." 
- exit 1 - fi -} - -function wait_for_resource_creation() { - local RESOURCE=${*} - - local TIMEOUT_MAX=60 - local TIMEOUT=$TIMEOUT_MAX - local INTERVAL=5 - - while ((TIMEOUT > 0)); do - # shellcheck disable=SC2086 - COUNT=$(kubectl get $RESOURCE --no-headers | wc -l) - if [[ $COUNT -gt 0 ]]; then - echo "resource '$RESOURCE' exists" - return 0 - fi - echo "Found $COUNT resources with 'kubectl get $RESOURCE'. Waiting $INTERVAL seconds..." - sleep $INTERVAL - TIMEOUT=$((TIMEOUT - INTERVAL)) - done - - echo "$RESOURCE' doesn't exist after $TIMEOUT_MAX seconds" - exit 1 -} - -function wait_for_resource_deletion() { - local RESOURCE=${*} - - local TIMEOUT_MAX=120 - local TIMEOUT=$TIMEOUT_MAX - local INTERVAL=5 - - while ((TIMEOUT > 0)); do - # shellcheck disable=SC2086 - COUNT=$(kubectl get $RESOURCE --no-headers | wc -l) - if [[ $COUNT -eq 0 ]]; then - echo "resource '$RESOURCE' deleted" - return 0 - fi - echo "Found $COUNT resources with 'kubectl get $RESOURCE'. Waiting $INTERVAL seconds..." - sleep $INTERVAL - TIMEOUT=$((TIMEOUT - INTERVAL)) - done - - echo "$RESOURCE' still exists after $TIMEOUT_MAX seconds" - exit 1 -} - -function remove_ramen_finalizers() { - local RESOURCE=${*} - echo "removing Ramen finalizers from $RESOURCE" - KUBE_EDITOR="sed -i 's|- volumereplicationgroups.ramendr.openshift.io/pvc-vr-protection||g'" kubectl edit "$RESOURCE" - KUBE_EDITOR="sed -i 's|- volumereplicationgroups.ramendr.openshift.io/vrg-protection||g'" kubectl edit "$RESOURCE" -} - -# usage: wait_for_vrg_condition_status index condition resource -function wait_for_vrg_condition_status() { - local CONDITION_INDEX=$1 - local CONDITION_STATUS=$2 - local RESOURCE=${*:3} - - local TIMEOUT_MAX=600 - local TIMEOUT=$TIMEOUT_MAX - local INTERVAL=5 - - while ((TIMEOUT > 0)); do - # shellcheck disable=SC2086 - RESULT=$(kubectl get $RESOURCE -o=jsonpath="{.status.conditions[$CONDITION_INDEX].status}") - if [[ "$RESULT" == "$CONDITION_STATUS" ]]; then - echo "$RESOURCE has condition $CONDITION_INDEX status $CONDITION_STATUS" - return 0 - fi - echo "$RESOURCE has condition $CONDITION_INDEX status $RESULT. Waiting $INTERVAL seconds..." 
- sleep $INTERVAL - TIMEOUT=$((TIMEOUT - INTERVAL)) - done - - echo "$RESOURCE' condition $CONDITION_INDEX did not achieve $CONDITION_STATUS after $TIMEOUT_MAX seconds" - exit 1 -} diff --git a/hack/recipe_e2e/scripts/reload_minikube_image.sh b/hack/recipe_e2e/scripts/reload_minikube_image.sh deleted file mode 100644 index 0d3a511d9..000000000 --- a/hack/recipe_e2e/scripts/reload_minikube_image.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -# usage: reload_minikube_image.sh quay.io/ramendr/ramen-operator cluster2 - -set -e -set -x - -IMAGE=${1:-quay.io/ramendr/ramen-operator} -CONTEXT=${2:-cluster1} - -# if most current image already loaded, exit -CURRENT_HOST_IMAGE=$(docker images | grep "$IMAGE" | awk '{print $3}') -CURRENT_MINIKUBE_IMAGE=$(minikube ssh --profile "$CONTEXT" "docker images" | grep "$IMAGE" | awk '{print $3}') - -if [[ "$CURRENT_HOST_IMAGE" == "$CURRENT_MINIKUBE_IMAGE" ]]; then - echo "current image already loaded" - exit 0 -fi - -# clean up existing image -#minikube ssh --profile=$CONTEXT "docker images" | grep $IMAGE -if [[ $(CURRENT_MINIKUBE_IMAGE | wc -l) -gt 0 ]]; then - echo "cleanup existing image" - minikube ssh --profile="$CONTEXT" "docker image rm $IMAGE" -fi - -if [[ -e ~/.minikube/cache/images/"${IMAGE}" ]]; then - echo "removing cached image" - rm ~/.minikube/cache/images/"${IMAGE}" -fi - -if [[ -e "$HOME/.minikube/cache/images/${IMAGE}_latest" ]]; then - echo "removing latest cached image" - rm "$HOME/.minikube/cache/images/${IMAGE}_latest" -fi - -echo "loading new image" -minikube image load "$IMAGE" --profile="$CONTEXT" - -echo "loaded image:" -minikube ssh --profile="$CONTEXT" "docker images | grep $IMAGE" diff --git a/hack/recipe_e2e/scripts/setup.sh b/hack/recipe_e2e/scripts/setup.sh deleted file mode 100644 index 7f1b19faf..000000000 --- a/hack/recipe_e2e/scripts/setup.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -# setup scripts. Run on each cluster -set -x -set -e - -# velero -VELERO_NAMESPACE=velero -CLUSTER=$(kubectl config current-context) -MINIKUBE_IP=$(minikube ip --profile "$CLUSTER") -WORKING_DIRECTORY=$(pwd) -VELERO_INSTALL_DIRECTORY=~/Downloads/velero-v1.9.3-linux-amd64/ - -if [[ $(kubectl get deployment.apps/velero -n $VELERO_NAMESPACE --no-headers | wc -l) -eq 0 ]]; then - if [[ ! -e $VELERO_INSTALL_DIRECTORY ]]; then - set +x - echo "Velero install directory not detected." - echo "Download Velero executable v1.9.3 and unzip to directory $VELERO_INSTALL_DIRECTORY" - echo "Then run command below, then run these scripts again." - echo "cp config/credentials-velero-minikube $VELERO_INSTALL_DIRECTORY" - exit 1 - fi - #echo "Velero 1.9 required. See comments in this file for setup process." 
- cd $VELERO_INSTALL_DIRECTORY - ./velero install \ - --provider aws \ - --plugins velero/velero-plugin-for-aws:v1.2.1 \ - --bucket velero \ - --secret-file ./credentials-velero-minikube \ - --use-volume-snapshots=false \ - --backup-location-config "region=minio,s3ForcePathStyle=true,s3Url=http://$MINIKUBE_IP:9000" - -fi - -# TODO: check minikube IP matches ramen config, opt-out if it matches - -# update ramen_config -cd "$WORKING_DIRECTORY" -sed s/minikube-ip-cluster1/"$MINIKUBE_IP"/g config/ramen_config_base.yaml > config/ramen_config.yaml -sed -i s/minikube-ip-cluster2/"$(minikube ip --profile cluster2)"/g config/ramen_config.yaml - -# recipe -if [[ $(kubectl get crd/recipes.ramendr.openshift.io --no-headers | wc -l) -eq 0 ]]; then - echo "installing recipe CRD" - - kubectl apply -f https://raw.githubusercontent.com/RamenDR/recipe/main/config/crd/bases/ramendr.openshift.io_recipes.yaml -fi - -# mc -if [[ $(command -v mc | wc -l) -eq 0 ]]; then - echo "MC (Minio Client) is required. Download here: https://github.com/minio/mc#binary-download" -fi - -# mc aliases -mc alias set minio-cluster1 http://"$(minikube ip --profile cluster1)":30000 minio minio123 -mc alias set minio-cluster2 http://"$(minikube ip --profile cluster2)":30000 minio minio123 - -# mc buckets -if [[ $(mc ls minio-cluster1 | grep velero -c) -eq 0 ]]; then - mc mb minio-cluster1/velero -fi - -if [[ $(mc ls minio-cluster2 | grep velero -c) -eq 0 ]]; then - mc mb minio-cluster2/velero -fi - -# backube -if [[ $(kubectl get crd | grep replicationdestinations.volsync.backube -c) -eq 0 ]]; then - kubectl apply -f https://raw.githubusercontent.com/backube/volsync/117eec0a92e9b3eb0c042dc4d2bb1853ddcbe07d/config/crd/bases/volsync.backube_replicationdestinations.yaml -fi - -if [[ $(kubectl get crd | grep replicationsources.volsync.backube -c) -eq 0 ]]; then - kubectl apply -f https://raw.githubusercontent.com/backube/volsync/117eec0a92e9b3eb0c042dc4d2bb1853ddcbe07d/config/crd/bases/volsync.backube_replicationsources.yaml -fi - -# load ramen images, recipe images -cd ~/go/src/github.com/tjanssen3/ramen - -if [[ $(kubectl get all -n ramen-system | wc -l) -gt 0 ]]; then - make undeploy-dr-cluster -fi - -cd "$WORKING_DIRECTORY" -bash scripts/reload_minikube_image.sh quay.io/ramendr/ramen-operator "$CLUSTER" -bash scripts/reload_minikube_image.sh quay.io/ramendr/ramen-dr-cluster-operator-bundle "$CLUSTER" -minikube image load controller:latest --profile="$CLUSTER" - diff --git a/hack/recipe_e2e/scripts/teardown.sh b/hack/recipe_e2e/scripts/teardown.sh deleted file mode 100644 index 227e93574..000000000 --- a/hack/recipe_e2e/scripts/teardown.sh +++ /dev/null @@ -1,68 +0,0 @@ -#!/bin/bash -set -x - -CLUSTER=${1:-cluster1} -MINIO_ALIAS=${2:-minio-cluster1} -NAMESPACE=${3:-recipe-test} -#BUCKET=${4:-velero} -VRG_NAME=${5:-bb} -VR_NAME=busybox-pvc -PVC_NAME=busybox-pvc -DEPLOYMENT_NAME=busybox - -BASE_RAMEN_DIRECTORY=../.. 
-WORKING_DIRECTORY=$(pwd) - -# shellcheck source=hack/recipe_e2e/scripts/recipe_e2e_functions.sh -source "scripts/recipe_e2e_functions.sh" - -echo "tearing down cluster Ramen and namespace '$NAMESPACE' in '$CLUSTER' and Minio alias '$MINIO_ALIAS'" - -# delete application -if [[ $(kubectl get all -n "$NAMESPACE" --context "$CLUSTER" | wc -l) -gt 0 ]]; then - echo "undeploying application" - kubectl delete deployment/"$DEPLOYMENT_NAME" -n "$NAMESPACE" --context "$CLUSTER" -fi - -# VRG deletion sequence part 1/3: delete VR -if [[ $(kubectl get vr/"$VR_NAME" -n "$NAMESPACE" --context "$CLUSTER" --no-headers | wc -l ) -gt 0 ]]; then - kubectl delete vr/"$VR_NAME" -n "$NAMESPACE" --context "$CLUSTER" -fi - -# VRG deletion sequence part 2/3: delete VRG -if [[ $(kubectl get vrg/"$VRG_NAME" -n "$NAMESPACE" --context "$CLUSTER" --no-headers | wc -l) -gt 0 ]]; then - kubectl delete vrg/"$VRG_NAME" -n "$NAMESPACE" --context "$CLUSTER" -fi - -# VRG deletion sequence part 3/3: delete PVC -if [[ $(kubectl get pvc/"$PVC_NAME" -n "$NAMESPACE" --context "$CLUSTER" | wc -l) -gt 0 ]]; then - # delete PVC - kubectl delete pvc/"$PVC_NAME" -n "$NAMESPACE" --context "$CLUSTER" -fi - -# delete ramen -if [[ $(kubectl get all -n ramen-system --context "$CLUSTER" | wc -l) -gt 0 ]]; then - echo "undeploying ramen-system" - kubectl config use-context "$CLUSTER" - cd "$BASE_RAMEN_DIRECTORY" || exit - make undeploy-dr-cluster - cd "$WORKING_DIRECTORY" || exit -fi - -# cleanup s3 store - bash scripts/cleanup_s3.sh "$MINIO_ALIAS" "$NAMESPACE" - -# cleanup PVs if Available or Released -for pv in $(kubectl get pv --context "$CLUSTER" | grep "$PVC_NAME" | awk '{print $1}'); -do - STATUS=$(kubectl get pv/"$pv" --context "$CLUSTER" --no-headers | awk '{print $5}') - echo "pv/$pv status: $STATUS. " - - if [[ "$STATUS" == "Available" || "$STATUS" == "Released" ]]; then - kubectl delete pv/"$pv" --context "$CLUSTER" - else - printf "Skipping.\n" - fi -done - -echo "'$CLUSTER' cleanup complete" diff --git a/hack/rook-mirror-secret-template.yaml b/hack/rook-mirror-secret-template.yaml deleted file mode 100644 index 43dcbaa18..000000000 --- a/hack/rook-mirror-secret-template.yaml +++ /dev/null @@ -1,10 +0,0 @@ ---- -apiVersion: v1 -stringData: - pool: - token: -kind: Secret -metadata: - name: - namespace: rook-ceph -type: Opaque diff --git a/hack/shell_option_store_restore.sh b/hack/shell_option_store_restore.sh deleted file mode 100644 index e399c4cb4..000000000 --- a/hack/shell_option_store_restore.sh +++ /dev/null @@ -1,44 +0,0 @@ -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck shell=sh disable=2086 -shell_option_store() -{ - case $- in - *$2*) - eval $1=set\\ -$2 - ;; - *) - eval $1=set\\ +$2 - ;; - esac -} 2>/dev/null -shell_option_disable() -{ - case $- in - *$2*) - set +$2 - eval $1=set\\ -$2 - ;; - *) - unset -v $1 - ;; - esac -} 2>/dev/null -shell_option_enable() -{ - case $- in - *$2*) - unset -v $1 - ;; - *) - set -$2 - eval $1=set\\ +$2 - ;; - esac -} 2>/dev/null -shell_option_restore() -{ - eval \$$1 - unset -v $1 -} 2>/dev/null diff --git a/hack/shio-demo.sh b/hack/shio-demo.sh deleted file mode 100755 index acbc28441..000000000 --- a/hack/shio-demo.sh +++ /dev/null @@ -1,637 +0,0 @@ -#!/bin/sh - -# SPDX-FileCopyrightText: The RamenDR authors -# SPDX-License-Identifier: Apache-2.0 - -# shellcheck disable=1090,1091,2046,2086 -set -e - -# subshell ? 
-if test $(basename -- $0) = shio-demo.sh; then - ramen_hack_directory_path_name=$(dirname -- $0) -else - ramen_hack_directory_path_name=${ramen_hack_directory_path_name-hack} - test -d "$ramen_hack_directory_path_name" - shell_configure() { - unset -f shell_configure - exit_stack_push PS1=\'$PS1\' - PS1='\[\033[01;32m\]$\[\033[00m\] ' - exit_stack_push PS4=\'$PS4\' - PS4=' -$ ' - set +e - } - set -- shell_configure -fi -. $ramen_hack_directory_path_name/exit_stack.sh -exit_stack_push unset -v ramen_hack_directory_path_name -. $ramen_hack_directory_path_name/minikube.sh; exit_stack_push minikube_unset -. $ramen_hack_directory_path_name/true_if_exit_status_and_stderr.sh; exit_stack_push unset -f true_if_exit_status_and_stderr - -json_to_yaml() { - python3 -c 'import sys, yaml, json; print(yaml.dump(json.loads(sys.stdin.read()),default_flow_style=False))' -}; exit_stack_push unset -f json_to_yaml - -command_sequence() { - cat <<-a - #!/bin/sh - - # Deployed already: infrastructure - infra_list - - # Deploy application - app_deploy - - # Protect application - app_protect - s3_objects_list - - # Failover application from cluster1 to cluster2 - app_list cluster2 - app_failover - - # Failback application from cluster2 to cluster1 - app_list cluster1 - app_failback - app_list cluster2 - a -}; exit_stack_push unset -f command_sequence - -infra_deploy() { - $ramen_hack_directory_path_name/minikube-ramen.sh deploy - $ramen_hack_directory_path_name/velero-test.sh velero_deploy cluster1 - $ramen_hack_directory_path_name/velero-test.sh velero_deploy cluster2 - velero_secret_deploy cluster1 - velero_secret_deploy cluster2 - for cluster_name in $s3_store_cluster_names; do - mc alias set $cluster_name $(minikube_minio_url $cluster_name) minio minio123 - done; unset -v cluster_name - app_operator_deploy cluster1 - app_operator_deploy cluster2 - infra_list -}; exit_stack_push unset -f infra_deploy - -infra_list() { - set -x - minikube profile list - kubectl --context cluster1 --namespace ramen-system get deploy - kubectl --context cluster2 --namespace ramen-system get deploy - kubectl --context cluster1 --namespace velero get deploy/velero secret/s3secret - kubectl --context cluster2 --namespace velero get deploy/velero secret/s3secret - mc tree cluster1 - mc tree cluster2 - app_opperator_list cluster1 - app_opperator_list cluster2 - { set +x; } 2>/dev/null -}; exit_stack_push unset -f infra_list - -infra_undeploy() { - app_operator_undeploy cluster2 - app_operator_undeploy cluster1 - velero_secret_undeploy cluster2 - velero_secret_undeploy cluster1 - $ramen_hack_directory_path_name/minikube-ramen.sh undeploy -}; exit_stack_push unset -f infra_undeploy - -velero_secret_yaml() { - kubectl create secret generic s3secret --from-literal aws='[default] -aws_access_key_id=minio -aws_secret_access_key=minio123 -' --dry-run=client -oyaml --namespace velero -}; exit_stack_push unset -f velero_secret_yaml - -velero_secret_deploy() { - velero_secret_yaml|kubectl --context "$1" apply -f - -}; exit_stack_push unset -f velero_secret_deploy - -velero_secret_undeploy() { - velero_secret_yaml|kubectl --context "$1" delete --ignore-not-found -f - -}; exit_stack_push unset -f velero_secret_undeploy - -velero_secret_list() { - velero_secret_yaml|kubectl --context "$1" get -f - -}; exit_stack_push unset -f velero_secret_list - -ns_list() { - kubectl --context "$1" get namespace $2 -}; exit_stack_push unset -f ns_list - -ns_get() { - kubectl --context "$1" get namespace $2 -oyaml -}; exit_stack_push unset -f ns_get - 
-namespace_yaml() { - cat <<-a - --- - apiVersion: v1 - kind: Namespace - metadata: - name: $1 - a -}; exit_stack_push unset -f namespace_yaml - -app_operator_namespace_name=o -app_operator_recipe_name=r -exit_stack_push unset -v app_operator_namespace_name -exit_stack_push unset -v app_operator_recipe_name - -app_operator_yaml() { - namespace_yaml $app_operator_namespace_name - app_operator_recipe_yaml -}; exit_stack_push unset -f app_operator_yaml - -app_operator_deploy() { - app_operator_yaml|kubectl --context "$1" apply -f - -}; exit_stack_push unset -f app_operator_deploy - -app_operator_undeploy() { - app_operator_yaml|kubectl --context "$1" delete --ignore-not-found -f - -}; exit_stack_push unset -f app_operator_undeploy - -app_operator_list() { - app_operator_yaml|kubectl --context "$1" get -f - -}; exit_stack_push unset -f app_operator_list - -app_namespace_0_name=a -app_namespace_1_name=asdf -app_namespace_2_name=b -app_namespace_names=$app_namespace_0_name\ $app_namespace_1_name\ $app_namespace_2_name -exit_stack_push unset -f app_namespace_0_name app_namespace_1_name app_namespace_2_name app_namespace_names - -app_label_key=appname -app_label_value=busybox -app_label=$app_label_key=$app_label_value -app_label_yaml=$app_label_key:\ $app_label_value -app_labels_yaml="\ - labels: - $app_label_yaml" -exit_stack_push unset -v app_label_key app_label_value app_label app_label_yaml app_labels_yaml - -app_namespace_yaml() { - namespace_yaml $1 - echo "$app_labels_yaml" -}; exit_stack_push unset -f app_namespace_yaml - -app_namespaces_yaml() { - for namespace_name in $app_namespace_names; do - app_namespace_yaml $namespace_name - done; unset -v namespace_name -}; exit_stack_push unset -f app_namespaces_yaml - -app_namespaces_deploy() { - app_namespaces_yaml|kubectl --context "$1" apply -f - -}; exit_stack_push unset -f app_namespaces_deploy - -app_namespaces_undeploy() { - app_namespaces_yaml|kubectl --context "$1" delete --ignore-not-found -f - -}; exit_stack_push unset -f app_namespaces_undeploy - -app_naked_pod_name=busybox -app_clothed_pod_name=busybox -app_configmap_name=asdf -app_secret_name=$app_configmap_name -exit_stack_push unset -v app_naked_pod_name app_clothed_pod_name app_configmap_name app_secret_name - -app_namespaced_yaml() { - echo --- - kubectl --dry-run=client -oyaml --namespace "$1" create -k https://github.com/RamenDR/ocm-ramen-samples/busybox - echo --- - kubectl --dry-run=client -oyaml --namespace "$1" create configmap $app_configmap_name - echo "$app_labels_yaml" - echo --- - kubectl --dry-run=client -oyaml --namespace "$1" create secret generic $app_secret_name --from-literal=key1=value1 - echo "$app_labels_yaml" - echo --- - kubectl --dry-run=client -oyaml --namespace "$1" run $app_naked_pod_name -l$app_label --image busybox -- sh -c while\ true\;do\ date\;sleep\ 60\;done -}; exit_stack_push unset -v app_namespaced_yaml - -app_less_namespaces_yaml() { - for namespace_name in $app_namespace_names; do - app_namespaced_yaml $namespace_name - done; unset -v namespace_name -}; exit_stack_push unset -v app_less_namespaces_yaml - -app_yaml() { - app_namespaces_yaml - app_less_namespaces_yaml -}; exit_stack_push unset -v app_yaml - -app_deploy() { - set -- cluster1 - app_yaml|kubectl --context "$1" apply -f - - app_pvs_label "$1" - app_list "$1" -}; exit_stack_push unset -f app_deploy - -app_pvs_label() { - for namespace_name in $app_namespace_names; do - kubectl --context "$1" --namespace $namespace_name -l$app_label wait pvc --for jsonpath='{.status.phase}'=Bound - 
kubectl --context "$1" label $(pv_names_claimed_by_namespace "$1" $namespace_name) $app_label --overwrite - done; unset -v namespace_name -}; exit_stack_push unset -f app_pvs_label - -app_less_namespaces_undeploy() { - app_less_namespaces_yaml|kubectl --context "$1" delete --ignore-not-found -f - - app_list $1 -}; exit_stack_push unset -f app_less_namespaces_undeploy - -app_undeploy() { - app_yaml|kubectl --context "$1" delete --ignore-not-found -f - - app_list $1 -}; exit_stack_push unset -f app_undeploy - -app_operator_recipe_yaml() { - cat <<-a - --- - apiVersion: ramendr.openshift.io/v1alpha1 - kind: Recipe - metadata: - namespace: $app_operator_namespace_name - name: $app_operator_recipe_name - spec: - appType: "" - volumes: - includedNamespaces: - - \$ns0 - - \$ns1_2 - name: "" - type: volume - groups: - - includedNamespaces: - - \$ns0 - - \$ns1_2 - name: "" - type: resource - - excludedResourceTypes: - - deploy - - po - - pv - - rs - - volumereplications - - vrg - name: everything-but-deploy-po-pv-rs-vr-vrg - type: resource - - includedResourceTypes: - - deployments - - pods - labelSelector: - matchExpressions: - - key: pod-template-hash - operator: DoesNotExist - name: deployments-and-naked-pods - type: resource - hooks: - - name: busybox1 - namespace: \$ns1 - type: exec - labelSelector: - matchExpressions: - - key: pod-template-hash - operator: Exists - ops: - - name: date - container: $app_clothed_pod_name - command: - - date - - name: busybox0 - namespace: \$ns0 - type: exec - labelSelector: - matchExpressions: - - key: pod-template-hash - operator: DoesNotExist - ops: - - name: fail-succeed - container: $app_naked_pod_name - command: - - sh - - -c - - "rm /tmp/a||! touch /tmp/a" - captureWorkflow: - sequence: - - group: "" - recoverWorkflow: - sequence: - - group: everything-but-deploy-po-pv-rs-vr-vrg - - group: deployments-and-naked-pods - - hook: busybox0/fail-succeed - a -# TODO restore once PR 871 is merged -# - hook: busybox1/date -}; exit_stack_push unset -f app_operator_recipe_yaml - -app_operator_recipe_get() { - kubectl --context "$1" --namespace $app_operator_namespace_name get -oyaml recipe/$app_operator_recipe_name -}; exit_stack_push unset -f app_operator_recipe_get - -app_list() { - app_list_custom "$1" --show-labels - echo - app_list_custom "$1" --sort-by=.metadata.creationTimestamp\ -\ -ocustom-columns=Kind:.kind,Namespace:.metadata.namespace,Name:.metadata.name,CreationTime:.metadata.creationTimestamp\ - -}; exit_stack_push unset -f app_list - -app_list_custom() { - kubectl --context "$1" -A -l$app_label get ns,cm,secret,deploy,rs,po,pvc,pv,recipe,vrg,vr $2 -}; exit_stack_push unset -f app_list_custom - -vrg_namespace_name=ramen-system -exit_stack_push unset -v vrg_namespace_name - -vrg_apply() { - vrg_appendix=" - kubeObjectProtection: - captureInterval: 1m - recipeRef: - namespace: $app_operator_namespace_name - name: $app_operator_recipe_name - recipeParameters: - ns0: - - $app_namespace_0_name - ns1: - - $app_namespace_1_name - ns1_2: - - $app_namespace_1_name - - $app_namespace_2_name$3${4:+ - action: $4}"\ - cluster_names=$s3_store_cluster_names\ - $ramen_hack_directory_path_name/minikube-ramen.sh application_sample_vrg_deploy$2 "$1" "$vrg_namespace_name" "$app_label_yaml" -}; exit_stack_push unset -f vrg_apply - -vrg_deploy() { - vrg_apply "$1" "$2" "$3" $4 - vrg_list "$1" -}; exit_stack_push unset -f vrg_deploy - -vrg_deploy_failover() { - vrg_deploy "$1" "$2" "$3" Failover -}; exit_stack_push unset -f vrg_deploy_failover - -vrg_deploy_relocate() { - 
vrg_deploy "$1" "$2" "$3" Relocate -}; exit_stack_push unset -f vrg_deploy_relocate - -vrg_undeploy() { - cluster_names=$s3_store_cluster_names $ramen_hack_directory_path_name/minikube-ramen.sh application_sample_vrg_undeploy "$1" "$vrg_namespace_name" -}; exit_stack_push unset -f vrg_undeploy - -vrg_demote() { - vrg_deploy_$2 "$1" _sec -# time kubectl --context "$1" --namespace "$vrg_namespace_name" wait vrg/bb --for condition=clusterdataprotected=false -}; exit_stack_push unset -f vrg_demote - -vrg_final_sync() { - vrg_apply $1 '' ' - prepareForFinalSync: true' - time kubectl --context "$1" --namespace "$vrg_namespace_name" wait vrg/bb --for jsonpath='{.status.prepareForFinalSyncComplete}'=true - vrg_apply $1 '' ' - runFinalSync: true' - time kubectl --context "$1" --namespace "$vrg_namespace_name" wait vrg/bb --for jsonpath='{.status.finalSyncComplete}'=true -}; exit_stack_push unset -f vrg_final_sync - -vrg_fence() { - vrg_demote "$1" failover -}; exit_stack_push unset -f vrg_fence - -vrg_finalizer0_remove() { - true_if_exit_status_and_stderr 1 'Error from server (NotFound): volumereplicationgroups.ramendr.openshift.io "bb" not found' \ - kubectl --context "$1" --namespace "$vrg_namespace_name" patch vrg/bb --type json -p '[{"op":remove, "path":/metadata/finalizers/0}]' -}; exit_stack_push unset -f vrg_finalizer0_remove - -vr_finalizer0_remove() { - true_if_exit_status_and_stderr 1 'Error from server (NotFound): volumereplications.replication.storage.openshift.io "busybox-pvc" not found' \ - kubectl --context "$1" --namespace "$vrg_namespace_name" patch volumereplication/busybox-pvc --type json -p '[{"op":remove, "path":/metadata/finalizers/0}]' -}; exit_stack_push unset -f vr_finalizer0_remove - -vrg_get() { - kubectl --context "$1" --namespace "$vrg_namespace_name" get vrg/bb --ignore-not-found -oyaml -}; exit_stack_push unset -f vrg_get - -vrg_spec_get() { - kubectl --context "$1" --namespace "$vrg_namespace_name" get vrg/bb -ojsonpath='{.spec}'|jq -}; exit_stack_push unset -f vrg_spec_get - -vrg_conditions_get() { - kubectl --context "$1" --namespace "$vrg_namespace_name" get vrg/bb -ojsonpath='{.status.conditions}'|jq -}; exit_stack_push unset -f vrg_conditions_get - -vrg_list() { - set -x - kubectl --context "$1" --namespace "$vrg_namespace_name" get vrg/bb --ignore-not-found - { set +x;} 2>/dev/null -}; exit_stack_push unset -f vrg_list - -vrg_get_s3() { - mc cp -q $(app_s3_object_name_prefix $1)v1alpha1.VolumeReplicationGroup/a /tmp/a.json.gz;gzip -df /tmp/a.json.gz;json_to_yaml /dev/null - vr_label -}; exit_stack_push unset -f vrg_primary_status_wait - -vr_label() { - for namespace_name in $app_namespace_names; do - set -x - kubectl --context "$1" --namespace "$namespace_name" label vr/busybox-pvc "$app_label" --overwrite - { set +x;} 2>/dev/null - done; unset -v namespace_name -}; exit_stack_push unset -f vr_label - -vr_get() { - kubectl --context "$1" --selector "$app_label" get volumereplication/busybox-pvc --ignore-not-found -oyaml -}; exit_stack_push unset -f vr_get - -vr_list() { - kubectl --context "$1" --selector "$app_label" get volumereplication/busybox-pvc --ignore-not-found -}; exit_stack_push unset -f vr_list - -vr_delete() { - kubectl --context "$1" --selector "$app_label" delete volumereplication/busybox-pvc --ignore-not-found -}; exit_stack_push unset -f vr_delete - -pvc_get() { - kubectl --context "$1" --selector "$app_label" get pvc/busybox-pvc --ignore-not-found -oyaml -}; exit_stack_push unset -f pvc_get - -pv_names_claimed_by_namespace() { - kubectl 
--context "$1" get pv -ojsonpath='{range .items[?(@.spec.claimRef.namespace=="'$2'")]} pv/{.metadata.name}{end}' -}; exit_stack_push unset -f pv_names_claimed_by_namespace - -pv_names() { - kubectl --context "$1" get pv -ojsonpath='{range .items[?(@.spec.claimRef.name=="busybox-pvc")]} pv/{.metadata.name}{end}' -}; exit_stack_push unset -f pv_names - -pv_list() { - kubectl --context "$1" get $(pv_names $1) --show-kind -}; exit_stack_push unset -f pv_list - -pv_get() { - kubectl --context "$1" get $(pv_names $1) -oyaml -}; exit_stack_push unset -f pv_get - -pv_delete() { - kubectl --context "$1" delete $(pv_names $1) -}; exit_stack_push unset -f pv_delete - -pv_unretain() { - kubectl --context "$1" patch $(pv_names $1) --type json -p '[{"op":add, "path":/spec/persistentVolumeReclaimPolicy, "value":Delete}]' -}; exit_stack_push unset -f pv_unretain - -app_protect() { - set -- cluster1 - vrg_deploy $1 - vrg_primary_status_wait $1 -# app_protection_info 1 -}; exit_stack_push unset -f app_protect - -app_unprotect() { - vrg_undeploy $1 - kubectl --context "$1" --namespace "$vrg_namespace_name" delete events --all - velero_kube_objects_list $1 - s3_objects_list -}; exit_stack_push unset -f app_unprotect - -app_failover() { - set -- cluster1 cluster2 - vrg_fence $1 - app_recover $2 failover -}; exit_stack_push unset -f app_failover - -app_failback() { - set -- cluster1 cluster2 - app_undeploy_failback $1 failover -# vrg_final_sync $2 - app_undeploy_failback $2 relocate app_recover_failback\ $1\ $2 -}; exit_stack_push unset -f app_failback - -app_recover() { - app_namespaces_deploy $1 - vrg_deploy_$2 $1 - vrg_primary_status_wait $1 - app_list $1 -}; exit_stack_push unset -f app_recover - -app_undeploy_failback() { - vrg_demote $1 $2 - # "PVC not being deleted. 
Not ready to become Secondary"
-    set -x
-    time kubectl --context "$1" --namespace "$vrg_namespace_name" wait vrg/bb --timeout -1s --for condition=clusterdataprotected
-    { set +x; } 2>/dev/null
-    time app_less_namespaces_undeploy $1& # pvc finalizer remains until vrg deletes its vr
-    set -x
-    time kubectl --context "$1" --namespace "$vrg_namespace_name" wait vrg/bb --timeout -1s --for jsonpath='{.status.state}'=Secondary
-    { set +x; } 2>/dev/null
-    $3
-    vrg_undeploy $1&
-    time wait
-    time app_namespaces_undeploy $1
-}; exit_stack_push unset -f app_undeploy_failback
-
-app_recover_failback() {
-    # "VolumeReplication resource for the pvc as Secondary is in sync with Primary"
-    set -x
-    time kubectl --context "$2" --namespace "$vrg_namespace_name" wait vrg/bb --timeout -1s --for condition=dataprotected
-    { set +x; } 2>/dev/null
-    app_recover "$1" relocate
-}; exit_stack_push unset -f app_recover_failback
-
-app_velero_kube_object_name=$vrg_namespace_name--bb--
-exit_stack_push unset -v app_velero_kube_object_name
-
-s3_objects_list() {
-    for cluster_name in $s3_store_cluster_names; do
-        mc tree $cluster_name
-        mc ls $cluster_name --recursive
-    done; unset -v cluster_name
-}; exit_stack_push unset -f s3_objects_list
-
-s3_objects_delete() {
-    for cluster_name in $s3_store_cluster_names; do
-        mc rm $cluster_name/bucket/ --recursive --force\
-        ||true # https://github.com/minio/mc/issues/3868
-    done; unset -v cluster_name
-}; exit_stack_push unset -f s3_objects_list
-
-app_s3_object_name_prefix() {
-    echo $1/bucket/$vrg_namespace_name/bb/
-}; exit_stack_push unset -f app_s3_object_name_prefix
-
-app_s3_object_name_prefix_velero() {
-    echo $(app_s3_object_name_prefix $2)kube-objects/$1/velero/
-}; exit_stack_push unset -f app_s3_object_name_prefix_velero
-
-app_s3_objects_delete() {
-    for cluster_name in $s3_store_cluster_names; do
-        mc rm $(app_s3_object_name_prefix $cluster_name) --recursive --force\
-        ||true # https://github.com/minio/mc/issues/3868
-    done; unset -v cluster_name
-}; exit_stack_push unset -f app_objects_delete
-
-app_protection_info() {
-    for cluster_name in $s3_store_cluster_names; do
-        set -- "$1" $(app_s3_object_name_prefix_velero "$1" $cluster_name) $app_velero_kube_object_name$1----minio-on-$cluster_name
-        velero_backup_log $2 $3
-        velero_backup_backup_object $2 $3
-        velero_backup_resource_list $2 $3
-    done; unset -v cluster_name
-}; exit_stack_push unset -f app_protection_info
-
-app_recovery_info() {
-    for cluster_name in $s3_store_cluster_names; do
-        set -- "$1" "$2" $(app_s3_object_name_prefix_velero "$1" $cluster_name) $app_velero_kube_object_name$2
-        velero_restore_log $3 $4
-        velero_restore_results $3 $4
-    done; unset -v cluster_name
-}; exit_stack_push unset -f app_recovery_info
-
-velero_backup_backup_object() {
-    mc cp -q $1backups/$2/velero-backup.json /tmp/$2-velero-backup.json;json_to_yaml &2 &
-    tee_pid=$!
-    case $- in *e*) e=-e; set +e;; *) e= ; esac
-    "$@" 2>$stderr_pipe_name1
-    set "$e" -- $?
-    unset -v e
-    stderr=$(cat $stderr_pipe_name2)
-    wait $tee_pid
-    unset -v tee_pid
-    rm -f $stderr_pipe_name1 $stderr_pipe_name2
-    unset -v stderr_pipe_name1 stderr_pipe_name2
-    if test "$1" -eq "$exit_status_expected" && test "$stderr" = "$stderr_expected"; then
-        set -- 0
-    fi
-    unset -v stderr stderr_expected exit_status_expected
-    return "$1"
-}
diff --git a/hack/uidmap-install.sh b/hack/uidmap-install.sh
deleted file mode 100755
index ed744587e..000000000
--- a/hack/uidmap-install.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-
-# SPDX-FileCopyrightText: The RamenDR authors
-# SPDX-License-Identifier: Apache-2.0
-
-# shellcheck disable=1091
-if ! command -v newuidmap
-then
-    . /etc/os-release
-    case ${NAME} in
-    "Ubuntu")
-        set +e
-        sudo apt-get install -y uidmap
-        # dpkg: error processing package lvm2 (--configure):
-        # installed lvm2 package post-installation script subprocess returned error exit status 1
-        set -e
-        ;;
-    esac
-fi
diff --git a/hack/until_true_or_n.sh b/hack/until_true_or_n.sh
deleted file mode 100644
index 97dc83dba..000000000
--- a/hack/until_true_or_n.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-# SPDX-FileCopyrightText: The RamenDR authors
-# SPDX-License-Identifier: Apache-2.0
-
-# shellcheck shell=sh disable=2086
-until_true_or_n()
-{
-    { case ${-} in *x*) set +x; x='unset -v x; set -x';; esac; } 2>/dev/null
-    n=${1}
-    shift
-    date
-    until test ${n} -eq 0 || "${@}"
-    do
-        sleep 1
-        n=$((n-1))
-    done
-    date
-    unset -v n
-    eval ${x}
-    "${@}"
-}
diff --git a/hack/velero-install.sh b/hack/velero-install.sh
deleted file mode 100755
index 033210063..000000000
--- a/hack/velero-install.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-# SPDX-FileCopyrightText: The RamenDR authors
-# SPDX-License-Identifier: Apache-2.0
-
-# shellcheck disable=2046,2086
-set -ex
-set -- ${1:?} $(dirname $0)
-set -- $1 $2 $(cd $2/..;go list -m github.com/vmware-tanzu/velero|cut -d\ -f2
-|cut -c2-
-|tr . \ )
-if command -v $1/velero; then
-    IFS=. read -r x y z <<-a
-    $(velero version --client-only|cut -zf2|cut -dv -f2)
-    a
-    if test $x -gt $3 ||\
-    { test $x -eq $3 &&\
-    { test $y -gt $4 ||\
-    { test $y -eq $4 &&\
-    test $z -ge $5;};};}
-    then
-        : exit
-    fi
-fi
-set -- $1 $2 $3.$4 v$3.$4.$5
-set -- $1 $2 $3 $4 velero-$4-linux-amd64
-curl --silent --show-error --location https://github.com/vmware-tanzu/velero/releases/download/$4/$5.tar.gz|tar -xzC$1 --strip-components 1 $5/velero
-#wget --quiet --directory-prefix $2/test\
-curl --silent --show-error --location --remote-name-all --remote-time --output-dir $2/test\
-    https://raw.githubusercontent.com/vmware-tanzu/velero/release-$3/config/crd/v1/bases/velero.io_backups.yaml\
-    https://raw.githubusercontent.com/vmware-tanzu/velero/release-$3/config/crd/v1/bases/velero.io_backupstoragelocations.yaml\
-    https://raw.githubusercontent.com/vmware-tanzu/velero/release-$3/config/crd/v1/bases/velero.io_restores.yaml\
-
diff --git a/hack/velero-test.sh b/hack/velero-test.sh
deleted file mode 100755
index a8ea3b73a..000000000
--- a/hack/velero-test.sh
+++ /dev/null
@@ -1,315 +0,0 @@
-#!/bin/sh
-
-# SPDX-FileCopyrightText: The RamenDR authors
-# SPDX-License-Identifier: Apache-2.0
-
-# shellcheck disable=1090,1091,2046,2086
-set -e
-ramen_hack_directory_path_name=$(dirname $0)
-. $ramen_hack_directory_path_name/exit_stack.sh
-exit_stack_push unset -v ramen_hack_directory_path_name
-. $ramen_hack_directory_path_name/minikube.sh
-exit_stack_push minikube_unset
-velero_directory_path_name=~/.local/bin
-
-. $ramen_hack_directory_path_name/until_true_or_n.sh
-
-velero_crds_kubectl()
-{
-    kubectl --context $1 $2\
-    -f https://raw.githubusercontent.com/vmware-tanzu/velero/main/config/crd/v1/bases/velero.io_backupstoragelocations.yaml\
-    -f https://raw.githubusercontent.com/vmware-tanzu/velero/main/config/crd/v1/bases/velero.io_backups.yaml\
-    -f https://raw.githubusercontent.com/vmware-tanzu/velero/main/config/crd/v1/bases/velero.io_restores.yaml\
-
-}
-velero_deploy()
-{
-    set -- $1 apply
-    velero_crds_kubectl $1 $2
-    # https://github.com/vmware-tanzu/velero/blob/main/pkg/install/resources.go
-    # https://github.com/vmware-tanzu/velero/blob/main/pkg/install/deployment.go
-    cat <<-a|kubectl --context $1 $2 -f -
-    ---
-    apiVersion: v1
-    kind: Namespace
-    metadata:
-      name: velero
-    ---
-    apiVersion: rbac.authorization.k8s.io/v1
-    kind: ClusterRoleBinding
-    metadata:
-      name: velero
-    subjects:
-    - kind: ServiceAccount
-      namespace: velero
-      name: velero
-    roleRef:
-    - apiGroup: rbac.authorization.k8s.io
-      kind: ClusterRole
-      name: cluster-admin
-    a
-}
-velero_undeploy()
-{
-    set -- $1 delete
-    velero_crds_kubectl $1 $2
-}
-velero_deploy()
-{
-    set -- $1 $velero_directory_path_name
-    $ramen_hack_directory_path_name/velero-install.sh $2
-    $2/velero --kubecontext $1 install\
-    --no-secret\
-    --no-default-backup-location\
-    --use-volume-snapshots=false\
-    --plugins velero/velero-plugin-for-aws:v1.4.0\
-
-}
-velero_undeploy()
-{
-    date
-    kubectl --context $1 delete namespace/velero clusterrolebinding/velero
-    date
-    kubectl --context $1 delete crds -l component=velero
-}
-velero_undeploy()
-{
-    velero --kubecontext $1 uninstall --force
-}
-minio_deploy()
-{
-    $ramen_hack_directory_path_name/ocm-minikube-ramen.sh rook_ceph_deploy_spoke\ $1 minio_deploy\ $1
-}
-velero_backup()
-{
-    # TODO get s3 configuration from ramen config map and secret
-    cat <<-a|kubectl --context $1 -n velero apply -f -
-    ---
-    apiVersion: v1
-    kind: Secret
-    metadata:
-      name: s3secret
-    stringData:
-      aws: |
-        [default]
-        aws_access_key_id=$3
-        aws_secret_access_key=$4
-    ---
-    apiVersion: velero.io/v1
-    kind: BackupStorageLocation
-    metadata:
-      name: l
-    spec:
-      provider: aws
-      objectStorage:
-        bucket: $5
-      config:
-        region: us-east-1
-        s3ForcePathStyle: "true"
-        s3Url: $2
-      credential:
-        name: s3secret
-        key: aws
-    ---
-    apiVersion: velero.io/v1
-    kind: Backup
-    metadata:
-      name: b
-    spec:
-      storageLocation: l$7
-    a
-    #until_true_or_n 30 eval test \"\$\(kubectl --context $1 -n velero get backups/b -ojsonpath='{.status.phase}'\)\" = Completed
-    minio_bucket_list $6 $2 $3 $4 $5
-}
-velero_backup_namespace()
-{
-    velero_backup $1 $3 $4 $5 $6 $7 "
-      includedNamespaces:
-      - $2
-      includedResources:
-      - po
-      labelSelector:
-        matchExpressions:
-        - key: pod-template-hash
-          operator: Exists"
-}
-velero_backup_dummy()
-{
-    velero_backup $1 $2 $3 $4 $5 $6 "
-      includedNamespaces:
-      - velero
-      includedResources:
-      - secrets
-      labelSelector:
-        matchLabels:
-          dummyKey: dummyValue"
-}
-velero_restore_backup()
-{
-    velero_backup_dummy $1 $2 $3 $4 $5 $5 $7
-    velero_restore_create $1 b b
-}
-velero_restore_create()
-{
-    cat <<-a|kubectl --context $1 -n velero apply -f -
-    ---
-    apiVersion: velero.io/v1
-    kind: Restore
-    metadata:
-      name: $2
-    spec:
-      backupName: $3
-    a
-# until_true_or_n 30 eval test \"\$\(kubectl --context $1 -n velero get restores/$2 -ojsonpath='{.status.phase}'\)\" = Completed
-}
-velero_backup_delete()
-{
-    cat <<-a|kubectl --context $1 -n velero apply -f -
-    ---
-    apiVersion: velero.io/v1
-    kind: DeleteBackupRequest
-    metadata:
-      name: $2
-    spec:
-      backupName: $3
-    a
-# until_true_or_n 30 eval test \"\$\(kubectl --context $1 -n velero get restores/$2 -ojsonpath='{.status.phase}'\)\" = Completed
-}
-minio_bucket_list()
-{
-    # TODO install mc
-    mc alias set $1 $2 $3 $4
-    mc ls $1/$5/backups/b/
-    mc tree $1/$5
-}
-s3_username=minio
-s3_password=minio123
-s3_bucket_name=bucket
-velero_backup_test()
-{
-    objects_deploy $1 $3
-#    velero_deploy $1
-#    minio_deploy $2
-    velero_backup_namespace $1 $3 $(minikube_minio_url $2) $s3_username $s3_password $s3_bucket_name $2
-#    velero_undeploy $1
-}
-velero_restore_test()
-{
-#    velero_deploy $1
-    velero_restore_backup $1 $(minikube_minio_url $1) $s3_username $s3_password $s3_bucket_name $1
-#    velero_undeploy $1
-}
-velero_test()
-{
-    set -- cluster2 cluster2 default
-    set -- cluster1 cluster2 default
-    velero_backup_test $1 $2 $3
-    velero_restore_test $2
-}
-velero_objects_get()
-{
-    velero --kubecontext $1 get backup-locations
-    velero --kubecontext $1 get backups
-    velero --kubecontext $1 get restores
-}
-velero_objects_delete()
-{
-    velero --kubecontext $1 delete --all --confirm restores
-    velero --kubecontext $1 delete --all --confirm backups
-    velero --kubecontext $1 delete --all --confirm backup-locations
-}
-namespace_deploy()
-{
-    kubectl create --dry-run=client -oyaml namespace $2|kubectl --context $1 apply -f-
-}
-namespace_objects_get()
-{
-    kubectl --context $1 -n$2 get vrg,configmaps,secrets,deployments,replicasets,pods,pvc,pv
-}
-get()
-{
-    velero_objects_get $1
-    namespace_objects_get $1 $2
-}
-objects_kubectl()
-{
-#    $(kubectl create --dry-run=client -oyaml -n$2 configmap asdf)
-#    ---
-#    $(kubectl create --dry-run=client -oyaml -n$2 secret generic asdf --from-literal=asdf1=asdf2)
-#    ---
-    cat <<-a|kubectl --context $1 $3 -f-
-    ---
-    $(kubectl create --dry-run=client -oyaml namespace $2)
-    ---
-    $(kubectl create --dry-run=client -oyaml -n$2 deploy asdf --image busybox -- sh -c while\ true\;do\ date\;sleep\ 60\;done)
-    ---
-    a
-}
-objects_deploy()
-{
-    objects_kubectl $1 $2 apply
-}
-objects_undeploy()
-{
-    objects_kubectl $1 $2 delete\ --ignore-not-found
-}
-vrg_kubectl()
-{
-    cat <<-a|kubectl --context $1 $3 -f-
-    ---
-    apiVersion: ramendr.openshift.io/v1alpha1
-    kind: VolumeReplicationGroup
-    metadata:
-      name: bb
-      namespace: $2
-    spec:
-      async:
-        replicationClassSelector: {}
-        schedulingInterval: 1m
-      pvcSelector:
-        matchLabels:
-          appname: busybox
-      replicationState: primary
-      s3Profiles:
-      # - minio-on-cluster1
-      - minio-on-hub
-    a
-}
-vrg_deploy()
-{
-    vrg_kubectl $1 $2 apply
-}
-vrg_undeploy()
-{
-    vrg_kubectl $1 $2 delete
-}
-failover1()
-{
-    set -- $cluster_names $namespace_name
-    undeploy $2 $3
-    vrg_deploy $1 $3
-}
-failover2()
-{
-    set -- $cluster_names $namespace_name
-    undeploy $1 $3
-    vrg_deploy $2 $3
-}
-od1() { set -- $cluster_names; objects_deploy $1 $namespace_name; }
-od2() { set -- $cluster_names; objects_deploy $2 $namespace_name; }
-ou1() { set -- $cluster_names; objects_undeploy $1 $namespace_name; }
-ou2() { set -- $cluster_names; objects_undeploy $2 $namespace_name; }
-d1() { set -- $cluster_names; vrg_deploy $1 $namespace_name; }
-d2() { set -- $cluster_names; vrg_deploy $2 $namespace_name; }
-u1() { set -- $cluster_names; vrg_undeploy $1 $namespace_name; }
-u2() { set -- $cluster_names; vrg_undeploy $2 $namespace_name; }
-g1() { set -- $cluster_names; namespace_objects_get $1 $namespace_name; }
-g2() { set -- $cluster_names; namespace_objects_get $2 $namespace_name; }
-vg1() { set -- $cluster_names; velero_objects_get $1; }
-vg2() { set -- $cluster_names; velero_objects_get $2; }
-vu1() { set -- $cluster_names; velero_objects_delete $1; }
-vu2() { set -- $cluster_names; velero_objects_delete $2; }
-cluster_names=cluster1\ hub
-namespace_name=asdf
-set -x
-"${@:-velero_test}"
diff --git a/hack/velero-uninstall.sh b/hack/velero-uninstall.sh
deleted file mode 100755
index 3dcbd21a3..000000000
--- a/hack/velero-uninstall.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/sh
-
-# SPDX-FileCopyrightText: The RamenDR authors
-# SPDX-License-Identifier: Apache-2.0
-
-# shellcheck disable=2086
-set -x
-set -e
-rm -f $1/velero
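
For reference, the phase checks left commented out in the removed velero-test.sh relied on the until_true_or_n retry helper deleted above. A minimal usage sketch of that helper, not part of the patch itself; the path hack/until_true_or_n.sh is the pre-deletion location, and the kube context cluster1 and Backup name b are taken from the removed script:

    # Source the retry helper as it existed before this deletion (assumed path).
    . hack/until_true_or_n.sh
    # Poll once per second, up to 30 times, until the Backup reports phase Completed;
    # the helper returns the exit status of the final attempt.
    until_true_or_n 30 eval test \"\$\(kubectl --context cluster1 -n velero get backups/b -ojsonpath='{.status.phase}'\)\" = Completed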