From f853f0b8ba6cfc8779f0249ab7614490deb98e06 Mon Sep 17 00:00:00 2001
From: "Joe M."
Date: Wed, 18 Dec 2024 09:24:28 -0800
Subject: [PATCH] VEC-454 TLS small refactor, remove TLS footgun, multi-role
 handling (#83)

* feat: enhance GKE setup script with configurable node types and counts for
  AVS and Aerospike nodes
---
 kubernetes/.gitignore                     |   2 -
 kubernetes/full-create-and-install-gke.sh | 160 ++++--
 kubernetes/logs/avs-insecure              | 379 -------
 kubernetes/logs/avs-real-insecure         | 276 ---------
 kubernetes/logs/avs-secure                |  69 ---
 kubernetes/logs/eks-avs-secure            | 654 ----------
 6 files changed, 100 insertions(+), 1440 deletions(-)
 delete mode 100644 kubernetes/.gitignore
 delete mode 100644 kubernetes/logs/avs-insecure
 delete mode 100644 kubernetes/logs/avs-real-insecure
 delete mode 100644 kubernetes/logs/avs-secure
 delete mode 100644 kubernetes/logs/eks-avs-secure

diff --git a/kubernetes/.gitignore b/kubernetes/.gitignore
deleted file mode 100644
index 7fda01b..0000000
--- a/kubernetes/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-features.conf
-temp-helm
\ No newline at end of file

diff --git a/kubernetes/full-create-and-install-gke.sh b/kubernetes/full-create-and-install-gke.sh
index ce4b5e7..edeae5c 100755
--- a/kubernetes/full-create-and-install-gke.sh
+++ b/kubernetes/full-create-and-install-gke.sh
@@ -12,9 +12,15 @@ WORKSPACE="$(pwd)"
 PROJECT_ID="$(gcloud config get-value project)"
 # Prepend the current username to the cluster name
 USERNAME=$(whoami)
-CHART_VERSION="0.7.0"
+CHART_VERSION="0.6.0"
+REVERSE_DNS_AVS=""

 # Default values
 DEFAULT_CLUSTER_NAME_SUFFIX="avs"
+DEFAULT_MACHINE_TYPE="n2d-standard-4"
+DEFAULT_NUM_AVS_NODES=3
+DEFAULT_NUM_QUERY_NODES=0
+DEFAULT_NUM_INDEX_NODES=0
+DEFAULT_NUM_AEROSPIKE_NODES=3

 # Function to display the script usage
 usage() {
@@ -22,22 +28,47 @@
     echo "Options:"
     echo "  --chart-location, -l       If specified expects a local directory for AVS Helm chart (default: official repo)"
     echo "  --cluster-name, -c         Override the default cluster name (default: ${USERNAME}-${PROJECT_ID}-${DEFAULT_CLUSTER_NAME_SUFFIX})"
-    echo "  --run-insecure, -i         Run setup cluster without auth or tls. No argument required."
+    echo "  --machine-type, -m         Specify the machine type (default: ${DEFAULT_MACHINE_TYPE})"
+    echo "  --num-avs-nodes, -a        Specify the number of AVS nodes (default: ${DEFAULT_NUM_AVS_NODES})"
+    echo "  --num-query-nodes, -q      Specify the number of AVS query nodes (default: ${DEFAULT_NUM_QUERY_NODES})"
+    echo "  --num-index-nodes, -i      Specify the number of AVS index nodes (default: ${DEFAULT_NUM_INDEX_NODES})"
+    echo "  --num-aerospike-nodes, -s  Specify the number of Aerospike nodes (default: ${DEFAULT_NUM_AEROSPIKE_NODES})"
+    echo "  --run-insecure, -I         Run setup cluster without auth or TLS. No argument required."
     echo "  --help, -h                 Show this help message"
     exit 1
 }
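# Example invocation (illustrative only; the cluster name and machine type
# below are hypothetical, but the flags match the options documented above):
#
#   ./full-create-and-install-gke.sh \
#       --cluster-name my-avs \
#       --machine-type n2d-standard-8 \
#       --num-query-nodes 2 \
#       --num-index-nodes 1 \
#       --run-insecure
#
# Per the guard added below, --num-query-nodes/--num-index-nodes currently
# require --run-insecure, and NUM_AVS_NODES is then derived as query + index
# (2 + 1 = 3 AVS nodes in this example).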
echo " --help, -h Show this help message" exit 1 } # Parse command line arguments -while [[ "$#" -gt 0 ]]; do +while [[ "$#" -gt 0 ]]; +do case $1 in --chart-location|-l) CHART_LOCATION="$2"; shift 2 ;; --cluster-name|-c) CLUSTER_NAME_OVERRIDE="$2"; shift 2 ;; - --run-insecure|-i) RUN_INSECURE=1; shift ;; # just flag no argument - --help|-h) usage ;; # Display the help/usage if --help or -h is passed - *) echo "Unknown parameter passed: $1"; usage ;; # Unknown parameter triggers usage + --machine-type|-m) MACHINE_TYPE="$2"; shift 2 ;; + --num-avs-nodes|-a) NUM_AVS_NODES="$2"; shift 2 ;; + --num-query-nodes|-q) NUM_QUERY_NODES="$2"; NODE_TYPES=1; shift 2 ;; + --num-index-nodes|-i) NUM_INDEX_NODES="$2"; NODE_TYPES=1; shift 2 ;; + --num-aerospike-nodes|-s) NUM_AEROSPIKE_NODES="$2"; shift 2 ;; + --run-insecure|-I) RUN_INSECURE=1; shift ;; + --help|-h) usage ;; + *) echo "Unknown parameter passed: $1"; + usage ;; esac done +if [ -n "$NODE_TYPES" ] +then + if ((RUN_INSECURE != 1 && NODE_TYPES == 1)); then + echo "Error: This script has a limitation that it cannot currently use both node types and secure mode. For secure deployments please do not set num-query-nodes nor num-index-nodes." + exit 1 + fi + + echo "setting number of nodes equal to query + index nodes" + NUM_AVS_NODES=$((NUM_QUERY_NODES + NUM_INDEX_NODES)) +fi + + + # Function to print environment variables for verification print_env() { echo "Environment Variables:" @@ -49,12 +80,15 @@ print_env() { echo "export FEATURES_CONF=$FEATURES_CONF" echo "export CHART_LOCATION=$CHART_LOCATION" echo "export RUN_INSECURE=$RUN_INSECURE" + echo "export MACHINE_TYPE=$MACHINE_TYPE" + echo "export NUM_AVS_NODES=$NUM_AVS_NODES" + echo "export NUM_QUERY_NODES=$NUM_QUERY_NODES" + echo "export NUM_INDEX_NODES=$NUM_INDEX_NODES" + echo "export NUM_AEROSPIKE_NODES=$NUM_AEROSPIKE_NODES" } - # Function to set environment variables set_env_variables() { - # Use provided cluster name or fallback to the default if [ -n "$CLUSTER_NAME_OVERRIDE" ]; then export CLUSTER_NAME="${USERNAME}-${CLUSTER_NAME_OVERRIDE}" @@ -67,7 +101,12 @@ set_env_variables() { export ZONE="us-central1-c" export FEATURES_CONF="$WORKSPACE/features.conf" export BUILD_DIR="$WORKSPACE/generated" - export REVERSE_DNS_AVS + export REVERSE_DNS_AVS="does.not.exist" + export MACHINE_TYPE="${MACHINE_TYPE:-${DEFAULT_MACHINE_TYPE}}" + export NUM_AVS_NODES="${NUM_AVS_NODES:-${DEFAULT_NUM_AVS_NODES}}" + export NUM_QUERY_NODES="${NUM_QUERY_NODES:-${DEFAULT_NUM_QUERY_NODES}}" + export NUM_INDEX_NODES="${NUM_INDEX_NODES:-${DEFAULT_NUM_INDEX_NODES}}" + export NUM_AEROSPIKE_NODES="${NUM_AEROSPIKE_NODES:-${DEFAULT_NUM_AEROSPIKE_NODES}}" } reset_build() { @@ -76,13 +115,14 @@ reset_build() { mv -f "$BUILD_DIR" "$temp_dir" fi mkdir -p "$BUILD_DIR/input" "$BUILD_DIR/output" "$BUILD_DIR/secrets" "$BUILD_DIR/certs" "$BUILD_DIR/manifests" - cp "$FEATURES_CONF" "$BUILD_DIR/secrets/features.conf" - cp "$WORKSPACE/manifests/avs-values.yaml" "$BUILD_DIR/manifests/avs-values.yaml" + cp "$FEATURES_CONF" "$BUILD_DIR/secrets/features.conf" + cp "$WORKSPACE/manifests/avs-values.yaml" "$BUILD_DIR/manifests/avs-values.yaml" cp "$WORKSPACE/manifests/aerospike-cr.yaml" "$BUILD_DIR/manifests/aerospike-cr.yaml" -# override aerospike-cr.yaml with secure version if run insecure not specified + # override aerospike-cr.yaml with secure version if run insecure not specified if [[ "${RUN_INSECURE}" != 1 ]]; then cp $WORKSPACE/manifests/aerospike-cr-auth.yaml $BUILD_DIR/manifests/aerospike-cr.yaml + cp 
$WORKSPACE/manifests/avs-values-auth.yaml $BUILD_DIR/manifests/avs-values.yaml fi } @@ -283,66 +323,56 @@ create_gke_cluster() { if ! gcloud container clusters describe "$CLUSTER_NAME" --zone "$ZONE" &> /dev/null; then echo "Cluster $CLUSTER_NAME does not exist. Creating..." else - echo "Cluster $CLUSTER_NAME already exists. Skipping creation." + # currently erroring early if cluster already exists. If you would like to recreate remove this block + echo "Error: Cluster $CLUSTER_NAME already exists. Please use a new cluster name or delete the existing cluster." return fi echo "$(date '+%Y-%m-%d %H:%M:%S') - Starting GKE cluster creation..." - if ! gcloud container clusters create "$CLUSTER_NAME" \ + gcloud container clusters create "$CLUSTER_NAME" \ --project "$PROJECT_ID" \ --zone "$ZONE" \ --num-nodes 1 \ --disk-type "pd-standard" \ - --disk-size "100"; then - echo "Failed to create GKE cluster" - exit 1 - else - echo "GKE cluster created successfully." - fi + --disk-size "100"; echo "Creating Aerospike node pool..." - if ! gcloud container node-pools create "$NODE_POOL_NAME_AEROSPIKE" \ + gcloud container node-pools create "$NODE_POOL_NAME_AEROSPIKE" \ --cluster "$CLUSTER_NAME" \ --project "$PROJECT_ID" \ --zone "$ZONE" \ - --num-nodes 3 \ + --num-nodes "$NUM_AEROSPIKE_NODES" \ --local-ssd-count 2 \ --disk-type "pd-standard" \ --disk-size "100" \ - --machine-type "n2d-standard-32"; then - echo "Failed to create Aerospike node pool" - exit 1 - else - echo "Aerospike node pool added successfully." - fi + --machine-type "$MACHINE_TYPE"; echo "Labeling Aerospike nodes..." kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_AEROSPIKE" -o name | \ xargs -I {} kubectl label {} aerospike.com/node-pool=default-rack --overwrite echo "Adding AVS node pool..." - if ! gcloud container node-pools create "$NODE_POOL_NAME_AVS" \ + gcloud container node-pools create "$NODE_POOL_NAME_AVS" \ --cluster "$CLUSTER_NAME" \ --project "$PROJECT_ID" \ --zone "$ZONE" \ - --num-nodes 3 \ + --num-nodes "$NUM_AVS_NODES" \ --disk-type "pd-standard" \ --disk-size "100" \ - --machine-type "n2d-standard-32"; then - echo "Failed to create AVS node pool" - exit 1 - else - echo "AVS node pool added successfully." - fi + --machine-type "$MACHINE_TYPE"; echo "Labeling AVS nodes..." kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_AVS" -o name | \ xargs -I {} kubectl label {} aerospike.com/node-pool=avs --overwrite - echo "Setting up namespaces..." + kubectl create namespace aerospike || true + kubectl create namespace avs || true + } setup_aerospike() { + echo "Setting up namespaces..." + kubectl create namespace aerospike || true # Idempotent namespace creation echo "Deploying Aerospike Kubernetes Operator (AKO)..." @@ -396,7 +426,7 @@ setup_aerospike() { # Function to setup AVS node pool and namespace setup_avs() { - kubectl create namespace avs + kubectl create namespace avs || true echo "Setting secrets for AVS cluster..." kubectl --namespace avs create secret generic auth-secret --from-literal=password='admin123' @@ -431,27 +461,39 @@ get_reverse_dns() { echo "Reverse DNS: $REVERSE_DNS_AVS" } -# Function to deploy AVS Helm chart deploy_avs_helm_chart() { echo "Deploying AVS Helm chart..." 
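# A sketch of what deploy_avs_helm_chart below installs (release names,
# role settings, and values files are taken from the function; the replica
# counts come from the command-line flags):
#   with node types:    avs-app        -> node-roles[0]=query,        replicaCount=$NUM_QUERY_NODES
#                       avs-app-update -> node-roles[0]=index-update, replicaCount=$NUM_INDEX_NODES
#   without node types: avs-app        -> no role override,           replicaCount=$NUM_AVS_NODES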
-# Function to deploy AVS Helm chart
 deploy_avs_helm_chart() {
     echo "Deploying AVS Helm chart..."
     helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm
     helm repo update
-# Installs AVS query nodes
-    helm install avs-app aerospike-helm/aerospike-vector-search\
-    --set replicaCount=2 \
-    --set aerospikeVectorSearchConfig.cluster.node-roles[0]=query \
-    --values $BUILD_DIR/manifests/avs-values.yaml \
-    --namespace avs\
-    --version $CHART_VERSION\
-    --atomic --wait
-# Install AVS index-update node
-    helm install avs-app-update aerospike-helm/aerospike-vector-search\
-    --set replicaCount=1 \
-    --set aerospikeVectorSearchConfig.cluster.node-roles[0]=index-update \
-    --values $BUILD_DIR/manifests/avs-values.yaml \
-    --namespace avs\
-    --version $CHART_VERSION\
-    --atomic --wait
+
+    # Install AVS nodes (split into query and index-update releases when node types are requested)
+    if [ -n "$NODE_TYPES" ]; then
+        if (( NUM_QUERY_NODES > 0 )); then
+            helm install avs-app aerospike-helm/aerospike-vector-search \
+                --set replicaCount="$NUM_QUERY_NODES" \
+                --set aerospikeVectorSearchConfig.cluster.node-roles[0]=query \
+                --values $BUILD_DIR/manifests/avs-values.yaml \
+                --namespace avs \
+                --version $CHART_VERSION \
+                --atomic --wait
+        fi
+        if (( NUM_INDEX_NODES > 0 )); then
+            helm install avs-app-update aerospike-helm/aerospike-vector-search \
+                --set replicaCount="$NUM_INDEX_NODES" \
+                --set aerospikeVectorSearchConfig.cluster.node-roles[0]=index-update \
+                --values $BUILD_DIR/manifests/avs-values.yaml \
+                --namespace avs \
+                --version $CHART_VERSION \
+                --atomic --wait
+        fi
+    else
+        helm install avs-app aerospike-helm/aerospike-vector-search \
+            --set replicaCount="$NUM_AVS_NODES" \
+            --values $BUILD_DIR/manifests/avs-values.yaml \
+            --namespace avs \
+            --version $CHART_VERSION \
+            --atomic --wait
+    fi
 }

 # Function to setup monitoring
@@ -484,25 +526,23 @@ print_final_instructions() {
     echo "Setup Complete!"
 }

-
-
 #This script runs in this order.
 main() {
     set_env_variables
     print_env
     reset_build
     create_gke_cluster
-    setup_aerospike
     deploy_istio
     get_reverse_dns

     if [[ "${RUN_INSECURE}" != 1 ]]; then
         generate_certs
     fi

+    setup_aerospike
     setup_avs
     deploy_avs_helm_chart
     setup_monitoring
     print_final_instructions
 }

-# Run the main function
-main
+
+main
\ No newline at end of file

diff --git a/kubernetes/logs/avs-insecure b/kubernetes/logs/avs-insecure
deleted file mode 100644
index e2b266f..0000000
--- a/kubernetes/logs/avs-insecure
+++ /dev/null
@@ -1,379 +0,0 @@
-+ trap 'echo "Error: $?
at line $LINENO" >&2' ERR -++ pwd -+ WORKSPACE=/home/joem/src/aerospike-vector/kubernetes -++ gcloud config get-value project -+ PROJECT_ID=performance-eco -++ whoami -+ USERNAME=joem -+ CHART_VERSION=0.7.0 -+ DEFAULT_CLUSTER_NAME_SUFFIX=avs -+ [[ 3 -gt 0 ]] -+ case $1 in -+ RUN_INSECURE=1 -+ shift -+ [[ 2 -gt 0 ]] -+ case $1 in -+ CLUSTER_NAME_OVERRIDE=avs-insecure2 -+ shift 2 -+ [[ 0 -gt 0 ]] -+ main -+ set_env_variables -+ '[' -n avs-insecure2 ']' -+ export CLUSTER_NAME=joem-avs-insecure2 -+ CLUSTER_NAME=joem-avs-insecure2 -+ export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ export NODE_POOL_NAME_AVS=avs-pool -+ NODE_POOL_NAME_AVS=avs-pool -+ export ZONE=us-central1-c -+ ZONE=us-central1-c -+ export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ export BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ export REVERSE_DNS_AVS -+ print_env -+ echo 'Environment Variables:' -Environment Variables: -+ echo 'export PROJECT_ID=performance-eco' -export PROJECT_ID=performance-eco -+ echo 'export CLUSTER_NAME=joem-avs-insecure2' -export CLUSTER_NAME=joem-avs-insecure2 -+ echo 'export NODE_POOL_NAME_AEROSPIKE=aerospike-pool' -export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ echo 'export NODE_POOL_NAME_AVS=avs-pool' -export NODE_POOL_NAME_AVS=avs-pool -+ echo 'export ZONE=us-central1-c' -export ZONE=us-central1-c -+ echo 'export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf' -export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ echo 'export CHART_LOCATION=' -export CHART_LOCATION= -+ echo 'export RUN_INSECURE=1' -export RUN_INSECURE=1 -+ reset_build -+ '[' -d /home/joem/src/aerospike-vector/kubernetes/generated ']' -++ mktemp -d /tmp/avs-deploy-previous.XXXXXX -+ temp_dir=/tmp/avs-deploy-previous.seR081 -+ mv -f /home/joem/src/aerospike-vector/kubernetes/generated /tmp/avs-deploy-previous.seR081 -+ mkdir -p /home/joem/src/aerospike-vector/kubernetes/generated/input /home/joem/src/aerospike-vector/kubernetes/generated/output /home/joem/src/aerospike-vector/kubernetes/generated/secrets /home/joem/src/aerospike-vector/kubernetes/generated/certs /home/joem/src/aerospike-vector/kubernetes/generated/manifests -+ cp /home/joem/src/aerospike-vector/kubernetes/features.conf /home/joem/src/aerospike-vector/kubernetes/generated/secrets/features.conf -+ docker run --rm -v /home/joem/src/aerospike-vector/kubernetes:/workdir -w /workdir mikefarah/yq e '.aerospikeVectorSearchConfig.cluster *= (load("manifests/avs-values-role-query.yaml"))' /workdir/manifests/avs-values.yaml -+ docker run --rm -v /home/joem/src/aerospike-vector/kubernetes:/workdir -w /workdir mikefarah/yq e '.aerospikeVectorSearchConfig.cluster *= (load("manifests/avs-values-role-update.yaml"))' /workdir/manifests/avs-values.yaml -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/aerospike-cr.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/ -+ [[ 1 != 1 ]] -+ create_gke_cluster -+ gcloud container clusters describe joem-avs-insecure2 --zone us-central1-c -+ echo 'Cluster joem-avs-insecure2 does not exist. Creating...' -Cluster joem-avs-insecure2 does not exist. Creating... -++ date '+%Y-%m-%d %H:%M:%S' -+ echo '2024-12-05 16:57:32 - Starting GKE cluster creation...' -2024-12-05 16:57:32 - Starting GKE cluster creation... 
-+ gcloud container clusters create joem-avs-insecure2 --project performance-eco --zone us-central1-c --num-nodes 1 --disk-type pd-standard --disk-size 100 -Note: The Kubelet readonly port (10255) is now deprecated. Please update your workloads to use the recommended alternatives. See https://cloud.google.com/kubernetes-engine/docs/how-to/disable-kubelet-readonly-port for ways to check usage and for migration instructions. -Note: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s). -Creating cluster joem-avs-insecure2 in us-central1-c... -.........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................done. -Created [https://container.googleapis.com/v1/projects/performance-eco/zones/us-central1-c/clusters/joem-avs-insecure2]. -To inspect the contents of your cluster, go to: https://console.cloud.google.com/kubernetes/workload_/gcloud/us-central1-c/joem-avs-insecure2?project=performance-eco -kubeconfig entry generated for joem-avs-insecure2. -NAME LOCATION MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS -joem-avs-insecure2 us-central1-c 1.30.5-gke.1699000 35.188.115.25 e2-medium 1.30.5-gke.1699000 1 RUNNING -+ echo 'GKE cluster created successfully.' -GKE cluster created successfully. -+ echo 'Creating Aerospike node pool...' -Creating Aerospike node pool... -+ gcloud container node-pools create aerospike-pool --cluster joem-avs-insecure2 --project performance-eco --zone us-central1-c --num-nodes 3 --local-ssd-count 2 --disk-type pd-standard --disk-size 100 --machine-type n2d-standard-32 -Creating node pool aerospike-pool... 
-.......................................................................................................................................................................................................................................................................................................................................................................................................................done. -Created [https://container.googleapis.com/v1/projects/performance-eco/zones/us-central1-c/clusters/joem-avs-insecure2/nodePools/aerospike-pool]. -NAME MACHINE_TYPE DISK_SIZE_GB NODE_VERSION -aerospike-pool n2d-standard-32 100 1.30.5-gke.1699000 -+ echo 'Aerospike node pool added successfully.' -Aerospike node pool added successfully. -+ echo 'Labeling Aerospike nodes...' -Labeling Aerospike nodes... -+ xargs -I '{}' kubectl label '{}' aerospike.com/node-pool=default-rack --overwrite -+ kubectl get nodes -l cloud.google.com/gke-nodepool=aerospike-pool -o name -node/gke-joem-avs-insecure2-aerospike-pool-e5f993df-c6nd labeled -node/gke-joem-avs-insecure2-aerospike-pool-e5f993df-g3np labeled -node/gke-joem-avs-insecure2-aerospike-pool-e5f993df-pxzw labeled -+ echo 'Adding AVS node pool...' -Adding AVS node pool... -+ gcloud container node-pools create avs-pool --cluster joem-avs-insecure2 --project performance-eco --zone us-central1-c --num-nodes 3 --disk-type pd-standard --disk-size 100 --machine-type n2d-standard-32 -Creating node pool avs-pool... -.............................................................................................................................................................................................................................................................................................................................................................................................done. -Created [https://container.googleapis.com/v1/projects/performance-eco/zones/us-central1-c/clusters/joem-avs-insecure2/nodePools/avs-pool]. -NAME MACHINE_TYPE DISK_SIZE_GB NODE_VERSION -avs-pool n2d-standard-32 100 1.30.5-gke.1699000 -+ echo 'AVS node pool added successfully.' -AVS node pool added successfully. -+ echo 'Labeling AVS nodes...' -Labeling AVS nodes... -+ kubectl get nodes -l cloud.google.com/gke-nodepool=avs-pool -o name -+ xargs -I '{}' kubectl label '{}' aerospike.com/node-pool=avs --overwrite -node/gke-joem-avs-insecure2-avs-pool-30c30d84-5jmx labeled -node/gke-joem-avs-insecure2-avs-pool-30c30d84-5xhh labeled -node/gke-joem-avs-insecure2-avs-pool-30c30d84-lkrf labeled -+ echo 'Setting up namespaces...' -Setting up namespaces... -+ setup_aerospike -+ kubectl create namespace aerospike -namespace/aerospike created -+ echo 'Deploying Aerospike Kubernetes Operator (AKO)...' -Deploying Aerospike Kubernetes Operator (AKO)... -+ kubectl get ns olm -+ echo 'Installing OLM...' -Installing OLM... 
-+ curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh -+ bash -s v0.25.0 -customresourcedefinition.apiextensions.k8s.io/catalogsources.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/clusterserviceversions.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/installplans.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/olmconfigs.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/operatorconditions.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/operatorgroups.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/operators.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/subscriptions.operators.coreos.com created -customresourcedefinition.apiextensions.k8s.io/catalogsources.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/clusterserviceversions.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/installplans.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/olmconfigs.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/operatorconditions.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/operatorgroups.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/operators.operators.coreos.com condition met -customresourcedefinition.apiextensions.k8s.io/subscriptions.operators.coreos.com condition met -namespace/olm created -namespace/operators created -serviceaccount/olm-operator-serviceaccount created -clusterrole.rbac.authorization.k8s.io/system:controller:operator-lifecycle-manager created -clusterrolebinding.rbac.authorization.k8s.io/olm-operator-binding-olm created -olmconfig.operators.coreos.com/cluster created -deployment.apps/olm-operator created -deployment.apps/catalog-operator created -clusterrole.rbac.authorization.k8s.io/aggregate-olm-edit created -clusterrole.rbac.authorization.k8s.io/aggregate-olm-view created -operatorgroup.operators.coreos.com/global-operators created -operatorgroup.operators.coreos.com/olm-operators created -clusterserviceversion.operators.coreos.com/packageserver created -catalogsource.operators.coreos.com/operatorhubio-catalog created -Waiting for deployment "olm-operator" rollout to finish: 0 of 1 updated replicas are available... -deployment "olm-operator" successfully rolled out -deployment "catalog-operator" successfully rolled out -Package server phase: Installing -Package server phase: Succeeded -deployment "packageserver" successfully rolled out -+ kubectl get subscription my-aerospike-kubernetes-operator --namespace operators -+ echo 'Installing AKO subscription...' -Installing AKO subscription... -+ kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml -subscription.operators.coreos.com/my-aerospike-kubernetes-operator created -+ echo 'Waiting for AKO to be ready...' -Waiting for AKO to be ready... -+ true -+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager -+ echo 'AKO setup is still in progress...' -AKO setup is still in progress... -+ sleep 10 -+ true -+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager -+ echo 'AKO setup is still in progress...' -AKO setup is still in progress... 
-+ sleep 10 -+ true -+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager -+ echo 'AKO setup is still in progress...' -AKO setup is still in progress... -+ sleep 10 -+ true -+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager -+ echo 'AKO is ready.' -AKO is ready. -+ kubectl --namespace operators wait --for=condition=available --timeout=180s deployment/aerospike-operator-controller-manager -deployment.apps/aerospike-operator-controller-manager condition met -+ break -+ echo 'Granting permissions to the target namespace...' -Granting permissions to the target namespace... -+ kubectl --namespace aerospike create serviceaccount aerospike-operator-controller-manager -serviceaccount/aerospike-operator-controller-manager created -+ kubectl create clusterrolebinding aerospike-cluster --clusterrole=aerospike-cluster --serviceaccount=aerospike:aerospike-operator-controller-manager -clusterrolebinding.rbac.authorization.k8s.io/aerospike-cluster created -+ echo 'Setting secrets for Aerospike cluster...' -Setting secrets for Aerospike cluster... -+ kubectl --namespace aerospike create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets -secret/aerospike-secret created -+ kubectl --namespace aerospike create secret generic auth-secret --from-literal=password=admin123 -secret/auth-secret created -+ kubectl --namespace aerospike create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs -secret/aerospike-tls created -+ echo 'Adding storage class...' -Adding storage class... -+ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/master/config/samples/storage/gce_ssd_storage_class.yaml -storageclass.storage.k8s.io/ssd created -+ echo 'Deploying Aerospike cluster...' -Deploying Aerospike cluster... -+ kubectl apply -f /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml -aerospikecluster.asdb.aerospike.com/aerocluster created -+ deploy_istio -+ echo 'Deploying Istio' -Deploying Istio -+ helm repo add istio https://istio-release.storage.googleapis.com/charts -"istio" has been added to your repositories -+ helm repo update -Hang tight while we grab the latest from your chart repositories... -...Successfully got an update from the "jetstack" chart repository -...Successfully got an update from the "istio" chart repository -...Successfully got an update from the "aerospike-io" chart repository -...Successfully got an update from the "stable" chart repository -Update Complete. ⎈Happy Helming!⎈ -+ helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait -NAME: istio-base -LAST DEPLOYED: Thu Dec 5 17:07:34 2024 -NAMESPACE: istio-system -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -Istio base successfully installed! - -To learn more about the release, try: - $ helm status istio-base -n istio-system - $ helm get all istio-base -n istio-system -+ helm install istiod istio/istiod --namespace istio-system --create-namespace --wait -NAME: istiod -LAST DEPLOYED: Thu Dec 5 17:07:46 2024 -NAMESPACE: istio-system -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -"istiod" successfully installed! 
- -To learn more about the release, try: - $ helm status istiod -n istio-system - $ helm get all istiod -n istio-system - -Next steps: - * Deploy a Gateway: https://istio.io/latest/docs/setup/additional-setup/gateway/ - * Try out our tasks to get started on common configurations: - * https://istio.io/latest/docs/tasks/traffic-management - * https://istio.io/latest/docs/tasks/security/ - * https://istio.io/latest/docs/tasks/policy-enforcement/ - * Review the list of actively supported releases, CVE publications and our hardening guide: - * https://istio.io/latest/docs/releases/supported-releases/ - * https://istio.io/latest/news/security/ - * https://istio.io/latest/docs/ops/best-practices/security/ - -For further documentation see https://istio.io website -+ helm install istio-ingress istio/gateway --values ./manifests/istio/istio-ingressgateway-values.yaml --namespace istio-ingress --create-namespace --wait -NAME: istio-ingress -LAST DEPLOYED: Thu Dec 5 17:07:59 2024 -NAMESPACE: istio-ingress -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -"istio-ingress" successfully installed! - -To learn more about the release, try: - $ helm status istio-ingress -n istio-ingress - $ helm get all istio-ingress -n istio-ingress - -Next steps: - * Deploy an HTTP Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/ - * Deploy an HTTPS Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/ -+ kubectl apply -f manifests/istio/gateway.yaml -gateway.networking.istio.io/avs-gw created -+ kubectl apply -f manifests/istio/avs-virtual-service.yaml -virtualservice.networking.istio.io/avs-vs created -+ get_reverse_dns -++ kubectl get svc istio-ingress -n istio-ingress -o 'jsonpath={.status.loadBalancer.ingress[0].ip}' -+ INGRESS_IP=34.28.28.145 -++ dig +short -x 34.28.28.145 -+ REVERSE_DNS_AVS=145.28.28.34.bc.googleusercontent.com. -+ echo 'Reverse DNS: 145.28.28.34.bc.googleusercontent.com.' -Reverse DNS: 145.28.28.34.bc.googleusercontent.com. -+ [[ 1 != 1 ]] -+ setup_avs -+ kubectl create namespace avs -namespace/avs created -+ echo 'Setting secrets for AVS cluster...' -Setting secrets for AVS cluster... -+ kubectl --namespace avs create secret generic auth-secret --from-literal=password=admin123 -secret/auth-secret created -+ kubectl --namespace avs create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs -secret/aerospike-tls created -+ kubectl --namespace avs create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets -secret/aerospike-secret created -+ deploy_avs_helm_chart -+ echo 'Deploying AVS Helm chart...' -Deploying AVS Helm chart... -+ helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm -"aerospike-helm" has been added to your repositories -+ helm repo update -Hang tight while we grab the latest from your chart repositories... -...Successfully got an update from the "istio" chart repository -...Successfully got an update from the "jetstack" chart repository -...Successfully got an update from the "aerospike-io" chart repository -...Successfully got an update from the "aerospike-helm" chart repository -...Successfully got an update from the "stable" chart repository -Update Complete. 
⎈Happy Helming!⎈ -+ helm install avs-app-query --set replicaCount=2 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-update.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait -NAME: avs-app-query -LAST DEPLOYED: Thu Dec 5 17:09:16 2024 -NAMESPACE: avs -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: - -+ helm install avs-app-update --set replicaCount=1 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-query.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait -NAME: avs-app-update -LAST DEPLOYED: Thu Dec 5 17:09:41 2024 -NAMESPACE: avs -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: - -+ setup_monitoring -+ echo 'Adding monitoring setup...' -Adding monitoring setup... -+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -"prometheus-community" has been added to your repositories -+ helm repo update -Hang tight while we grab the latest from your chart repositories... -...Successfully got an update from the "istio" chart repository -...Successfully got an update from the "jetstack" chart repository -...Successfully got an update from the "aerospike-helm" chart repository -...Successfully got an update from the "aerospike-io" chart repository -...Successfully got an update from the "prometheus-community" chart repository -...Successfully got an update from the "stable" chart repository -Update Complete. ⎈Happy Helming!⎈ -+ helm install monitoring-stack prometheus-community/kube-prometheus-stack --namespace monitoring --create-namespace -NAME: monitoring-stack -LAST DEPLOYED: Thu Dec 5 17:10:10 2024 -NAMESPACE: monitoring -STATUS: deployed -REVISION: 1 -NOTES: -kube-prometheus-stack has been installed. Check its status by running: - kubectl --namespace monitoring get pods -l "release=monitoring-stack" - -Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. -+ echo 'Applying additional monitoring manifests...' -Applying additional monitoring manifests... -+ kubectl apply -f manifests/monitoring/aerospike-exporter-service.yaml -service/aerospike-exporter created -+ kubectl apply -f manifests/monitoring/aerospike-servicemonitor.yaml -servicemonitor.monitoring.coreos.com/aerospike-monitor created -+ kubectl apply -f manifests/monitoring/avs-servicemonitor.yaml -servicemonitor.monitoring.coreos.com/avs-monitor created -+ print_final_instructions -+ echo Your new deployment is available at 145.28.28.34.bc.googleusercontent.com.. -Your new deployment is available at 145.28.28.34.bc.googleusercontent.com.. -+ echo Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. -Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. -+ [[ 1 != 1 ]] -+ echo 'Setup Complete!' -Setup Complete! diff --git a/kubernetes/logs/avs-real-insecure b/kubernetes/logs/avs-real-insecure deleted file mode 100644 index d929231..0000000 --- a/kubernetes/logs/avs-real-insecure +++ /dev/null @@ -1,276 +0,0 @@ -+ trap 'echo "Error: $? 
at line $LINENO" >&2' ERR -++ pwd -+ WORKSPACE=/home/joem/src/aerospike-vector/kubernetes -++ gcloud config get-value project -+ PROJECT_ID=performance-eco -++ whoami -+ USERNAME=joem -+ CHART_VERSION=0.7.0 -+ DEFAULT_CLUSTER_NAME_SUFFIX=avs -+ [[ 3 -gt 0 ]] -+ case $1 in -+ RUN_INSECURE=1 -+ shift -+ [[ 2 -gt 0 ]] -+ case $1 in -+ CLUSTER_NAME_OVERRIDE=avs-real-insecure -+ shift 2 -+ [[ 0 -gt 0 ]] -+ main -+ set_env_variables -+ '[' -n avs-real-insecure ']' -+ export CLUSTER_NAME=joem-avs-real-insecure -+ CLUSTER_NAME=joem-avs-real-insecure -+ export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ export NODE_POOL_NAME_AVS=avs-pool -+ NODE_POOL_NAME_AVS=avs-pool -+ export ZONE=us-central1-c -+ ZONE=us-central1-c -+ export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ export BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ export REVERSE_DNS_AVS -+ print_env -+ echo 'Environment Variables:' -Environment Variables: -+ echo 'export PROJECT_ID=performance-eco' -export PROJECT_ID=performance-eco -+ echo 'export CLUSTER_NAME=joem-avs-real-insecure' -export CLUSTER_NAME=joem-avs-real-insecure -+ echo 'export NODE_POOL_NAME_AEROSPIKE=aerospike-pool' -export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ echo 'export NODE_POOL_NAME_AVS=avs-pool' -export NODE_POOL_NAME_AVS=avs-pool -+ echo 'export ZONE=us-central1-c' -export ZONE=us-central1-c -+ echo 'export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf' -export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ echo 'export CHART_LOCATION=' -export CHART_LOCATION= -+ echo 'export RUN_INSECURE=1' -export RUN_INSECURE=1 -+ reset_build -+ '[' -d /home/joem/src/aerospike-vector/kubernetes/generated ']' -++ mktemp -d /tmp/avs-deploy-previous.XXXXXX -+ temp_dir=/tmp/avs-deploy-previous.9tWhiW -+ mv -f /home/joem/src/aerospike-vector/kubernetes/generated /tmp/avs-deploy-previous.9tWhiW -+ mkdir -p /home/joem/src/aerospike-vector/kubernetes/generated/input /home/joem/src/aerospike-vector/kubernetes/generated/output /home/joem/src/aerospike-vector/kubernetes/generated/secrets /home/joem/src/aerospike-vector/kubernetes/generated/certs /home/joem/src/aerospike-vector/kubernetes/generated/manifests -+ cp /home/joem/src/aerospike-vector/kubernetes/features.conf /home/joem/src/aerospike-vector/kubernetes/generated/secrets/features.conf -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-auth.yaml /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-role-query.yaml /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-role-update.yaml /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/ -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/aerospike-cr.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/ -+ [[ 1 != 1 ]] -+ create_gke_cluster -+ gcloud container clusters describe joem-avs-real-insecure --zone us-central1-c -+ echo 'Cluster joem-avs-real-insecure already exists. Skipping creation.' -Cluster joem-avs-real-insecure already exists. Skipping creation. -+ return -+ setup_aerospike -+ kubectl create namespace aerospike -namespace/aerospike created -+ echo 'Deploying Aerospike Kubernetes Operator (AKO)...' 
-Deploying Aerospike Kubernetes Operator (AKO)... -+ kubectl get ns olm -+ echo 'OLM is already installed in olm namespace. Skipping installation.' -OLM is already installed in olm namespace. Skipping installation. -+ kubectl get subscription my-aerospike-kubernetes-operator --namespace operators -+ echo 'Installing AKO subscription...' -Installing AKO subscription... -+ kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml -subscription.operators.coreos.com/my-aerospike-kubernetes-operator created -+ echo 'Waiting for AKO to be ready...' -Waiting for AKO to be ready... -+ true -+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager -+ echo 'AKO is ready.' -AKO is ready. -+ kubectl --namespace operators wait --for=condition=available --timeout=180s deployment/aerospike-operator-controller-manager -deployment.apps/aerospike-operator-controller-manager condition met -+ break -+ echo 'Granting permissions to the target namespace...' -Granting permissions to the target namespace... -+ kubectl --namespace aerospike create serviceaccount aerospike-operator-controller-manager -serviceaccount/aerospike-operator-controller-manager created -+ kubectl create clusterrolebinding aerospike-cluster --clusterrole=aerospike-cluster --serviceaccount=aerospike:aerospike-operator-controller-manager -clusterrolebinding.rbac.authorization.k8s.io/aerospike-cluster created -+ echo 'Setting secrets for Aerospike cluster...' -Setting secrets for Aerospike cluster... -+ kubectl --namespace aerospike create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets -secret/aerospike-secret created -+ kubectl --namespace aerospike create secret generic auth-secret --from-literal=password=admin123 -secret/auth-secret created -+ kubectl --namespace aerospike create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs -secret/aerospike-tls created -+ echo 'Adding storage class...' -Adding storage class... -+ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/master/config/samples/storage/gce_ssd_storage_class.yaml -storageclass.storage.k8s.io/ssd created -+ echo 'Deploying Aerospike cluster...' -Deploying Aerospike cluster... -+ kubectl apply -f /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml -aerospikecluster.asdb.aerospike.com/aerocluster created -+ deploy_istio -+ echo 'Deploying Istio' -Deploying Istio -+ helm repo add istio https://istio-release.storage.googleapis.com/charts -"istio" has been added to your repositories -+ helm repo update -Hang tight while we grab the latest from your chart repositories... -...Successfully got an update from the "istio" chart repository -...Successfully got an update from the "jetstack" chart repository -...Successfully got an update from the "aerospike-io" chart repository -...Successfully got an update from the "stable" chart repository -Update Complete. ⎈Happy Helming!⎈ -+ helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait -NAME: istio-base -LAST DEPLOYED: Wed Dec 4 21:02:03 2024 -NAMESPACE: istio-system -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -Istio base successfully installed! 
- -To learn more about the release, try: - $ helm status istio-base -n istio-system - $ helm get all istio-base -n istio-system -+ helm install istiod istio/istiod --namespace istio-system --create-namespace --wait -NAME: istiod -LAST DEPLOYED: Wed Dec 4 21:02:12 2024 -NAMESPACE: istio-system -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -"istiod" successfully installed! - -To learn more about the release, try: - $ helm status istiod -n istio-system - $ helm get all istiod -n istio-system - -Next steps: - * Deploy a Gateway: https://istio.io/latest/docs/setup/additional-setup/gateway/ - * Try out our tasks to get started on common configurations: - * https://istio.io/latest/docs/tasks/traffic-management - * https://istio.io/latest/docs/tasks/security/ - * https://istio.io/latest/docs/tasks/policy-enforcement/ - * Review the list of actively supported releases, CVE publications and our hardening guide: - * https://istio.io/latest/docs/releases/supported-releases/ - * https://istio.io/latest/news/security/ - * https://istio.io/latest/docs/ops/best-practices/security/ - -For further documentation see https://istio.io website -+ helm install istio-ingress istio/gateway --values ./manifests/istio/istio-ingressgateway-values.yaml --namespace istio-ingress --create-namespace --wait -NAME: istio-ingress -LAST DEPLOYED: Wed Dec 4 21:02:22 2024 -NAMESPACE: istio-ingress -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: -"istio-ingress" successfully installed! - -To learn more about the release, try: - $ helm status istio-ingress -n istio-ingress - $ helm get all istio-ingress -n istio-ingress - -Next steps: - * Deploy an HTTP Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/ - * Deploy an HTTPS Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/ -+ kubectl apply -f manifests/istio/gateway.yaml -gateway.networking.istio.io/avs-gw created -+ kubectl apply -f manifests/istio/avs-virtual-service.yaml -virtualservice.networking.istio.io/avs-vs created -+ get_reverse_dns -++ kubectl get svc istio-ingress -n istio-ingress -o 'jsonpath={.status.loadBalancer.ingress[0].ip}' -+ INGRESS_IP=34.173.147.65 -++ dig +short -x 34.173.147.65 -+ REVERSE_DNS_AVS=65.147.173.34.bc.googleusercontent.com. -+ echo 'Reverse DNS: 65.147.173.34.bc.googleusercontent.com.' -Reverse DNS: 65.147.173.34.bc.googleusercontent.com. -+ [[ 1 != 1 ]] -+ setup_avs -+ kubectl create namespace avs -namespace/avs created -+ echo 'Setting secrets for AVS cluster...' -Setting secrets for AVS cluster... -+ kubectl --namespace avs create secret generic auth-secret --from-literal=password=admin123 -secret/auth-secret created -+ kubectl --namespace avs create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs -secret/aerospike-tls created -+ kubectl --namespace avs create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets -secret/aerospike-secret created -+ deploy_avs_helm_chart -+ echo 'Deploying AVS Helm chart...' -Deploying AVS Helm chart... -+ helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm -"aerospike-helm" has been added to your repositories -+ helm repo update -Hang tight while we grab the latest from your chart repositories... 
-...Successfully got an update from the "jetstack" chart repository -...Successfully got an update from the "istio" chart repository -...Successfully got an update from the "aerospike-io" chart repository -...Successfully got an update from the "aerospike-helm" chart repository -...Successfully got an update from the "stable" chart repository -Update Complete. ⎈Happy Helming!⎈ -+ '[' -z '' ']' -+ helm install avs-app-query --set replicaCount=2 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-role-query.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait -NAME: avs-app-query -LAST DEPLOYED: Wed Dec 4 21:03:39 2024 -NAMESPACE: avs -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: - -+ helm install avs-app-update --set replicaCount=1 --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values-role-update.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.7.0 --atomic --wait -NAME: avs-app-update -LAST DEPLOYED: Wed Dec 4 21:04:03 2024 -NAMESPACE: avs -STATUS: deployed -REVISION: 1 -TEST SUITE: None -NOTES: - -+ setup_monitoring -+ echo 'Adding monitoring setup...' -Adding monitoring setup... -+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -"prometheus-community" has been added to your repositories -+ helm repo update -Hang tight while we grab the latest from your chart repositories... -...Successfully got an update from the "jetstack" chart repository -...Successfully got an update from the "istio" chart repository -...Successfully got an update from the "aerospike-io" chart repository -...Successfully got an update from the "aerospike-helm" chart repository -...Successfully got an update from the "prometheus-community" chart repository -...Successfully got an update from the "stable" chart repository -Update Complete. ⎈Happy Helming!⎈ -+ helm install monitoring-stack prometheus-community/kube-prometheus-stack --namespace monitoring --create-namespace -NAME: monitoring-stack -LAST DEPLOYED: Wed Dec 4 21:04:33 2024 -NAMESPACE: monitoring -STATUS: deployed -REVISION: 1 -NOTES: -kube-prometheus-stack has been installed. Check its status by running: - kubectl --namespace monitoring get pods -l "release=monitoring-stack" - -Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator. -+ echo 'Applying additional monitoring manifests...' -Applying additional monitoring manifests... -+ kubectl apply -f manifests/monitoring/aerospike-exporter-service.yaml -service/aerospike-exporter created -+ kubectl apply -f manifests/monitoring/aerospike-servicemonitor.yaml -servicemonitor.monitoring.coreos.com/aerospike-monitor created -+ kubectl apply -f manifests/monitoring/avs-servicemonitor.yaml -servicemonitor.monitoring.coreos.com/avs-monitor created -+ print_final_instructions -+ echo Your new deployment is available at 65.147.173.34.bc.googleusercontent.com.. -Your new deployment is available at 65.147.173.34.bc.googleusercontent.com.. -+ echo Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. -Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec. 
-+ [[ 1 != 1 ]] -+ echo 'Setup Complete!' -Setup Complete! diff --git a/kubernetes/logs/avs-secure b/kubernetes/logs/avs-secure deleted file mode 100644 index cb89740..0000000 --- a/kubernetes/logs/avs-secure +++ /dev/null @@ -1,69 +0,0 @@ -+ trap 'echo "Error: $? at line $LINENO" >&2' ERR -++ pwd -+ WORKSPACE=/home/joem/src/aerospike-vector/kubernetes -++ gcloud config get-value project -+ PROJECT_ID=performance-eco -++ whoami -+ USERNAME=joem -+ DEFAULT_CLUSTER_NAME_SUFFIX=avs -+ [[ 2 -gt 0 ]] -+ case $1 in -+ CLUSTER_NAME_OVERRIDE=avs-secure -+ shift 2 -+ [[ 0 -gt 0 ]] -+ main -+ set_env_variables -+ '[' -n avs-secure ']' -+ export CLUSTER_NAME=joem-avs-secure -+ CLUSTER_NAME=joem-avs-secure -+ export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ export NODE_POOL_NAME_AVS=avs-pool -+ NODE_POOL_NAME_AVS=avs-pool -+ export ZONE=us-central1-c -+ ZONE=us-central1-c -+ export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ export BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ export REVERSE_DNS_AVS -+ print_env -+ echo 'Environment Variables:' -Environment Variables: -+ echo 'export PROJECT_ID=performance-eco' -export PROJECT_ID=performance-eco -+ echo 'export CLUSTER_NAME=joem-avs-secure' -export CLUSTER_NAME=joem-avs-secure -+ echo 'export NODE_POOL_NAME_AEROSPIKE=aerospike-pool' -export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ echo 'export NODE_POOL_NAME_AVS=avs-pool' -export NODE_POOL_NAME_AVS=avs-pool -+ echo 'export ZONE=us-central1-c' -export ZONE=us-central1-c -+ echo 'export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf' -export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ echo 'export CHART_LOCATION=' -export CHART_LOCATION= -+ echo 'export RUN_INSECURE=' -export RUN_INSECURE= -+ reset_build -+ '[' -d /home/joem/src/aerospike-vector/kubernetes/generated ']' -++ mktemp -d /tmp/avs-deploy-previous.XXXXXX -+ temp_dir=/tmp/avs-deploy-previous.360dzx -+ mv -f /home/joem/src/aerospike-vector/kubernetes/generated /tmp/avs-deploy-previous.360dzx -+ mkdir -p /home/joem/src/aerospike-vector/kubernetes/generated/input /home/joem/src/aerospike-vector/kubernetes/generated/output /home/joem/src/aerospike-vector/kubernetes/generated/secrets /home/joem/src/aerospike-vector/kubernetes/generated/certs /home/joem/src/aerospike-vector/kubernetes/generated/manifests -+ cp /home/joem/src/aerospike-vector/kubernetes/features.conf /home/joem/src/aerospike-vector/kubernetes/generated/secrets/features.conf -+ [[ '' == 1 ]] -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-auth.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/aerospike-cr-auth.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml -+ create_gke_cluster -++ date '+%Y-%m-%d %H:%M:%S' -+ echo '2024-11-12 17:22:11 - Starting GKE cluster creation...' -2024-11-12 17:22:11 - Starting GKE cluster creation... -+ gcloud container clusters create joem-avs-secure --project performance-eco --zone us-central1-c --num-nodes 1 --disk-type pd-standard --disk-size 100 -Note: The Kubelet readonly port (10255) is now deprecated. Please update your workloads to use the recommended alternatives. 
See https://cloud.google.com/kubernetes-engine/docs/how-to/disable-kubelet-readonly-port for ways to check usage and for migration instructions. -Note: Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most 1008 node(s). -ERROR: (gcloud.container.clusters.create) ResponseError: code=409, message=Already exists: projects/performance-eco/zones/us-central1-c/clusters/joem-avs-secure. -+ echo 'Failed to create GKE cluster' -Failed to create GKE cluster -+ exit 1 diff --git a/kubernetes/logs/eks-avs-secure b/kubernetes/logs/eks-avs-secure deleted file mode 100644 index a2a080f..0000000 --- a/kubernetes/logs/eks-avs-secure +++ /dev/null @@ -1,654 +0,0 @@ -SHELL=/usr/bin/fish -SESSION_MANAGER=local/pop-os:@/tmp/.ICE-unix/1993,unix/pop-os:/tmp/.ICE-unix/1993 -WINDOWID=304087055 -QT_ACCESSIBILITY=1 -COLORTERM=truecolor -XDG_CONFIG_DIRS=/etc/xdg/xdg-regolith-x11:/etc/xdg -XDG_MENU_PREFIX=gnome-flashback- -rvm_delete_flag=0 -GNOME_DESKTOP_SESSION_ID=this-is-deprecated -GTK_IM_MODULE=ibus -rvm_prefix=/home/joem -I3SOCK=/run/user/1000/i3/ipc-socket.2399 -SBT_HOME=/home/joem/.sdkman/candidates/sbt/current -JAVA_HOME=/usr/lib/jvm/zulu21/ -SSH_AUTH_SOCK=/run/user/1000/keyring/ssh -SDKMAN_CANDIDATES_DIR=/home/joem/.sdkman/candidates -XMODIFIERS=@im=ibus -DESKTOP_SESSION=regolith-x11 -EDITOR=vim -GOBIN=/usr/joem/src/GO/bin -GTK_MODULES=gail:atk-bridge:gail:atk-bridge -PWD=/home/joem/src/aerospike-vector/kubernetes -XDG_SESSION_DESKTOP=regolith-x11 -LOGNAME=joem -XDG_SESSION_TYPE=x11 -rvm_version=1.29.12 latest -MANPATH=/home/joem/.local/kitty.app/share/man -GPG_AGENT_INFO=/run/user/1000/gnupg/S.gpg-agent:0:1 -SYSTEMD_EXEC_PID=1934 -OMF_PATH=/home/joem/.local/share/omf -XAUTHORITY=/run/user/1000/gdm/Xauthority -WINDOWPATH=2 -HOME=/home/joem -USERNAME=joem -LANG=en_US.UTF-8 -XDG_CURRENT_DESKTOP=Regolith:GNOME-Flashback:GNOME -STARSHIP_SHELL=fish -KITTY_WINDOW_ID=1 -INVOCATION_ID=d7041ecbf64545b386eb5fd9c7b12453 -STARSHIP_SESSION_KEY=2492563991011428 -rvm_bin_path=/home/joem/.rvm/bin -GEM_PATH= -XDG_SESSION_CLASS=user -TERM=xterm-kitty -TERMINFO=/home/joem/.local/kitty.app/lib/kitty/terminfo -SCALA_HOME=/home/joem/.sdkman/candidates/scala/current -USER=joem -MANPAGER=less -X -SDKMAN_DIR=/home/joem/.sdkman -DISPLAY=:1 -SHLVL=3 -SPARK_HOME=/home/joem/.sdkman/candidates/spark/3.0.0/ -QT_IM_MODULE=ibus -SDKMAN_CANDIDATES_API=https://api.sdkman.io/2 -DESKTOP_AUTOSTART_ID=10e57284c3427c254c173144456866459900000019930016 -EEREPO=/home/joem/src/citrusleaf/aerospike-server-enterprise -rvm_ruby_string=system -AWS_SDK_LOAD_CONFIG=1 -XDG_RUNTIME_DIR=/run/user/1000 -COMPIZ_CONFIG_PROFILE=gnome-flashback -XDG_DATA_DIRS=/usr/share/regolith-x11:/usr/share/gnome:/home/joem/.local/share/flatpak/exports/share:/var/lib/flatpak/exports/share:/usr/local/share/:/usr/share/ -PATH=/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:/snap/bin:/home/joem/.local/bin:/home/joem/.cargo/bin:/home/joem/.local/kitty.app/bin:/usr/joem/src/GO/bin:~/bin:/home/joem/.yarn/bin:~kafka/kafka/bin:/usr/local/bin/bin:/home/joem/.rvm/bin:/home/joem/.krew/bin -GDMSESSION=regolith-x11 -DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/1000/bus -SDKMAN_PLATFORM=linuxx64 -DEBUG=true -CPATH=/usr/include/lua5.2 -OMF_CONFIG=/home/joem/.config/omf -GIO_LAUNCHED_DESKTOP_FILE_PID=2324 -GIO_LAUNCHED_DESKTOP_FILE=/usr/share/applications/regolith-x11.desktop -rvm_path=/home/joem/.rvm -GOPATH=/usr/joem/src/GO -_=/bin/printenv -+ trap 'echo "Error: $? 
at line $LINENO" >&2' ERR -++ pwd -+ WORKSPACE=/home/joem/src/aerospike-vector/kubernetes -++ whoami -+ USERNAME=joem -+ PROFILE=default -+ DEFAULT_CLUSTER_NAME_SUFFIX=avs -+ [[ 2 -gt 0 ]] -+ case $1 in -+ CLUSTER_NAME_OVERRIDE=avs-secure -+ shift 2 -+ [[ 0 -gt 0 ]] -+ main -+ set_env_variables -+ '[' -n avs-secure ']' -+ export CLUSTER_NAME=joem-avs-secure -+ CLUSTER_NAME=joem-avs-secure -+ export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ export NODE_POOL_NAME_AVS=avs-pool -+ NODE_POOL_NAME_AVS=avs-pool -+ export REGION=eu-central-1 -+ REGION=eu-central-1 -+ export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ export BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ BUILD_DIR=/home/joem/src/aerospike-vector/kubernetes/generated -+ export REVERSE_DNS_AVS -+ print_env -+ echo 'Environment Variables:' -Environment Variables: -+ echo 'export CLUSTER_NAME=joem-avs-secure' -export CLUSTER_NAME=joem-avs-secure -+ echo 'export NODE_POOL_NAME_AEROSPIKE=aerospike-pool' -export NODE_POOL_NAME_AEROSPIKE=aerospike-pool -+ echo 'export NODE_POOL_NAME_AVS=avs-pool' -export NODE_POOL_NAME_AVS=avs-pool -+ echo 'export REGION=eu-central-1' -export REGION=eu-central-1 -+ echo 'export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf' -export FEATURES_CONF=/home/joem/src/aerospike-vector/kubernetes/features.conf -+ echo 'export CHART_LOCATION=' -export CHART_LOCATION= -+ echo 'export RUN_INSECURE=' -export RUN_INSECURE= -+ reset_build -+ '[' -d /home/joem/src/aerospike-vector/kubernetes/generated ']' -++ mktemp -d /tmp/avs-deploy-previous.XXXXXX -+ temp_dir=/tmp/avs-deploy-previous.XylEiA -+ mv -f /home/joem/src/aerospike-vector/kubernetes/generated /tmp/avs-deploy-previous.XylEiA -+ mkdir -p /home/joem/src/aerospike-vector/kubernetes/generated/input /home/joem/src/aerospike-vector/kubernetes/generated/output /home/joem/src/aerospike-vector/kubernetes/generated/secrets /home/joem/src/aerospike-vector/kubernetes/generated/certs /home/joem/src/aerospike-vector/kubernetes/generated/manifests -+ cp /home/joem/src/aerospike-vector/kubernetes/features.conf /home/joem/src/aerospike-vector/kubernetes/generated/secrets/features.conf -+ [[ '' == 1 ]] -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/avs-values-auth.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml -+ cp /home/joem/src/aerospike-vector/kubernetes/manifests/aerospike-cr-auth.yaml /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml -+ create_eks_cluster -++ date '+%Y-%m-%d %H:%M:%S' -+ echo '2024-11-12 17:23:05 - Starting EKS cluster creation...' -2024-11-12 17:23:05 - Starting EKS cluster creation... 
-+ set -x
-+ eksctl create cluster --name joem-avs-secure --region eu-central-1 --profile default --with-oidc --without-nodegroup --alb-ingress-access --external-dns-access --set-kubeconfig-context
-2024-11-12 17:23:06 [ℹ] eksctl version 0.194.0
-2024-11-12 17:23:06 [ℹ] using region eu-central-1
-2024-11-12 17:23:08 [ℹ] setting availability zones to [eu-central-1c eu-central-1b eu-central-1a]
-2024-11-12 17:23:08 [ℹ] subnets for eu-central-1c - public:192.168.0.0/19 private:192.168.96.0/19
-2024-11-12 17:23:08 [ℹ] subnets for eu-central-1b - public:192.168.32.0/19 private:192.168.128.0/19
-2024-11-12 17:23:08 [ℹ] subnets for eu-central-1a - public:192.168.64.0/19 private:192.168.160.0/19
-2024-11-12 17:23:08 [ℹ] using Kubernetes version 1.30
-2024-11-12 17:23:08 [ℹ] creating EKS cluster "joem-avs-secure" in "eu-central-1" region with
-2024-11-12 17:23:08 [ℹ] if you encounter any issues, check CloudFormation console or try 'eksctl utils describe-stacks --region=eu-central-1 --cluster=joem-avs-secure'
-2024-11-12 17:23:08 [ℹ] Kubernetes API endpoint access will use default of {publicAccess=true, privateAccess=false} for cluster "joem-avs-secure" in "eu-central-1"
-2024-11-12 17:23:08 [ℹ] CloudWatch logging will not be enabled for cluster "joem-avs-secure" in "eu-central-1"
-2024-11-12 17:23:08 [ℹ] you can enable it with 'eksctl utils update-cluster-logging --enable-types={SPECIFY-YOUR-LOG-TYPES-HERE (e.g. all)} --region=eu-central-1 --cluster=joem-avs-secure'
-2024-11-12 17:23:08 [ℹ] default addons kube-proxy, coredns, vpc-cni were not specified, will install them as EKS addons
-2024-11-12 17:23:08 [ℹ]
-2 sequential tasks: { create cluster control plane "joem-avs-secure",
- 5 sequential sub-tasks: {
- 1 task: { create addons },
- wait for control plane to become ready,
- associate IAM OIDC provider,
- no tasks,
- update VPC CNI to use IRSA if required,
- }
-}
-2024-11-12 17:23:08 [ℹ] building cluster stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:23:09 [ℹ] deploying stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:23:39 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:24:10 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:25:11 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:26:12 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:27:13 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:28:14 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:29:15 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:30:16 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:31:17 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:32:18 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-cluster"
-2024-11-12 17:32:23 [ℹ] creating addon
-2024-11-12 17:32:23 [ℹ] successfully created addon
-2024-11-12 17:32:24 [ℹ] creating addon
-2024-11-12 17:32:24 [ℹ] successfully created addon
-2024-11-12 17:32:25 [!] recommended policies were found for "vpc-cni" addon, but since OIDC is disabled on the cluster, eksctl cannot configure the requested permissions; the recommended way to provide IAM permissions for "vpc-cni" addon is via pod identity associations; after addon creation is completed, add all recommended policies to the config file, under `addon.PodIdentityAssociations`, and run `eksctl update addon`
-2024-11-12 17:32:25 [ℹ] creating addon
-2024-11-12 17:32:26 [ℹ] successfully created addon
-2024-11-12 17:34:32 [ℹ] deploying stack "eksctl-joem-avs-secure-addon-vpc-cni"
-2024-11-12 17:34:33 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-addon-vpc-cni"
-2024-11-12 17:35:04 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-addon-vpc-cni"
-2024-11-12 17:35:04 [ℹ] updating addon
-2024-11-12 17:35:15 [ℹ] addon "vpc-cni" active
-2024-11-12 17:35:15 [ℹ] waiting for the control plane to become ready
-2024-11-12 17:35:15 [✔] saved kubeconfig as "/home/joem/.kube/config"
-2024-11-12 17:35:15 [ℹ] no tasks
-2024-11-12 17:35:15 [✔] all EKS cluster resources for "joem-avs-secure" have been created
-2024-11-12 17:35:15 [✔] created 0 nodegroup(s) in cluster "joem-avs-secure"
-2024-11-12 17:35:15 [✔] created 0 managed nodegroup(s) in cluster "joem-avs-secure"
-2024-11-12 17:35:16 [ℹ] kubectl command should work with "/home/joem/.kube/config", try 'kubectl get nodes'
-2024-11-12 17:35:16 [✔] EKS cluster "joem-avs-secure" in "eu-central-1" region is ready
-+ echo 'EKS cluster created successfully.'
-EKS cluster created successfully.
-+ eksctl create addon --name aws-ebs-csi-driver --cluster joem-avs-secure --region eu-central-1 --profile default --force
-2024-11-12 17:35:19 [ℹ] Kubernetes version "1.30" in use by cluster "joem-avs-secure"
-2024-11-12 17:35:19 [!] IRSA has been deprecated; the recommended way to provide IAM permissions for "aws-ebs-csi-driver" addon is via pod identity associations; after addon creation is completed, run `eksctl utils migrate-to-pod-identity`
-2024-11-12 17:35:19 [ℹ] creating role using recommended policies for "aws-ebs-csi-driver" addon
-2024-11-12 17:35:20 [ℹ] deploying stack "eksctl-joem-avs-secure-addon-aws-ebs-csi-driver"
-2024-11-12 17:35:21 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-addon-aws-ebs-csi-driver"
-2024-11-12 17:35:51 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-addon-aws-ebs-csi-driver"
-2024-11-12 17:36:49 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-addon-aws-ebs-csi-driver"
-2024-11-12 17:36:49 [ℹ] creating addon
-+ echo 'Creating Aerospike node pool...'
-Creating Aerospike node pool...
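Note (editorial): the run above creates the control plane with `--with-oidc --without-nodegroup` and then layers the aws-ebs-csi-driver addon on separately; the `[!]` warnings about IRSA and pod identity come from eksctl itself, not the script. A quick way to confirm the cluster and its addons after a run like this (illustrative commands, not part of the removed log):

    eksctl get cluster --region eu-central-1
    eksctl get addon --cluster joem-avs-secure --region eu-central-1
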
-+ eksctl create nodegroup --cluster joem-avs-secure --name aerospike-pool --node-type m5dn.xlarge --nodes 3 --nodes-min 3 --nodes-max 3 --region eu-central-1 --profile default --node-volume-size 100 --node-volume-type gp2 --managed
-2024-11-12 17:36:52 [ℹ] will use version 1.30 for new nodegroup(s) based on control plane version
-2024-11-12 17:36:57 [ℹ] nodegroup "aerospike-pool" will use "" [AmazonLinux2/1.30]
-2024-11-12 17:36:59 [ℹ] 1 nodegroup (aerospike-pool) was included (based on the include/exclude rules)
-2024-11-12 17:36:59 [ℹ] will create a CloudFormation stack for each of 1 managed nodegroups in cluster "joem-avs-secure"
-2024-11-12 17:37:00 [ℹ]
-2 sequential tasks: { fix cluster compatibility, 1 task: { 1 task: { create managed nodegroup "aerospike-pool" } }
-}
-2024-11-12 17:37:00 [ℹ] checking cluster stack for missing resources
-2024-11-12 17:37:01 [ℹ] cluster stack has all required resources
-2024-11-12 17:37:03 [ℹ] building managed nodegroup stack "eksctl-joem-avs-secure-nodegroup-aerospike-pool"
-2024-11-12 17:37:03 [ℹ] deploying stack "eksctl-joem-avs-secure-nodegroup-aerospike-pool"
-2024-11-12 17:37:04 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-aerospike-pool"
-2024-11-12 17:37:34 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-aerospike-pool"
-2024-11-12 17:38:15 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-aerospike-pool"
-2024-11-12 17:39:23 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-aerospike-pool"
-2024-11-12 17:39:23 [ℹ] no tasks
-2024-11-12 17:39:23 [✔] created 0 nodegroup(s) in cluster "joem-avs-secure"
-2024-11-12 17:39:24 [ℹ] nodegroup "aerospike-pool" has 3 node(s)
-2024-11-12 17:39:24 [ℹ] node "ip-192-168-30-3.eu-central-1.compute.internal" is ready
-2024-11-12 17:39:24 [ℹ] node "ip-192-168-55-125.eu-central-1.compute.internal" is ready
-2024-11-12 17:39:24 [ℹ] node "ip-192-168-64-180.eu-central-1.compute.internal" is ready
-2024-11-12 17:39:24 [ℹ] waiting for at least 3 node(s) to become ready in "aerospike-pool"
-2024-11-12 17:39:25 [ℹ] nodegroup "aerospike-pool" has 3 node(s)
-2024-11-12 17:39:25 [ℹ] node "ip-192-168-30-3.eu-central-1.compute.internal" is ready
-2024-11-12 17:39:25 [ℹ] node "ip-192-168-55-125.eu-central-1.compute.internal" is ready
-2024-11-12 17:39:25 [ℹ] node "ip-192-168-64-180.eu-central-1.compute.internal" is ready
-2024-11-12 17:39:25 [✔] created 1 managed nodegroup(s) in cluster "joem-avs-secure"
-2024-11-12 17:39:26 [ℹ] checking security group configuration for all nodegroups
-2024-11-12 17:39:26 [ℹ] all nodegroups have up-to-date cloudformation templates
-+ echo 'Aerospike node pool added successfully.'
-Aerospike node pool added successfully.
-+ echo 'Labeling Aerospike nodes...'
-Labeling Aerospike nodes...
-+ kubectl get nodes -l eks.amazonaws.com/nodegroup=aerospike-pool -o name
-+ xargs -I '{}' kubectl label '{}' aerospike.com/node-pool=default-rack --overwrite
-node/ip-192-168-30-3.eu-central-1.compute.internal labeled
-node/ip-192-168-55-125.eu-central-1.compute.internal labeled
-node/ip-192-168-64-180.eu-central-1.compute.internal labeled
-+ echo 'Adding AVS node pool...'
-Adding AVS node pool...
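Note (editorial): labeling is a two-stage pipe in the trace above: `kubectl get nodes -l eks.amazonaws.com/nodegroup=aerospike-pool -o name` emits `node/...` names, and `xargs` feeds each one into `kubectl label ... aerospike.com/node-pool=default-rack --overwrite`. To spot-check the result afterwards (illustrative):

    # Show every node with its aerospike.com/node-pool label as a column.
    kubectl get nodes -L aerospike.com/node-pool
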
-+ eksctl create nodegroup --cluster joem-avs-secure --name avs-pool --node-type m5dn.xlarge --nodes 3 --nodes-min 3 --nodes-max 3 --region eu-central-1 --profile default --node-volume-size 100 --node-volume-type gp2 --managed
-2024-11-12 17:39:37 [ℹ] will use version 1.30 for new nodegroup(s) based on control plane version
-2024-11-12 17:39:43 [ℹ] nodegroup "avs-pool" will use "" [AmazonLinux2/1.30]
-2024-11-12 17:39:47 [ℹ] 1 existing nodegroup(s) (aerospike-pool) will be excluded
-2024-11-12 17:39:47 [ℹ] 1 nodegroup (avs-pool) was included (based on the include/exclude rules)
-2024-11-12 17:39:47 [ℹ] will create a CloudFormation stack for each of 1 managed nodegroups in cluster "joem-avs-secure"
-2024-11-12 17:39:48 [ℹ]
-2 sequential tasks: { fix cluster compatibility, 1 task: { 1 task: { create managed nodegroup "avs-pool" } }
-}
-2024-11-12 17:39:48 [ℹ] checking cluster stack for missing resources
-2024-11-12 17:39:49 [ℹ] cluster stack has all required resources
-2024-11-12 17:39:51 [ℹ] building managed nodegroup stack "eksctl-joem-avs-secure-nodegroup-avs-pool"
-2024-11-12 17:39:52 [ℹ] deploying stack "eksctl-joem-avs-secure-nodegroup-avs-pool"
-2024-11-12 17:39:52 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-avs-pool"
-2024-11-12 17:40:23 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-avs-pool"
-2024-11-12 17:40:55 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-avs-pool"
-2024-11-12 17:42:06 [ℹ] waiting for CloudFormation stack "eksctl-joem-avs-secure-nodegroup-avs-pool"
-2024-11-12 17:42:08 [ℹ] no tasks
-2024-11-12 17:42:08 [✔] created 0 nodegroup(s) in cluster "joem-avs-secure"
-2024-11-12 17:42:09 [ℹ] nodegroup "avs-pool" has 3 node(s)
-2024-11-12 17:42:09 [ℹ] node "ip-192-168-18-214.eu-central-1.compute.internal" is ready
-2024-11-12 17:42:09 [ℹ] node "ip-192-168-60-46.eu-central-1.compute.internal" is ready
-2024-11-12 17:42:09 [ℹ] node "ip-192-168-82-234.eu-central-1.compute.internal" is ready
-2024-11-12 17:42:09 [ℹ] waiting for at least 3 node(s) to become ready in "avs-pool"
-2024-11-12 17:42:09 [ℹ] nodegroup "avs-pool" has 3 node(s)
-2024-11-12 17:42:09 [ℹ] node "ip-192-168-18-214.eu-central-1.compute.internal" is ready
-2024-11-12 17:42:09 [ℹ] node "ip-192-168-60-46.eu-central-1.compute.internal" is ready
-2024-11-12 17:42:09 [ℹ] node "ip-192-168-82-234.eu-central-1.compute.internal" is ready
-2024-11-12 17:42:09 [✔] created 1 managed nodegroup(s) in cluster "joem-avs-secure"
-2024-11-12 17:42:16 [ℹ] checking security group configuration for all nodegroups
-2024-11-12 17:42:16 [ℹ] all nodegroups have up-to-date cloudformation templates
-+ echo 'AVS node pool added successfully.'
-AVS node pool added successfully.
-+ echo 'Labeling AVS nodes...'
-Labeling AVS nodes...
-+ kubectl get nodes -l eks.amazonaws.com/nodegroup=avs-pool -o name
-+ xargs -I '{}' kubectl label '{}' aerospike.com/node-pool=avs --overwrite
-node/ip-192-168-18-214.eu-central-1.compute.internal labeled
-node/ip-192-168-60-46.eu-central-1.compute.internal labeled
-node/ip-192-168-82-234.eu-central-1.compute.internal labeled
-+ echo 'Setting up namespaces...'
-Setting up namespaces...
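Note (editorial): both nodegroups are pinned to exactly three m5dn.xlarge nodes (`--nodes 3 --nodes-min 3 --nodes-max 3`), so nothing autoscales. If a pool later needs to grow, a managed nodegroup can be resized in place; using this run's names, that would look roughly like (illustrative):

    eksctl scale nodegroup --cluster joem-avs-secure --name avs-pool \
      --nodes 5 --nodes-max 5 --region eu-central-1
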
-+ kubectl create namespace aerospike
-namespace/aerospike created
-+ kubectl create namespace avs
-namespace/avs created
-+ deploy_istio
-+ echo 'Deploying Istio'
-Deploying Istio
-+ helm repo add istio https://istio-release.storage.googleapis.com/charts
-"istio" already exists with the same configuration, skipping
-+ helm repo update
-Hang tight while we grab the latest from your chart repositories...
-...Successfully got an update from the "istio" chart repository
-...Successfully got an update from the "aerospike-helm" chart repository
-...Successfully got an update from the "jetstack" chart repository
-...Successfully got an update from the "prometheus-community" chart repository
-...Successfully got an update from the "stable" chart repository
-Update Complete. ⎈Happy Helming!⎈
-+ helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait
-NAME: istio-base
-LAST DEPLOYED: Tue Nov 12 17:42:34 2024
-NAMESPACE: istio-system
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
-NOTES:
-Istio base successfully installed!
-
-To learn more about the release, try:
- $ helm status istio-base -n istio-system
- $ helm get all istio-base -n istio-system
-+ helm install istiod istio/istiod --namespace istio-system --create-namespace --wait
-NAME: istiod
-LAST DEPLOYED: Tue Nov 12 17:42:54 2024
-NAMESPACE: istio-system
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
-NOTES:
-"istiod" successfully installed!
-
-To learn more about the release, try:
- $ helm status istiod -n istio-system
- $ helm get all istiod -n istio-system
-
-Next steps:
- * Deploy a Gateway: https://istio.io/latest/docs/setup/additional-setup/gateway/
- * Try out our tasks to get started on common configurations:
- * https://istio.io/latest/docs/tasks/traffic-management
- * https://istio.io/latest/docs/tasks/security/
- * https://istio.io/latest/docs/tasks/policy-enforcement/
- * Review the list of actively supported releases, CVE publications and our hardening guide:
- * https://istio.io/latest/docs/releases/supported-releases/
- * https://istio.io/latest/news/security/
- * https://istio.io/latest/docs/ops/best-practices/security/
-
-For further documentation see https://istio.io website
-+ helm install istio-ingress istio/gateway --values ./manifests/istio/istio-ingressgateway-values.yaml --namespace istio-ingress --create-namespace --wait
-NAME: istio-ingress
-LAST DEPLOYED: Tue Nov 12 17:43:15 2024
-NAMESPACE: istio-ingress
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
-NOTES:
-"istio-ingress" successfully installed!
-
-To learn more about the release, try:
- $ helm status istio-ingress -n istio-ingress
- $ helm get all istio-ingress -n istio-ingress
-
-Next steps:
- * Deploy an HTTP Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/ingress-control/
- * Deploy an HTTPS Gateway: https://istio.io/latest/docs/tasks/traffic-management/ingress/secure-ingress/
-+ kubectl apply -f manifests/istio/gateway.yaml
-gateway.networking.istio.io/avs-gw created
-+ kubectl apply -f manifests/istio/avs-virtual-service.yaml
-virtualservice.networking.istio.io/avs-vs created
-+ get_reverse_dns
-++ kubectl get svc istio-ingress -n istio-ingress -o 'jsonpath={.status.loadBalancer.ingress[0].hostname}'
-+ REVERSE_DNS_AVS=a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com
-+ echo 'Hostname DNS: a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com'
-Hostname DNS: a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com
-+ [[ '' != 1 ]]
-+ generate_certs
-+ echo 'Generating certificates...'
-Generating certificates...
-+ echo 'Generate Root'
-Generate Root
-+ openssl genrsa -out /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.key 2048
-+ openssl req -x509 -new -nodes -config /home/joem/src/aerospike-vector/kubernetes/ssl/openssl_ca.conf -extensions v3_ca -key /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.key -sha256 -days 3650 -out /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem -subj /C=UK/ST=London/L=London/O=abs/OU=Support/CN=ca.aerospike.com
-+ echo 'Generate Requests & Private Key'
-Generate Requests & Private Key
-+ SVC_NAME=aerospike-cluster.aerospike.svc.cluster.local
-+ COMMON_NAME=asd.aerospike.com
-+ openssl req -new -nodes -config /home/joem/src/aerospike-vector/kubernetes/ssl/openssl.conf -extensions v3_req -out /home/joem/src/aerospike-vector/kubernetes/generated/input/asd.aerospike.com.req -keyout /home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.key -subj /C=UK/ST=London/L=London/O=abs/OU=Server/CN=asd.aerospike.com
-.......+.....+............+......+..........+..+.......+........+.+......+.....+....+..+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.....+.+.....+....+............+.........+...+........+.......+......+.....+....+..+....+......+........+...+...+....+..+...+......+............+...+................+...+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.........+.....+...+....+........+.......+...........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-.......+.........+..........+..+.......+...+..+......+................+...+..+.+.....+...+......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.........+..+..........+.....+....+...+......+.....+....+.....+..........+........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*..+.........+....+......+.........+......+..+..........+.........+.....+..........+.....+..............................+.....................+.........+...+...+.......+......+.........+..+.+........+.+.....+.+............+...........+....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
------
-+ SVC_NAME=avs-app-aerospike-vector-search.aerospike.svc.cluster.local
-+ COMMON_NAME=avs.aerospike.com
-+ openssl req -new -nodes -config /home/joem/src/aerospike-vector/kubernetes/ssl/openssl.conf -extensions v3_req -out /home/joem/src/aerospike-vector/kubernetes/generated/input/avs.aerospike.com.req -keyout /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.key -subj /C=UK/ST=London/L=London/O=abs/OU=Client/CN=avs.aerospike.com
-.+...........+....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+....+...+...+........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*........+.....+...+.+.........+...+.....+....+.....+.........+......+...+......+.+.....+.+..+...+...+....+...+............+...+..+.......+...........+....+...+......+.....+.......+..+.......+......+..+...+...+......+.+..................+..+.............+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-.....+...+......+....+..+.......+.....+.......+...+......+..+....+......+..+.+.....+......+....+.....+.+.....+...+...+.............+.....+....+..+...+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+...............+............+...+......+...+.......+...+......+............+...+..+....+......+..+.......+.........+.....+....+.....+..........+........+.......+........+.......+.....+.+......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*......+..+......+...+......+...................+.........+..+......+....+......+..+.+..+...+...............+.+.........+.................+...+...............+.......+...+......+........+...+.......+...+..+................+...........+.+..+.+......+.....+......+...+....+...........+............+.............+..+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
------
-+ SVC_NAME=avs-app-aerospike-vector-search.aerospike.svc.cluster.local
-+ COMMON_NAME=svc.aerospike.com
-+ openssl req -new -nodes -config /home/joem/src/aerospike-vector/kubernetes/ssl/openssl_svc.conf -extensions v3_req -out /home/joem/src/aerospike-vector/kubernetes/generated/input/svc.aerospike.com.req -keyout /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.key -subj /C=UK/ST=London/L=London/O=abs/OU=Client/CN=svc.aerospike.com
-.....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+....+.....+....+...+..+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*..+..+......+............+.......+.....+...+.......+...........+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-.........+.....+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*..+......+.........+...+..+.........+....+...+............+........+.........+...+...+....+......+..+......+.........+.+......+.....+.+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*......+.....+...+.+..+..........+.....+.........+...+.+......+.....+.+............+...+..+......+....+......+.........+...+...+...........+..........+...+.....+......+.......+..+.+.....+.+.........+..+...+.+.....+......+.........+.+.....+.........+.............+..+......+...+.+...........+...+....+...+...+...........+.............+..+...............+...............+.............+..+.........+.+......+...+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
------
-+ echo 'Generate Certificates'
-Generate Certificates
-+ SVC_NAME=aerospike-cluster.aerospike.svc.cluster.local
-+ COMMON_NAME=asd.aerospike.com
-+ openssl x509 -req -extfile /home/joem/src/aerospike-vector/kubernetes/ssl/openssl.conf -in /home/joem/src/aerospike-vector/kubernetes/generated/input/asd.aerospike.com.req -CA /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem -CAkey /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.key -extensions v3_req -days 3649 -outform PEM -out /home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.pem -set_serial 110
-Certificate request self-signature ok
-subject=C = UK, ST = London, L = London, O = abs, OU = Server, CN = asd.aerospike.com
-+ SVC_NAME=avs-app-aerospike-vector-search.aerospike.svc.cluster.local
-+ COMMON_NAME=avs.aerospike.com
-+ openssl x509 -req -extfile /home/joem/src/aerospike-vector/kubernetes/ssl/openssl.conf -in /home/joem/src/aerospike-vector/kubernetes/generated/input/avs.aerospike.com.req -CA /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem -CAkey /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.key -extensions v3_req -days 3649 -outform PEM -out /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.pem -set_serial 210
-Certificate request self-signature ok
-subject=C = UK, ST = London, L = London, O = abs, OU = Client, CN = avs.aerospike.com
-+ SVC_NAME=avs-app-aerospike-vector-search.aerospike.svc.cluster.local
-+ COMMON_NAME=svc.aerospike.com
-+ openssl x509 -req -extfile /home/joem/src/aerospike-vector/kubernetes/ssl/openssl_svc.conf -in /home/joem/src/aerospike-vector/kubernetes/generated/input/svc.aerospike.com.req -CA /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem -CAkey /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.key -extensions v3_req -days 3649 -outform PEM -out /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.pem -set_serial 310
-Certificate request self-signature ok
-subject=C = UK, ST = London, L = London, O = abs, OU = Client, CN = svc.aerospike.com
-+ echo 'Verify Certificate signed by root'
-Verify Certificate signed by root
-+ openssl verify -verbose -CAfile /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.pem
-/home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.pem: OK
-+ openssl verify -verbose -CAfile /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.pem
-/home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.pem: OK
-+ openssl verify -verbose -CAfile /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.pem
-/home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.pem: OK
-+ PASSWORD=citrusstore
-+ echo -n citrusstore
-+ tee /home/joem/src/aerospike-vector/kubernetes/generated/output/storepass /home/joem/src/aerospike-vector/kubernetes/generated/output/keypass
-+ ADMIN_PASSWORD=admin123
-+ echo -n admin123
-+ keytool -import -file /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem --storepass citrusstore -keystore /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.truststore.jks -alias ca.aerospike.com -noprompt
-Certificate was added to keystore
-+ openssl pkcs12 -export -out /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.p12 -in /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.pem -inkey /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.key -password file:/home/joem/src/aerospike-vector/kubernetes/generated/output/storepass
-++ cat /home/joem/src/aerospike-vector/kubernetes/generated/output/storepass
-++ cat /home/joem/src/aerospike-vector/kubernetes/generated/output/storepass
-+ keytool -importkeystore -srckeystore /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.p12 -destkeystore /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.keystore.jks -srcstoretype pkcs12 -srcstorepass citrusstore -deststorepass citrusstore -noprompt
-Importing keystore /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.p12 to /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.keystore.jks...
-Entry for alias 1 successfully imported.
-Import command completed: 1 entries successfully imported, 0 entries failed or cancelled
-+ openssl pkcs12 -export -out /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.p12 -in /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.pem -inkey /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.key -password file:/home/joem/src/aerospike-vector/kubernetes/generated/output/storepass
-++ cat /home/joem/src/aerospike-vector/kubernetes/generated/output/storepass
-++ cat /home/joem/src/aerospike-vector/kubernetes/generated/output/storepass
-+ keytool -importkeystore -srckeystore /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.p12 -destkeystore /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.keystore.jks -srcstoretype pkcs12 -srcstorepass citrusstore -deststorepass citrusstore -noprompt
-Importing keystore /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.p12 to /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.keystore.jks...
-Entry for alias 1 successfully imported.
-Import command completed: 1 entries successfully imported, 0 entries failed or cancelled
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.keystore.jks /home/joem/src/aerospike-vector/kubernetes/generated/certs/svc.aerospike.com.keystore.jks
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.keystore.jks /home/joem/src/aerospike-vector/kubernetes/generated/certs/avs.aerospike.com.keystore.jks
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.truststore.jks /home/joem/src/aerospike-vector/kubernetes/generated/certs/ca.aerospike.com.truststore.jks
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/certs/asd.aerospike.com.pem
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/avs.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/certs/avs.aerospike.com.pem
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/svc.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/certs/svc.aerospike.com.pem
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/asd.aerospike.com.key /home/joem/src/aerospike-vector/kubernetes/generated/certs/asd.aerospike.com.key
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/ca.aerospike.com.pem /home/joem/src/aerospike-vector/kubernetes/generated/certs/ca.aerospike.com.pem
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/keypass /home/joem/src/aerospike-vector/kubernetes/generated/certs/keypass
-+ mv /home/joem/src/aerospike-vector/kubernetes/generated/output/storepass /home/joem/src/aerospike-vector/kubernetes/generated/certs/storepass
-+ echo 'Generate Auth Keys'
-Generate Auth Keys
-+ openssl genpkey -algorithm RSA -out /home/joem/src/aerospike-vector/kubernetes/generated/secrets/private_key.pem -pkeyopt rsa_keygen_bits:2048 -pass pass:citrusstore
-....+......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*.....+........+.........+......+....+...+..+.........+....+......+..+.........+.+...+.....+............+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-.+............+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*...+.......+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*......+....+......+.....+......+.......+.....+.+..+...................+...............+...+.....+............+....+..+...+........................+...+.......+...+..+.......+...+..+.+..+.+...........+....+............+.....+...+.+.....+......+...+.......+..+...+.........+...+...+...+....+...+........+.......+.........+...........+...+.+.....+.+..+...+...........................+...............+................+.....+....+......+...........+.+...+.....+.+........+...+...+.+...+...+..............+...+............+....+....................+....+...+..+............+.....................+....+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-+ openssl rsa -pubout -in /home/joem/src/aerospike-vector/kubernetes/generated/secrets/private_key.pem -out /home/joem/src/aerospike-vector/kubernetes/generated/secrets/public_key.pem -passin pass:citrusstore
-writing RSA key
-+ setup_aerospike
-+ echo 'Deploying Aerospike Kubernetes Operator (AKO)...'
-Deploying Aerospike Kubernetes Operator (AKO)...
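Note (editorial): condensed, the TLS flow in the trace above is: generate a self-signed root CA, issue per-service certificates from it, verify them against the CA, then wrap the client certs as PKCS12 and convert them to JKS keystores (plus a CA truststore) for the JVM-based components. One quirk visible in the log: `openssl verify` is run twice against asd.aerospike.com.pem and never against avs.aerospike.com.pem. The same chain in miniature (file names and passwords illustrative, not the script's):

    # Self-signed root CA
    openssl genrsa -out ca.key 2048
    openssl req -x509 -new -nodes -key ca.key -sha256 -days 3650 -out ca.pem -subj "/CN=ca.example.com"
    # Service key + CSR, then sign the CSR with the CA
    openssl req -new -nodes -newkey rsa:2048 -keyout svc.key -out svc.csr -subj "/CN=svc.example.com"
    openssl x509 -req -in svc.csr -CA ca.pem -CAkey ca.key -days 3649 -out svc.pem -set_serial 1
    openssl verify -CAfile ca.pem svc.pem
    # PKCS12 bundle, then Java keystore and CA truststore
    openssl pkcs12 -export -out svc.p12 -in svc.pem -inkey svc.key -passout pass:changeit
    keytool -importkeystore -srckeystore svc.p12 -srcstoretype pkcs12 -srcstorepass changeit \
      -destkeystore svc.keystore.jks -deststorepass changeit -noprompt
    keytool -import -file ca.pem -keystore ca.truststore.jks -storepass changeit -alias ca -noprompt
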
-+ curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh
-+ bash -s v0.25.0
-customresourcedefinition.apiextensions.k8s.io/catalogsources.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/clusterserviceversions.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/installplans.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/olmconfigs.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/operatorconditions.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/operatorgroups.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/operators.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/subscriptions.operators.coreos.com created
-customresourcedefinition.apiextensions.k8s.io/catalogsources.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/clusterserviceversions.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/installplans.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/olmconfigs.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/operatorconditions.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/operatorgroups.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/operators.operators.coreos.com condition met
-customresourcedefinition.apiextensions.k8s.io/subscriptions.operators.coreos.com condition met
-namespace/olm created
-namespace/operators created
-serviceaccount/olm-operator-serviceaccount created
-clusterrole.rbac.authorization.k8s.io/system:controller:operator-lifecycle-manager created
-clusterrolebinding.rbac.authorization.k8s.io/olm-operator-binding-olm created
-olmconfig.operators.coreos.com/cluster created
-deployment.apps/olm-operator created
-deployment.apps/catalog-operator created
-clusterrole.rbac.authorization.k8s.io/aggregate-olm-edit created
-clusterrole.rbac.authorization.k8s.io/aggregate-olm-view created
-operatorgroup.operators.coreos.com/global-operators created
-operatorgroup.operators.coreos.com/olm-operators created
-clusterserviceversion.operators.coreos.com/packageserver created
-catalogsource.operators.coreos.com/operatorhubio-catalog created
-Waiting for deployment "olm-operator" rollout to finish: 0 of 1 updated replicas are available...
-deployment "olm-operator" successfully rolled out
-deployment "catalog-operator" successfully rolled out
-Package server phase: Installing
-Package server phase: Succeeded
-Waiting for deployment "packageserver" rollout to finish: 1 of 2 updated replicas are available...
-deployment "packageserver" successfully rolled out
-+ kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
-subscription.operators.coreos.com/my-aerospike-kubernetes-operator created
-+ echo 'Waiting for AKO to be ready...'
-Waiting for AKO to be ready...
-+ true
-+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager
-+ echo 'AKO setup is still in progress...'
-AKO setup is still in progress...
-+ sleep 10
-+ true
-+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager
-+ echo 'AKO setup is still in progress...'
-AKO setup is still in progress...
-+ sleep 10
-+ true
-+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager
-+ echo 'AKO setup is still in progress...'
-AKO setup is still in progress...
-+ sleep 10
-+ true
-+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager
-+ echo 'AKO setup is still in progress...'
-AKO setup is still in progress...
-+ sleep 10
-+ true
-+ kubectl --namespace operators get deployment/aerospike-operator-controller-manager
-+ echo 'AKO is ready.'
-AKO is ready.
-+ kubectl --namespace operators wait --for=condition=available --timeout=180s deployment/aerospike-operator-controller-manager
-deployment.apps/aerospike-operator-controller-manager condition met
-+ break
-+ echo 'Granting permissions to the target namespace...'
-Granting permissions to the target namespace...
-+ kubectl --namespace aerospike create serviceaccount aerospike-operator-controller-manager
-serviceaccount/aerospike-operator-controller-manager created
-+ kubectl create clusterrolebinding aerospike-cluster --clusterrole=aerospike-cluster --serviceaccount=aerospike:aerospike-operator-controller-manager
-clusterrolebinding.rbac.authorization.k8s.io/aerospike-cluster created
-+ echo 'Setting secrets for Aerospike cluster...'
-Setting secrets for Aerospike cluster...
-+ kubectl --namespace aerospike create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets
-secret/aerospike-secret created
-+ kubectl --namespace aerospike create secret generic auth-secret --from-literal=password=admin123
-secret/auth-secret created
-+ kubectl --namespace aerospike create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs
-secret/aerospike-tls created
-+ echo 'Adding storage class...'
-Adding storage class...
-+ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernetes-operator/refs/heads/master/config/samples/storage/eks_ssd_storage_class.yaml
-storageclass.storage.k8s.io/ssd created
-+ echo 'Deploying Aerospike cluster...'
-Deploying Aerospike cluster...
-+ kubectl apply -f /home/joem/src/aerospike-vector/kubernetes/generated/manifests/aerospike-cr.yaml
-aerospikecluster.asdb.aerospike.com/aerocluster created
-+ setup_avs
-+ echo 'Setting secrets for AVS cluster...'
-Setting secrets for AVS cluster...
-+ kubectl --namespace avs create secret generic auth-secret --from-literal=password=admin123
-secret/auth-secret created
-+ kubectl --namespace avs create secret generic aerospike-tls --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/certs
-secret/aerospike-tls created
-+ kubectl --namespace avs create secret generic aerospike-secret --from-file=/home/joem/src/aerospike-vector/kubernetes/generated/secrets
-secret/aerospike-secret created
-+ deploy_avs_helm_chart
-+ echo 'Deploying AVS Helm chart...'
-Deploying AVS Helm chart...
-+ helm repo add aerospike-helm https://artifact.aerospike.io/artifactory/api/helm/aerospike-helm
-"aerospike-helm" already exists with the same configuration, skipping
-+ helm repo update
-Hang tight while we grab the latest from your chart repositories...
-...Successfully got an update from the "istio" chart repository
-...Successfully got an update from the "aerospike-helm" chart repository
-...Successfully got an update from the "prometheus-community" chart repository
-...Successfully got an update from the "stable" chart repository
-...Successfully got an update from the "jetstack" chart repository
-Update Complete. ⎈Happy Helming!⎈
-+ '[' -z '' ']'
-+ helm install avs-app --values /home/joem/src/aerospike-vector/kubernetes/generated/manifests/avs-values.yaml --namespace avs aerospike-helm/aerospike-vector-search --version 0.6.0 --wait
-NAME: avs-app
-LAST DEPLOYED: Tue Nov 12 17:45:28 2024
-NAMESPACE: avs
-STATUS: deployed
-REVISION: 1
-TEST SUITE: None
-NOTES:
-
-+ setup_monitoring
-+ echo 'Adding monitoring setup...'
-Adding monitoring setup...
-+ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
-"prometheus-community" already exists with the same configuration, skipping
-+ helm repo update
-Hang tight while we grab the latest from your chart repositories...
-...Successfully got an update from the "istio" chart repository
-...Successfully got an update from the "aerospike-helm" chart repository
-...Successfully got an update from the "jetstack" chart repository
-...Successfully got an update from the "prometheus-community" chart repository
-...Successfully got an update from the "stable" chart repository
-Update Complete. ⎈Happy Helming!⎈
-+ helm install monitoring-stack prometheus-community/kube-prometheus-stack --namespace monitoring --create-namespace
-NAME: monitoring-stack
-LAST DEPLOYED: Tue Nov 12 17:46:12 2024
-NAMESPACE: monitoring
-STATUS: deployed
-REVISION: 1
-NOTES:
-kube-prometheus-stack has been installed. Check its status by running:
- kubectl --namespace monitoring get pods -l "release=monitoring-stack"
-
-Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
-+ echo 'Applying additional monitoring manifests...'
-Applying additional monitoring manifests...
-+ kubectl apply -f manifests/monitoring/aerospike-exporter-service.yaml
-service/aerospike-exporter created
-+ kubectl apply -f manifests/monitoring/aerospike-servicemonitor.yaml
-servicemonitor.monitoring.coreos.com/aerospike-monitor created
-+ kubectl apply -f manifests/monitoring/avs-servicemonitor.yaml
-servicemonitor.monitoring.coreos.com/avs-monitor created
-+ print_final_instructions
-+ echo Your new deployment is available at a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com.
-Your new deployment is available at a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com.
-+ echo Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec.
-Check your deployment using our command line tool asvec available at https://github.com/aerospike/asvec.
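Note (editorial): the AVS chart is installed pinned at version 0.6.0 and kube-prometheus-stack lands alongside it; the chart NOTES above already suggest the monitoring check. Verifying both releases after a run like this could look like (illustrative):

    helm ls --namespace avs
    kubectl --namespace monitoring get pods -l "release=monitoring-stack"
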
-+ [[ '' != 1 ]] -+ echo 'connect with asvec using cert ' -connect with asvec using cert -+ cat /home/joem/src/aerospike-vector/kubernetes/generated/certs/ca.aerospike.com.pem ------BEGIN CERTIFICATE----- -MIIDtTCCAp2gAwIBAgIUcJKnpGvliqTVvUfw/gp4hDDltigwDQYJKoZIhvcNAQEL -BQAwajELMAkGA1UEBhMCVUsxDzANBgNVBAgMBkxvbmRvbjEPMA0GA1UEBwwGTG9u -ZG9uMQwwCgYDVQQKDANhYnMxEDAOBgNVBAsMB1N1cHBvcnQxGTAXBgNVBAMMEGNh -LmFlcm9zcGlrZS5jb20wHhcNMjQxMTEzMDE0MzMyWhcNMzQxMTExMDE0MzMyWjBq -MQswCQYDVQQGEwJVSzEPMA0GA1UECAwGTG9uZG9uMQ8wDQYDVQQHDAZMb25kb24x -DDAKBgNVBAoMA2FiczEQMA4GA1UECwwHU3VwcG9ydDEZMBcGA1UEAwwQY2EuYWVy -b3NwaWtlLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKr3cU8X -8pGbVv48DMWctDYW1sGv/GCX7Cb7NJQNtFOmPV6atZKXcPsaaGq0RVtwcluwOQZm -LKTT9Nwc52OFqTlLrHr2siSwMAURMPPT/RsTabu/rqs5nXbucH5bpmWj8nLf5zxa -lRQjUY1VJJz0apLJKedq5PeFHeIqBSDjSSvQEhUD7ulo/fgWn4Lwrlwji5cy2H1b -OAamGO5POG2QEKB6CjgeoasZfptLA0pkLoT4KeAQky36+1v6yv422lpRidfkLgaF -u/vD33BaptxPdbskYEiOPUJ0RsWmiu5By4ey95YILnu2oXwkpPxxDQgwpcmEMZjv -kznXffSuOt3yTmcCAwEAAaNTMFEwHQYDVR0OBBYEFC2tQmqe+AvB/D2IFK0bGWSm -DcuXMB8GA1UdIwQYMBaAFC2tQmqe+AvB/D2IFK0bGWSmDcuXMA8GA1UdEwEB/wQF -MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAJ3sACmNGHktNAt5XzBrgmNr9eIGzglZ -8sWrrV5Gpr550iShOXbmePUAEK5xrMHNriBw7r0/XeJxrZpNoJPDaWsbP9kbyhJP -IFV7GY8v/j0G68xbj6BfIMzIXJ2AdaAei1BFXUpYfT+uCKNT0zNtCzXyrxPwBvHv -Um+elrFq75rS5Ds7d7haP3sl8wNkmr+Yqjf97wPBvKfFoNOCIfp6bBHCFzKRJGzf -EeGyjbvEXWycWVO4PIAa0KiM/Bcd0Giced+sjD6fq6QbGveg55Kg49Be3cmwVZAO -AJMw0W7eURKvSg5cCBQtOI8+3T9xFmYOuQnpu4YDoykVeFOdIkuvAwU= ------END CERTIFICATE----- -+ echo Use the asvec tool to change your password with -Use the asvec tool to change your password with -+ echo asvec -h a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com:5000 --tls-cafile path/to/tls/file -U admin -P admin user new-password --name admin --new-password your-new-password -asvec -h a392672ac93974ff7aff90ce248a0daf-a96448fff4f0cc23.elb.eu-central-1.amazonaws.com:5000 --tls-cafile path/to/tls/file -U admin -P admin user new-password --name admin --new-password your-new-password -+ echo 'Setup Complete!' -Setup Complete!