Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

VEC-180: Optimize deployment, add Istio #37

Merged
merged 16 commits into from
Jun 7, 2024
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion kubernetes/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@ Before you start, make sure you have installed the necessary tools:
## Scripts

- `full-create-and-install.sh`: Creates a GKE cluster, installs Aerospike, Proximus, and sets up monitoring services.
- `run-quote-search.sh`: Runs the Quote Search sample application.

### Grafana Dashboards

Expand Down
108 changes: 73 additions & 35 deletions kubernetes/full-create-and-install.sh
Original file line number Diff line number Diff line change
@@ -1,27 +1,33 @@
#!/bin/bash

# This script sets up a GKE cluster with specific configurations for Aerospike and Proximus node pools.
# This script sets up a GKE cluster with specific configurations for Aerospike and AVS node pools.
# It handles the creation of the cluster, node pools, labeling, tainting of nodes, and deployment of necessary operators and configurations.
# Additionally, it sets up monitoring using Prometheus and deploys a specific Helm chart for Proximus.
# Additionally, it sets up monitoring using Prometheus and deploys a specific Helm chart for AVS.

# Function to print environment variables for verification
set -eo pipefail
if [ -n "$DEBUG" ]; then set -x; fi
trap 'echo "Error: $? at line $LINENO" >&2' ERR

print_env() {
echo "Environment Variables:"
echo "export PROJECT_ID=$PROJECT_ID"
echo "export CLUSTER_NAME=$CLUSTER_NAME"
echo "export NODE_POOL_NAME_AEROSPIKE=$NODE_POOL_NAME_AEROSPIKE"
echo "export NODE_POOL_NAME_PROXIMUS=$NODE_POOL_NAME_PROXIMUS"
echo "export NODE_POOL_NAME_AVS=$NODE_POOL_NAME_AVS"
echo "export ZONE=$ZONE"
echo "export FEATURES_CONF=$FEATURES_CONF"
echo "export AEROSPIKE_CR=$AEROSPIKE_CR"
}

# Set environment variables for the GKE cluster setup
export PROJECT_ID="aerostation-dev"
export CLUSTER_NAME="myworld"
export PROJECT_ID="$(gcloud config get-value project)"
export CLUSTER_NAME="${PROJECT_ID}-modern-world"
export NODE_POOL_NAME_AEROSPIKE="aerospike-pool"
export NODE_POOL_NAME_PROXIMUS="proximus-pool"
export NODE_POOL_NAME_AVS="avs-pool"
export ZONE="us-central1-c"
export HELM_CHART_VECTOR="/Users/david/helm/aerospike/helm-charts/aerospike-vector-search"
export HELM_CHART_APP="/Users/david/helm/aerospike/helm-charts/aerospike-vector-search-examples/quote-semantic-search/"
export FEATURES_CONF="./features.conf"
export AEROSPIKE_CR="./manifests/ssd_storage_cluster_cr.yaml"

Expand Down Expand Up @@ -65,27 +71,6 @@ kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_AEROSPIKE" -
# kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_AEROSPIKE" -o name | \
# xargs -I {} kubectl taint nodes {} dedicated=aerospike:NoSchedule --overwrite

echo "Adding Proximus node pool..."
if ! gcloud container node-pools create "$NODE_POOL_NAME_PROXIMUS" \
--cluster "$CLUSTER_NAME" \
--project "$PROJECT_ID" \
--zone "$ZONE" \
--num-nodes 3 \
--disk-type "pd-standard" \
--disk-size "100" \
--machine-type "e2-highmem-4"; then
echo "Failed to create Proximus node pool"
exit 1
else
echo "Proximus node pool added successfully."
fi

echo "Labeling Proximus nodes..."
kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_PROXIMUS" -o name | \
xargs -I {} kubectl label {} aerospike.com/node-pool=proximus --overwrite

echo "Setup complete. Cluster and node pools are configured."

echo "Deploying Aerospike Kubernetes Operator (AKO)..."
curl -sL https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.25.0/install.sh | bash -s v0.25.0
kubectl create -f https://operatorhub.io/install/aerospike-kubernetes-operator.yaml
Expand Down Expand Up @@ -118,14 +103,63 @@ kubectl apply -f https://raw.githubusercontent.com/aerospike/aerospike-kubernete

echo "Deploying Aerospike cluster..."
kubectl apply -f "$AEROSPIKE_CR"
# replace with helm repo add when helm chart is published.
echo "Deploying Proximus from Helm chart..."
mkdir -p temp-helm
cd temp-helm
git clone https://github.com/aerospike/helm-charts.git
cd ..
helm install proximus-gke "temp-helm/helm-charts/aerospike-proximus" --values "manifests/proximus-gke-values.yaml" --namespace aerospike --wait

##############################################
# AVS name space
##############################################

echo "Adding avs node pool..."
if ! gcloud container node-pools create "$NODE_POOL_NAME_AVS" \
--cluster "$CLUSTER_NAME" \
--project "$PROJECT_ID" \
--zone "$ZONE" \
--num-nodes 3 \
--disk-type "pd-standard" \
--disk-size "100" \
--machine-type "e2-highmem-4"; then
echo "Failed to create avs node pool"
exit 1
else
echo "avs node pool added successfully."
fi

echo "Labeling avs nodes..."
kubectl get nodes -l cloud.google.com/gke-nodepool="$NODE_POOL_NAME_AVS" -o name | \
xargs -I {} kubectl label {} aerospike.com/node-pool=avs --overwrite



echo "Setup complete. Cluster and node pools are configured."

kubectl create namespace avs

echo "Setting secrets for avs cluster..."
kubectl --namespace avs create secret generic aerospike-secret --from-file=features.conf="$FEATURES_CONF"
kubectl --namespace avs create secret generic auth-secret --from-literal=password='admin123'


echo "Deploying Istio"
helm repo add istio https://istio-release.storage.googleapis.com/charts
helm repo update

helm install istio-base istio/base --namespace istio-system --set defaultRevision=default --create-namespace --wait
helm install istiod istio/istiod --namespace istio-system --create-namespace --wait
helm install istio-ingress istio/gateway \
--values ./manifests/istio-ingressgateway-values.yaml \
--namespace istio-ingress \
--create-namespace \
--wait

kubectl apply -f ./manifests/istio



helm install avs-gke --values "manifests/avs-gke-values.yaml" --namespace avs $HELM_CHART_VECTOR --wait


##############################################
# Monitoring namespace
##############################################
echo "Adding monitoring setup..."
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
Expand All @@ -140,4 +174,8 @@ echo "To include your Grafana dashboards, use 'import-dashboards.sh <your grafan
echo "To view grafana dashboards from your machine use kubectl port-forward -n monitoring svc/monitoring-stack-grafana 3000:80"
echo "To expose grafana ports publically 'kubectl apply -f helpers/EXPOSE-GRAFANA.yaml'"
echo "To find the exposed port with 'kubectl get svc -n monitoring' "
echo "To run the quote-search app, use 'run-quote-search.sh'"


echo "To run the quote search sample app on your new cluster you can use:"
echo "helm install sematic-search-app --namespace avs --values ./manifests/sematic-search-values.yaml $HELM_CHART_APP --wait"

Original file line number Diff line number Diff line change
@@ -1,21 +1,23 @@

replicaCount: 3
proximusConfig:

aerospikeVectorSearchConfig:
heartbeat:
seeds:
- address: proximus-gke-aerospike-proximus-0.proximus-gke-aerospike-proximus.aerospike.svc.cluster.local
- address: avs-gke-aerospike-vector-search-0.avs-gke-aerospike-vector-search.avs.svc.cluster.local
port: 5001
- address: proximus-gke-aerospike-proximus-1.proximus-gke-aerospike-proximus.aerospike.svc.cluster.local
- address: avs-gke-aerospike-vector-search-1.avs-gke-aerospike-vector-search.avs.svc.cluster.local
port: 5001
- address: proximus-gke-aerospike-proximus-2.proximus-gke-aerospike-proximus.aerospike.svc.cluster.local
- address: avs-gke-aerospike-vector-search-2.avs-gke-aerospike-vector-search.avs.svc.cluster.local
port: 5001

interconnect:
ports:
5001:
addresses:
0.0.0.0
aerospike:
metadata-namespace: "proximus-meta"
metadata-namespace: "avs-meta"
seeds:
- aerocluster-0-0.aerocluster.aerospike.svc.cluster.local:
port: 3000
Expand All @@ -27,16 +29,28 @@ proximusConfig:
levels:
metrics-ticker: info
root: info
com.aerospike.vector.embedded.client: debug
client: debug
ticker-interval: 10

service:
enabled: true
enabled: false
annotations:
networking.gke.io/load-balancer-type: "Internal"
networking.gke.io/load-balancer-type: "External"
ports:
- name: "svc-port"
port: 5000
targetPort: 5000
# service:
# enabled: false
# type: LoadBalancer
# annotations:
# cloud.google.com/l4-rbs: "enabled"
# # networking.gke.io/load-balancer-type: "Internal"
# ports:
# - name: "svc-port"
# port: 5000
# targetPort: 5000

# schedule proximus nodes
affinity:
Expand All @@ -47,7 +61,7 @@ service:
- key: aerospike.com/node-pool
operator: In
values:
- "proximus"
- "aerospike-vector-search"
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- topologyKey: "kubernetes.io/hostname"
Expand All @@ -56,4 +70,4 @@ service:
- key: "app.kubernetes.io/name"
operator: In
values:
- "aerospike-proximus"
- "aerospike-vector-search"
17 changes: 17 additions & 0 deletions kubernetes/manifests/istio-ingressgateway-values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Helm values for the istio/gateway chart: exposes the AVS ingress gateway
# through a GKE L4 load balancer and opens the ports the Gateway routes.
service:
  # Provision an external cloud load balancer for the gateway pods.
  type: LoadBalancer
  annotations:
    # GKE: use the L4 regional backend-service-based (RBS) load balancer.
    cloud.google.com/l4-rbs: "enabled"
  ports:
    # Plain HTTP entry point.
    - name: http
      port: 80
      targetPort: 80
    # HTTPS entry point.
    - name: https
      port: 443
      targetPort: 443
    # Istio readiness/status port (used by gateway health checks).
    - name: status-port
      port: 15021
      targetPort: 15021
    # gRPC entry point for the AVS vector-search service.
    - name: grpc
      port: 5000
      targetPort: 5000
23 changes: 23 additions & 0 deletions kubernetes/manifests/istio/gateway.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Istio Gateway for the AVS namespace: accepts HTTP on 80 and gRPC on 5000
# on the shared ingress gateway; a VirtualService then routes the traffic.
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: avs-gw
  namespace: avs
spec:
  # Bind to the gateway deployed by the istio/gateway Helm chart,
  # whose pods carry the label istio=ingress.
  selector:
    istio: ingress
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
    - port:
        number: 5000
        name: grpc
        protocol: GRPC
      hosts:
        - "*"
      # FIX: the original attached `tls: {mode: PASSTHROUGH}` to this server.
      # Istio rejects TLS settings on plain-text HTTP-family ports
      # (HTTP/HTTP2/GRPC), which invalidates the whole Gateway resource, so
      # the stray tls block is removed. To pass encrypted traffic through
      # unterminated, declare `protocol: TLS` with `tls.mode: PASSTHROUGH`
      # on a dedicated server instead.
29 changes: 29 additions & 0 deletions kubernetes/manifests/istio/virtual-service-vector-search.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
# VirtualService routing traffic that enters via the avs-gw Gateway:
# port 80 -> quote search web app, port 5000 -> AVS gRPC service.
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: avs-vs
  namespace: avs
spec:
  hosts:
    - "*"
  gateways:
    - avs-gw
  http:
    # Requests arriving on gateway port 80: forward to the sample app
    # service on its container port 8080.
    - match:
        - uri:
            prefix: /
          port: 80
      route:
        - destination:
            port:
              number: 8080
            # NOTE(review): "sematic" (sic) matches the release name used by
            # the install instructions elsewhere in this PR — keep in sync.
            host: sematic-search-app-quote-semantic-search.avs.svc.cluster.local
    # Requests arriving on gateway port 5000: forward to the AVS
    # vector-search service on port 5000.
    - match:
        - uri:
            prefix: /
          port: 5000
      route:
        - destination:
            port:
              number: 5000
            host: avs-gke-aerospike-vector-search.avs.svc.cluster.local
Original file line number Diff line number Diff line change
@@ -1,17 +1,17 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: proximus-monitor
name: avs-monitor
namespace: monitoring
labels:
release: monitoring-stack # Ensure this matches the Helm release name
spec:
selector:
matchLabels:
app: proximus-gke-aerospike-proximus
app: avs-gke-aerospike-vector-search
namespaceSelector:
matchNames:
- aerospike
- avs
endpoints:
- port: manage-5040
path: /manage/rest/v1/prometheus
Expand Down
5 changes: 5 additions & 0 deletions kubernetes/manifests/sematic-search-values.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Helm values for the quote semantic search sample application.
quoteSearchConfig:
  # Reach AVS through the Istio ingress gateway service instead of a
  # direct pod/service address.
  avsHost: "istio-ingress.istio-ingress.svc.cluster.local"
  # Quoted string, not a YAML boolean — the chart appears to expect the
  # literal "True"/"False". NOTE(review): confirm expected casing/type.
  avsIsLoadbalancer: "True"
  # Aerospike namespace used by AVS ("avs-meta", as defined in
  # ssd_storage_cluster_cr.yaml).
  avsNamespace: "avs-meta"

14 changes: 14 additions & 0 deletions kubernetes/manifests/servicemonitor.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
---
# Prometheus Operator ServiceMonitor scraping the AVS management/metrics
# endpoint (/manage/rest/v1/prometheus on port manage-5040).
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: avs-service-monitor
  # NOTE(review): this monitor lives in (and, with no namespaceSelector,
  # selects services only from) the "aerospike" namespace, but this PR
  # installs the AVS chart into "avs" and the sibling avs-monitor manifest
  # selects the "avs" namespace — confirm this namespace is intended.
  namespace: aerospike
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: aerospike-vector-search
  # NOTE(review): unlike the avs-monitor manifest, there is no
  # "release: monitoring-stack" label here; verify the Prometheus
  # instance's serviceMonitorSelector will actually pick this up.
  endpoints:
    - port: "manage-5040"
      interval: 10s
      path: "/manage/rest/v1/prometheus"
8 changes: 4 additions & 4 deletions kubernetes/manifests/ssd_storage_cluster_cr.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,9 @@ spec:
storageClass: ssd
volumeMode: Filesystem
size: 1Gi
- name: proximus-meta
- name: avs-meta
aerospike:
path: /proximus/dev/xvdf
path: /avs/dev/xvdf
source:
persistentVolume:
storageClass: ssd
Expand Down Expand Up @@ -91,12 +91,12 @@ spec:
devices:
- /test/dev/xvdf

- name: proximus-meta
- name: avs-meta
nsup-period: 600
nsup-threads: 2
evict-tenths-pct: 5
replication-factor: 2
storage-engine:
type: device
devices:
- /proximus/dev/xvdf
- /avs/dev/xvdf
Loading