Add snapshotter CRDs after cluster setup
Signed-off-by: Grant Griffiths <grant@portworx.com>
ggriffiths committed Nov 13, 2019
1 parent 4fcafec commit d199de0
Showing 1 changed file with 62 additions and 0 deletions.
prow.sh
@@ -322,6 +322,9 @@ configvar CSI_PROW_E2E_ALPHA_GATES_1_16 'VolumeSnapshotDataSource=true' "alpha f
configvar CSI_PROW_E2E_ALPHA_GATES_LATEST 'VolumeSnapshotDataSource=true' "alpha feature gates for latest Kubernetes"
configvar CSI_PROW_E2E_ALPHA_GATES "$(get_versioned_variable CSI_PROW_E2E_ALPHA_GATES "${csi_prow_kubernetes_version_suffix}")" "alpha E2E feature gates"

# Which external-snapshotter tag to use for the snapshotter CRD and snapshot-controller deployment
configvar CSI_SNAPSHOTTER_VERSION 'v2.0.0-rc4' "external-snapshotter version tag"
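# Note (illustrative, not part of this change): like other configvar settings in prow.sh,
# the tag can be overridden through the environment when invoking the script, e.g.
#   CSI_SNAPSHOTTER_VERSION=<other-release-tag> ./prow.sh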

# Some tests are known to be unusable in a KinD cluster. For example,
# stopping kubelet with "ssh <node IP> systemctl stop kubelet" simply
# doesn't work. Such tests should be written in a way that they verify
@@ -657,6 +660,61 @@ install_hostpath () {
fi
}

# Installs all necessary snapshotter CRDs
install_snapshot_crds() {
    # Apply each volumesnapshot CRD and wait until it is registered.
    CRD_BASE_DIR="https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/config/crd"
    kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotclasses.yaml"
    until kubectl get volumesnapshotclasses.snapshot.storage.k8s.io; do
        sleep 2
    done

    kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshots.yaml"
    until kubectl get volumesnapshots.snapshot.storage.k8s.io; do
        sleep 2
    done

    kubectl apply -f "${CRD_BASE_DIR}/snapshot.storage.k8s.io_volumesnapshotcontents.yaml"
    until kubectl get volumesnapshotcontents.snapshot.storage.k8s.io; do
        sleep 2
    done
}
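# Illustrative check (not part of this change): after install_snapshot_crds returns, all
# three CRDs should appear when listing them, e.g.
#   kubectl get crd | grep snapshot.storage.k8s.io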

# Install snapshot controller and associated RBAC, retrying until the pod is running.
install_snapshot_controller() {
    kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/rbac-snapshot-controller.yaml"
    cnt=0
    until kubectl get clusterrolebinding snapshot-controller-role; do
        if [ $cnt -gt 30 ]; then
            echo "Cluster role bindings:"
            kubectl describe clusterrolebinding
            echo >&2 "ERROR: snapshot controller RBAC not ready after over 5min"
            exit 1
        fi
        echo "$(date +%H:%M:%S)" "waiting for snapshot RBAC setup to complete, attempt #$cnt"
        cnt=$((cnt + 1))
        sleep 10
    done

    kubectl apply -f "https://raw.githubusercontent.com/kubernetes-csi/external-snapshotter/${CSI_SNAPSHOTTER_VERSION}/deploy/kubernetes/snapshot-controller/setup-snapshot-controller.yaml"
    cnt=0
    until kubectl get statefulset snapshot-controller | grep snapshot-controller | grep "1/1"; do
        if [ $cnt -gt 30 ]; then
            echo "Running statefulsets:"
            kubectl describe statefulsets
            echo >&2 "ERROR: snapshot controller not ready after over 5min"
            exit 1
        fi
        echo "$(date +%H:%M:%S)" "waiting for snapshot controller deployment to complete, attempt #$cnt"
        cnt=$((cnt + 1))
        sleep 10
    done
}
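# Note (illustrative, not part of this change): both retry loops above give up after
# roughly 30 attempts x 10s sleep = 5 minutes, which is what the "after over 5min"
# error messages refer to. A manual readiness check once the loop exits could be:
#   kubectl get statefulset snapshot-controller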

# collect logs and cluster status (like the version of all components, Kubernetes version, test version)
collect_cluster_info () {
cat <<EOF
@@ -987,6 +1045,10 @@ main () {
    if tests_need_non_alpha_cluster; then
        start_cluster || die "starting the non-alpha cluster failed"

        # Install necessary snapshot CRDs and snapshot controller
        install_snapshot_crds
        install_snapshot_controller

        # Installing the driver might be disabled.
        if install_hostpath "$images"; then
            collect_cluster_info