+
function site_deploy {
    # Create the Flux objects that deploy this site from
    # ${SITE_REPO}@${SITE_BRANCH}:${SITE_PATH}, decrypting secrets with
    # the SOPS key named ${FLUX_SOPS_KEY_NAME}.
    # Expansions are quoted so empty or space-containing values fail
    # loudly instead of silently shifting arguments (SC2086).
    flux_create_site "${SITE_REPO}" "${SITE_BRANCH}" "${SITE_PATH}" "${FLUX_SOPS_KEY_NAME}"
}
+
function site_clean {
    # Delete the Flux kustomization driving this site; Flux then garbage
    # collects the resources it created.  The command substitution and
    # its arguments are quoted so an empty or whitespace-containing name
    # cannot break argument parsing.
    kubectl -n flux-system delete kustomization \
        "$(_site_kustomization_name "${SITE_REPO}" "${SITE_BRANCH}" "${SITE_PATH}")"
}
+
function _is_cluster_ready {
    # Predicate for wait_for: succeed only when every cluster named in
    # the deployment manifests reports its Ready condition as "True".
    local yaml name status
    for yaml in "${SCRIPTDIR}"/deployment/*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' "${yaml}")
        if [[ -n ${name} ]]; then
            status=$(kubectl -n "${SITE_NAMESPACE}" get cluster "${name}" \
                -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}')
            if [[ ${status} != "True" ]]; then
                return 1
            fi
        fi
    done
}
+
function _is_control_plane_ready {
    # Checking the Cluster resource status is not sufficient, it
    # reports the control plane as ready before the nodes forming the
    # control plane are ready.  Instead, compare the count of Ready
    # control-plane nodes (via the per-cluster admin kubeconfig written
    # by site_wait_for_all_ready) to the desired replica count of the
    # kubeadmcontrolplane object.
    local yaml name replicas ready
    for yaml in "${SCRIPTDIR}"/deployment/*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' "${yaml}")
        if [[ -n ${name} ]]; then
            # Assignment is separate from 'local' so a failing kubectl
            # is not masked by the declaration's exit status (SC2155).
            replicas=$(kubectl -n "${SITE_NAMESPACE}" get kubeadmcontrolplane "${name}" \
                -o jsonpath='{.spec.replicas}')
            ready=$(kubectl --kubeconfig="${BUILDDIR}/${name}-admin.conf" get nodes \
                -l node-role.kubernetes.io/control-plane \
                -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' \
                | grep -c True)
            if [[ ${ready} != "${replicas}" ]]; then
                return 1
            fi
        fi
    done
}
+
function site_wait_for_all_ready {
    # Block until every site cluster and its control-plane nodes are
    # Ready.  WAIT_FOR_INTERVAL / WAIT_FOR_TRIES are globals consumed by
    # the externally defined wait_for helper: poll every 60s, up to 30
    # times.
    WAIT_FOR_INTERVAL=60s
    WAIT_FOR_TRIES=30
    wait_for _is_cluster_ready
    # Fetch an admin kubeconfig for each deployed cluster so the
    # control-plane readiness check below can query the workload
    # clusters directly.
    local yaml name
    for yaml in "${SCRIPTDIR}"/deployment/*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' "${yaml}")
        if [[ -n ${name} ]]; then
            clusterctl -n "${SITE_NAMESPACE}" get kubeconfig "${name}" \
                >"${BUILDDIR}/${name}-admin.conf"
            chmod 600 "${BUILDDIR}/${name}-admin.conf"
        fi
    done
    wait_for _is_control_plane_ready
}
+
function site_insert_control_plane_network_identity_into_ssh_config {
    # This enables logging into the control plane machines from this
    # machine without specifying the identity file on the command line

    # ipcalc derives the control plane network from the endpoint
    # address and prefix length.  'command -v' is the portable
    # is-it-installed test; 'which' is non-standard.
    if ! command -v ipcalc >/dev/null; then
        apt-get install -y ipcalc
    fi

    # Create ssh config if it doesn't exist
    mkdir -p "${HOME}/.ssh" && chmod 700 "${HOME}/.ssh"
    touch "${HOME}/.ssh/config"
    chmod 600 "${HOME}/.ssh/config"
    # Add the entry for the control plane network, host value in ssh
    # config is a wildcard
    local yaml name values endpoint prefix host
    for yaml in "${SCRIPTDIR}"/deployment/*.yaml; do
        name=$(awk '/name:/ {NAME=$2} /chart: deploy\/cluster/ {print NAME; exit}' "${yaml}")
        if [[ -n ${name} ]]; then
            # Fetch the computed chart values once instead of once per
            # field extracted.
            values=$(helm -n "${SITE_NAMESPACE}" get values -a "${name}")
            endpoint=$(awk '/controlPlaneEndpoint:/ {print $2}' <<<"${values}")
            prefix=$(awk '/controlPlanePrefix:/ {print $2}' <<<"${values}")
            host=$(ipcalc "${endpoint}/${prefix}" | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
            # Drop any stale entry (Host line plus its three option
            # lines) before appending a fresh one.  -F matches the host
            # literally since it contains '.' and '*'.
            if grep -qF "Host ${host}" "${HOME}/.ssh/config"; then
                sed -i -e '/Host '"${host}"'/,+3 d' "${HOME}/.ssh/config"
            fi
            cat <<EOF >>"${HOME}/.ssh/config"
Host ${host}
    IdentityFile ${SCRIPTDIR}/id_rsa
    StrictHostKeyChecking no
    UserKnownHostsFile /dev/null
EOF
        fi
    done
    # Add the identity to authorized keys on this host to enable ssh
    # logins via its control plane address
    local authorized_key
    authorized_key=$(cat "${SCRIPTDIR}/id_rsa.pub")
    # The file may not exist yet; create it so the de-duplication below
    # cannot fail on a missing file.
    touch "${HOME}/.ssh/authorized_keys"
    chmod 600 "${HOME}/.ssh/authorized_keys"
    # Remove any stale copy of the key with a fixed-string match: the
    # key material contains regex metacharacters ('/', '+') that would
    # break a sed/grep pattern.  grep -v exits non-zero when it emits
    # nothing, which is fine here.
    grep -vF "${authorized_key}" "${HOME}/.ssh/authorized_keys" \
        >"${HOME}/.ssh/authorized_keys.tmp" || true
    mv "${HOME}/.ssh/authorized_keys.tmp" "${HOME}/.ssh/authorized_keys"
    cat "${SCRIPTDIR}/id_rsa.pub" >>"${HOME}/.ssh/authorized_keys"
}
+
function _is_cluster_deleted {
    # Predicate for wait_for: succeed only when none of the clusters
    # named in the deployment manifests still exist.
    local yaml name
    for yaml in "${SCRIPTDIR}"/deployment/*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' "${yaml}")
        if [[ -n ${name} ]]; then
            # 'get' succeeds while the cluster still exists; only the
            # exit status matters, so silence the output (including the
            # expected "not found" error printed on every poll).
            if kubectl -n "${SITE_NAMESPACE}" get cluster "${name}" >/dev/null 2>&1; then
                return 1
            fi
        fi
    done
}
+
function site_wait_for_all_deleted {
    # Block until every site cluster object is gone, polling once per
    # minute for at most 30 attempts.  WAIT_FOR_TRIES and
    # WAIT_FOR_INTERVAL are globals read by the external wait_for
    # helper.
    WAIT_FOR_TRIES=30
    WAIT_FOR_INTERVAL=60s
    wait_for _is_cluster_deleted
}