# Absolute, symlink-resolved directory containing this script.
# All inner expansions are quoted so paths containing spaces or glob
# characters do not word-split (original left them unquoted).
SCRIPTDIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
# Shared shell library directory: three levels up from SCRIPTDIR,
# under env/lib.
LIBDIR="$(dirname "$(dirname "$(dirname "${SCRIPTDIR}")")")/env/lib"
# Pull in the shared helpers (wait_for, flux_create_site,
# flux_site_kustomization_name, SITE_NAMESPACE, ...).  Paths are quoted
# so directories containing spaces do not break the source commands.
source "${LIBDIR}/common.sh"
source "${SCRIPTDIR}/../common.sh"
10 BUILDDIR=${SCRIPTDIR/deploy/build}
# Location of the site definition: git repo, branch, and path within
# the repo.  Each may be overridden from the environment.
# Fix: the original default was " https://gerrit.akraino.org/r/icn"
# with a stray leading space inside the quotes; it only worked because
# later unquoted expansions word-split it away.
SITE_REPO=${SITE_REPO:-"https://gerrit.akraino.org/r/icn"}
SITE_BRANCH=${SITE_BRANCH:-"master"}
SITE_PATH=${SITE_PATH:-"deploy/site/vm-mc/deployment"}
18 gpg --import ${FLUX_SOPS_PRIVATE_KEY}
19 flux_create_site ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH} ${FLUX_SOPS_KEY_NAME}
23 kubectl -n flux-system delete kustomization $(flux_site_kustomization_name ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH})
# Succeed only when every cluster named in the site's deployment
# manifests reports a Ready=True condition on its CAPI Cluster
# resource.  Intended as a predicate for wait_for (from common.sh).
# NOTE(review): the loop/function closers are outside this excerpt.
function is_cluster_ready {
    for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
        # Cluster name as declared by the clusterName: key in the manifest
        name=$(awk '/clusterName:/ {print $2}' ${yaml})
        if [[ $(kubectl -n ${SITE_NAMESPACE} get cluster ${name} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
# Succeed only when, for every cluster, the number of Ready
# control-plane nodes matches the requested replica count.
# NOTE(review): the loop/function closers are outside this excerpt.
function is_control_plane_ready {
    # Checking the Cluster resource status is not sufficient, it
    # reports the control plane as ready before the nodes forming the
    # control plane are ready
    for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' ${yaml})
        # Desired control-plane size from the KubeadmControlPlane resource
        local replicas=$(kubectl -n ${SITE_NAMESPACE} get kubeadmcontrolplane ${name} -o jsonpath='{.spec.replicas}')
        # Count Ready=True control-plane nodes via the workload cluster's
        # admin kubeconfig (written by wait_for_all_ready into BUILDDIR)
        if [[ $(kubectl --kubeconfig=${BUILDDIR}/${name}-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) != ${replicas} ]]; then
# Register the site SSH key for the clusters' control-plane networks in
# this host's ~/.ssh/config, and authorize it for logins to this host.
# NOTE(review): several interior lines (fi/done, the heredoc "Host"
# line, and the EOF terminator) fall outside this excerpt.
function insert_control_plane_network_identity_into_ssh_config {
    # This enables logging into the control plane machines from this
    # machine without specifying the identity file on the command line
    # NOTE(review): assumes an apt-based host for installing ipcalc — confirm
    if [[ ! $(which ipcalc) ]]; then
        apt-get install -y ipcalc
    # Create ssh config if it doesn't exist
    mkdir -p ${HOME}/.ssh && chmod 700 ${HOME}/.ssh
    touch ${HOME}/.ssh/config
    chmod 600 ${HOME}/.ssh/config
    # Add the entry for the control plane network, host value in ssh
    # config is a wildcard
    for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
        # First name: key in the manifest (note: awk exits after the match)
        name=$(awk '/name:/ {print $2; exit}' ${yaml})
        # Control plane endpoint address and prefix from the Helm release values
        endpoint=$(helm -n ${SITE_NAMESPACE} get values -a ${name} | awk '/controlPlaneEndpoint:/ {print $2}')
        prefix=$(helm -n ${SITE_NAMESPACE} get values -a ${name} | awk '/controlPlanePrefix:/ {print $2}')
        # Turn the network address into an ssh_config wildcard, e.g.
        # 192.168.151.0/24 -> 192.168.151.*
        host=$(ipcalc ${endpoint}/${prefix} | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
        # Replace any previous entry for this network (entry is 4 lines)
        if [[ $(grep -c "Host ${host}" ${HOME}/.ssh/config) != 0 ]]; then
            sed -i -e '/Host '"${host}"'/,+3 d' ${HOME}/.ssh/config
        cat <<EOF >>${HOME}/.ssh/config
IdentityFile ${SCRIPTDIR}/id_rsa
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
    # Add the identity to authorized keys on this host to enable ssh
    # logins via its control plane address
    authorized_key=$(cat ${SCRIPTDIR}/id_rsa.pub)
    # Remove any stale copy first (sed with ! as the address delimiter,
    # since the key contains / characters), then re-append it
    sed -i -e '\!'"${authorized_key}"'!d' ${HOME}/.ssh/authorized_keys
    cat ${SCRIPTDIR}/id_rsa.pub >> ~/.ssh/authorized_keys
# Block until every cluster and its control-plane nodes are Ready,
# capturing each cluster's admin kubeconfig along the way.
# NOTE(review): wait_for is provided by the sourced common.sh; the
# loop's "done" and the function's closer are outside this excerpt.
function wait_for_all_ready {
    wait_for is_cluster_ready
    for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' ${yaml})
        # Fetch the workload cluster's admin kubeconfig; needed by
        # is_control_plane_ready below
        clusterctl -n ${SITE_NAMESPACE} get kubeconfig ${name} >${BUILDDIR}/${name}-admin.conf
        chmod 600 ${BUILDDIR}/${name}-admin.conf
    wait_for is_control_plane_ready
    insert_control_plane_network_identity_into_ssh_config
# Succeed only when no Cluster resource remains for any cluster named
# in the site deployment manifests (kubectl get failing means deleted).
# NOTE(review): the loop/function closers are outside this excerpt.
function is_cluster_deleted {
    for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
        name=$(awk '/clusterName:/ {print $2}' ${yaml})
        ! kubectl -n ${SITE_NAMESPACE} get cluster ${name}
104 function wait_for_all_deleted {
105 WAIT_FOR_INTERVAL=60s
107 wait_for is_cluster_deleted
# Subcommand dispatch arms; the case statement's opener, the other
# arms, and the usage heredoc's opener/terminator are outside this
# excerpt.  The lines below the arms are heredoc output text and must
# stay comment-free.
"wait") wait_for_all_ready ;;
"wait-clean") wait_for_all_deleted ;;
Usage: $(basename $0) COMMAND
clean - Remove the site
deploy - Deploy the site
wait - Wait for the site to be ready