4 SCRIPTDIR="$(readlink -f $(dirname ${BASH_SOURCE[0]}))"
5 LIBDIR="$(dirname $(dirname $(dirname ${SCRIPTDIR})))/env/lib"
7 source $LIBDIR/common.sh
8 source $SCRIPTDIR/../common.sh
10 BUILDDIR=${SCRIPTDIR/deploy/build}
13 SITE_REPO=${SITE_REPO:-"https://gerrit.akraino.org/r/icn"}
14 SITE_BRANCH=${SITE_BRANCH:-"master"}
15 SITE_PATH=${SITE_PATH:-"deploy/site/vm"}
17 FLUX_SOPS_KEY_NAME=${FLUX_SOPS_KEY_NAME:-"icn-site-vm"}
# !!!NOTE!!! THE KEYS USED BELOW ARE FOR TEST PURPOSES ONLY. DO NOT
# USE THESE OUTSIDE OF THIS ICN VIRTUAL TEST ENVIRONMENT.
#
# Build the in-tree site values: generate a test password and SSH
# keypair, insert them into site.yaml, then SOPS-encrypt the result.
function build_source {
    # First decrypt the existing site YAML, otherwise we'll be
    # attempting to encrypt it twice below
    if [[ -f ${SCRIPTDIR}/sops.asc ]]; then
        gpg --import "${SCRIPTDIR}/sops.asc"
        sops_decrypt_site "${SCRIPTDIR}/site.yaml"
    fi

    # Generate user password and authorized key in site YAML
    # To login to guest, ssh -i ${SCRIPTDIR}/id_rsa
    HASHED_PASSWORD=$(mkpasswd --method=SHA-512 --rounds 10000 "mypasswd")
    sed -i -e 's!hashedPassword: .*!hashedPassword: '"${HASHED_PASSWORD}"'!' "${SCRIPTDIR}/site.yaml"
    # <<<y answers the overwrite prompt when id_rsa already exists
    ssh-keygen -t rsa -N "" -f "${SCRIPTDIR}/id_rsa" <<<y
    SSH_AUTHORIZED_KEY=$(cat "${SCRIPTDIR}/id_rsa.pub")
    # Use ! instead of usual / to avoid escaping / in the key value
    sed -i -e 's!sshAuthorizedKey: .*!sshAuthorizedKey: '"${SSH_AUTHORIZED_KEY}"'!' "${SCRIPTDIR}/site.yaml"

    # Encrypt the site YAML
    create_gpg_key "${FLUX_SOPS_KEY_NAME}"
    sops_encrypt_site "${SCRIPTDIR}/site.yaml" "${FLUX_SOPS_KEY_NAME}"

    # ONLY FOR TEST ENVIRONMENT: save the private key used
    export_gpg_private_key "${FLUX_SOPS_KEY_NAME}" >"${SCRIPTDIR}/sops.asc"
}
# Deploy the site: import the (test-only) SOPS private key so Flux can
# decrypt site secrets, then register the site source with Flux.
# NOTE(review): the "function deploy {" header was lost in extraction
# and is reconstructed here from the "deploy" dispatcher arm below.
function deploy {
    gpg --import "${SCRIPTDIR}/sops.asc"
    flux_create_site "${SITE_REPO}" "${SITE_BRANCH}" "${SITE_PATH}" "${FLUX_SOPS_KEY_NAME}"
}
# Remove the site by deleting its Flux Kustomization from the
# management cluster; Flux then garbage-collects the site resources.
# NOTE(review): the "function clean {" header was lost in extraction
# and is reconstructed here from the "clean" usage entry below.
function clean {
    kubectl -n flux-system delete kustomization "$(flux_site_kustomization_name "${SITE_REPO}" "${SITE_BRANCH}" "${SITE_PATH}")"
}
# Succeeds (exit 0) when the Cluster API "icn" cluster in the metal3
# namespace reports the Ready condition as "True".
function is_cluster_ready {
    [[ $(kubectl -n metal3 get cluster icn -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
}
# Succeeds when every control-plane node of the workload cluster is
# Ready, i.e. the count of Ready=True control-plane nodes equals the
# replica count declared on the kubeadmcontrolplane resource.
function is_control_plane_ready {
    # Checking the Cluster resource status is not sufficient, it
    # reports the control plane as ready before the nodes forming the
    # control plane are ready
    local replicas
    # Declare and assign separately so a kubectl failure isn't masked
    # by the exit status of "local".
    replicas=$(kubectl -n metal3 get kubeadmcontrolplane icn -o jsonpath='{.spec.replicas}')
    [[ $(kubectl --kubeconfig="${BUILDDIR}/icn-admin.conf" get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) == "${replicas}" ]]
}
function insert_control_plane_network_identity_into_ssh_config {
    # This enables logging into the control plane machines from this
    # machine without specifying the identity file on the command line

    # Create ssh config if it doesn't exist
    mkdir -p "${HOME}/.ssh" && chmod 700 "${HOME}/.ssh"
    touch "${HOME}/.ssh/config"
    chmod 600 "${HOME}/.ssh/config"
    # Add the entry for the control plane network, host value in ssh
    # config is a wildcard: the network address with the host portion
    # replaced by "*" (e.g. 192.168.151.*).
    endpoint=$(helm -n metal3 get values -a cluster-icn | awk '/controlPlaneEndpoint:/ {print $2}')
    prefix=$(helm -n metal3 get values -a cluster-icn | awk '/controlPlanePrefix:/ {print $2}')
    host=$(ipcalc "${endpoint}/${prefix}" | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
    # Drop any previous entry for this network first; an entry is two
    # lines (the Host line plus the IdentityFile line under it), hence
    # the ",+1" range in sed.
    if [[ $(grep -c "Host ${host}" "${HOME}/.ssh/config") != 0 ]]; then
        sed -i -e '/Host '"${host}"'/,+1 d' "${HOME}/.ssh/config"
    fi
    cat <<EOF >>"${HOME}/.ssh/config"
Host ${host}
  IdentityFile ${SCRIPTDIR}/id_rsa
EOF
}
# Block until the workload cluster and all of its control-plane nodes
# are Ready; save the cluster admin kubeconfig into BUILDDIR and set
# up passwordless ssh to the control-plane machines.
function wait_for_all_ready {
    wait_for is_cluster_ready
    clusterctl -n metal3 get kubeconfig icn >"${BUILDDIR}/icn-admin.conf"
    chmod 600 "${BUILDDIR}/icn-admin.conf"
    # The kubeconfig above is needed by is_control_plane_ready
    wait_for is_control_plane_ready
    insert_control_plane_network_identity_into_ssh_config
}
101 "build-source") build_source ;;
104 "wait") wait_for_all_ready ;;
106 Usage: $(basename $0) COMMAND
109 build-source - Build the in-tree site values
110 clean - Remove the site
111 deploy - Deploy the site
112 wait - Wait for the site to be ready