# Resolve the directory holding this script, following any symlinks.
SCRIPTDIR="$(readlink -f "$(dirname "${BASH_SOURCE[0]}")")"
# Shared shell library lives three levels up, under env/lib.
LIBDIR="$(dirname "$(dirname "$(dirname "${SCRIPTDIR}")")")/env/lib"

source "${LIBDIR}/common.sh"
source "${SCRIPTDIR}/../common.sh"

# Out-of-tree build location: SCRIPTDIR with the first "deploy" path
# component replaced by "build" (bash pattern substitution).
BUILDDIR=${SCRIPTDIR/deploy/build}

# Name of the GPG key Flux/SOPS uses to decrypt the site secrets;
# overridable from the environment.
FLUX_SOPS_KEY_NAME=${FLUX_SOPS_KEY_NAME:-"icn-site-vm"}
15 # !!!NOTE!!! THE KEYS USED BELOW ARE FOR TEST PURPOSES ONLY. DO NOT
16 # USE THESE OUTSIDE OF THIS ICN VIRTUAL TEST ENVIRONMENT.
# Build the in-tree site values: (re)generate the login credentials in
# site.yaml and encrypt the file with SOPS.
# Globals:  SCRIPTDIR (read), FLUX_SOPS_KEY_NAME (read)
# Outputs:  rewrites ${SCRIPTDIR}/site.yaml, ${SCRIPTDIR}/id_rsa{,.pub},
#           and ${SCRIPTDIR}/sops.asc
function build_source {
    # First decrypt the existing site YAML, otherwise we'll be
    # attempting to encrypt it twice below
    if [[ -f "${SCRIPTDIR}/sops.asc" ]]; then
        gpg --import "${SCRIPTDIR}/sops.asc"
        sops_decrypt_site "${SCRIPTDIR}/site.yaml"
    fi

    # Generate user password and authorized key in site YAML
    # To login to guest, ssh -i ${SCRIPTDIR}/id_rsa
    HASHED_PASSWORD=$(mkpasswd --method=SHA-512 --rounds 10000 "mypasswd")
    sed -i -e 's!hashedPassword: .*!hashedPassword: '"${HASHED_PASSWORD}"'!' "${SCRIPTDIR}/site.yaml"
    # <<<y answers the "Overwrite?" prompt when id_rsa already exists
    ssh-keygen -t rsa -N "" -f "${SCRIPTDIR}/id_rsa" <<<y
    SSH_AUTHORIZED_KEY=$(cat "${SCRIPTDIR}/id_rsa.pub")
    # Use ! instead of usual / to avoid escaping / in the key value
    sed -i -e 's!sshAuthorizedKey: .*!sshAuthorizedKey: '"${SSH_AUTHORIZED_KEY}"'!' "${SCRIPTDIR}/site.yaml"

    # Encrypt the site YAML
    create_gpg_key "${FLUX_SOPS_KEY_NAME}"
    sops_encrypt_site "${SCRIPTDIR}/site.yaml" "${FLUX_SOPS_KEY_NAME}"

    # ONLY FOR TEST ENVIRONMENT: save the private key used
    export_gpg_private_key "${FLUX_SOPS_KEY_NAME}" >"${SCRIPTDIR}/sops.asc"
}
# Deploy the site: import the (test-only) SOPS private key so secrets
# can be decrypted, then register the site with Flux.
# Globals:  SCRIPTDIR (read), FLUX_SOPS_KEY_NAME (read)
function deploy {
    gpg --import "${SCRIPTDIR}/sops.asc"
    flux_create_site https://gerrit.akraino.org/r/icn master deploy/site/vm "${FLUX_SOPS_KEY_NAME}"
}
# Remove the site: deleting the Flux Kustomization tears down the
# resources it deployed.
function clean {
    kubectl -n flux-system delete kustomization icn-master-site-vm
}
# Predicate: succeeds once the Cluster API "icn" cluster reports the
# Ready condition as True. Intended for use with wait_for.
function is_cluster_ready {
    [[ $(kubectl -n metal3 get cluster icn -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
}
# Predicate: succeeds once every control plane node of the workload
# cluster reports Ready. Intended for use with wait_for.
# Globals:  BUILDDIR (read; expects ${BUILDDIR}/icn-admin.conf)
function is_control_plane_ready {
    # Checking the Cluster resource status is not sufficient, it
    # reports the control plane as ready before the nodes forming the
    # control plane are ready
    local replicas
    # Split declaration from assignment so kubectl's exit status is not
    # masked by 'local'
    replicas=$(kubectl -n metal3 get kubeadmcontrolplane icn -o jsonpath='{.spec.replicas}')
    [[ $(kubectl --kubeconfig="${BUILDDIR}/icn-admin.conf" get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) == "${replicas}" ]]
}
# Add a wildcard Host entry for the control plane network to the
# user's ssh config, pointing at the generated identity file.
# Globals:  HOME (read/written: ~/.ssh/config), SCRIPTDIR (read)
function insert_control_plane_network_identity_into_ssh_config {
    # This enables logging into the control plane machines from this
    # machine without specifying the identity file on the command line

    # Create ssh config if it doesn't exist
    mkdir -p "${HOME}/.ssh" && chmod 700 "${HOME}/.ssh"
    touch "${HOME}/.ssh/config"
    chmod 600 "${HOME}/.ssh/config"
    # Add the entry for the control plane network, host value in ssh
    # config is a wildcard
    endpoint=$(helm -n metal3 get values -a cluster-icn | awk '/controlPlaneEndpoint:/ {print $2}')
    prefix=$(helm -n metal3 get values -a cluster-icn | awk '/controlPlanePrefix:/ {print $2}')
    host=$(ipcalc "${endpoint}/${prefix}" | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
    # Drop any stale entry (the Host line plus the IdentityFile line
    # that follows it) before appending the current one
    if grep -q "Host ${host}" "${HOME}/.ssh/config"; then
        sed -i -e '/Host '"${host}"'/,+1 d' "${HOME}/.ssh/config"
    fi
    cat <<EOF >>"${HOME}/.ssh/config"
Host ${host}
    IdentityFile ${SCRIPTDIR}/id_rsa
EOF
}
# Block until the workload cluster and its control plane nodes are
# Ready, then fetch the admin kubeconfig and set up SSH access to the
# control plane machines.
# Globals:  BUILDDIR (written: icn-admin.conf)
function wait_for_all_ready {
    wait_for is_cluster_ready
    # The admin kubeconfig is needed by is_control_plane_ready below
    clusterctl -n metal3 get kubeconfig icn >"${BUILDDIR}/icn-admin.conf"
    chmod 600 "${BUILDDIR}/icn-admin.conf"
    wait_for is_control_plane_ready
    insert_control_plane_network_identity_into_ssh_config
}
97 "build-source") build_source ;;
100 "wait") wait_for_all_ready ;;
102 Usage: $(basename $0) COMMAND
105 build-source - Build the in-tree site values
106 clean - Remove the site
107 deploy - Deploy the site
108 wait - Wait for the site to be ready