gpg --with-colons --list-secret-keys $1 | awk -F: '/fpr/ {print $10;exit}'
}
-function create_gpg_key {
- local -r key_name=$1
-
- # Create an rsa4096 key that does not expire
- gpg --batch --full-generate-key <<EOF
-%no-protection
-Key-Type: 1
-Key-Length: 4096
-Subkey-Type: 1
-Subkey-Length: 4096
-Expire-Date: 0
-Name-Real: ${key_name}
-EOF
-}
-
-function export_gpg_private_key {
- gpg --export-secret-keys --armor "$(_gpg_key_fp $1)"
-}
-
function sops_encrypt {
local -r yaml=$1
local -r yaml_dir=$(dirname ${yaml})
fi
}
-function flux_site_source_name {
+function _site_source_name {
local -r url=$1
local -r branch=$2
echo $(basename ${url})-${branch}
}
-function flux_site_kustomization_name {
+function _site_kustomization_name {
local -r url=$1
local -r branch=$2
local -r path=$3
- echo $(flux_site_source_name ${url} ${branch})-site-$(basename ${path})
+ echo $(_site_source_name ${url} ${branch})-site-$(basename ${path})
}
function flux_create_site {
local -r path=$3
local -r key_name=$4
- local -r source_name=$(flux_site_source_name ${url} ${branch})
- local -r kustomization_name=$(flux_site_kustomization_name ${url} ${branch} ${path})
+ local -r source_name=$(_site_source_name ${url} ${branch})
+ local -r kustomization_name=$(_site_kustomization_name ${url} ${branch} ${path})
local -r key_fp=$(gpg --with-colons --list-secret-keys ${key_name} | awk -F: '/fpr/ {print $10;exit}')
local -r secret_name="${key_name}-sops-gpg"
flux create kustomization ${kustomization_name} --target-namespace=${SITE_NAMESPACE} --path=${path} --source=GitRepository/${source_name} --prune=true \
--decryption-provider=sops --decryption-secret=${secret_name}
}
+
+function site_deploy {
+ flux_create_site ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH} ${FLUX_SOPS_KEY_NAME}
+}
+
+function site_clean {
+ kubectl -n flux-system delete kustomization $(_site_kustomization_name ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH})
+}
+
+function _is_cluster_ready {
+ for yaml in ${SCRIPTDIR}/deployment/*.yaml; do
+ name=$(awk '/clusterName:/ {print $2}' ${yaml})
+ if [[ ! -z ${name} ]]; then
+ if [[ $(kubectl -n ${SITE_NAMESPACE} get cluster ${name} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
+ return 1
+ fi
+ fi
+ done
+}
+
+function _is_control_plane_ready {
+ # Checking the Cluster resource status is not sufficient, it
+ # reports the control plane as ready before the nodes forming the
+ # control plane are ready
+ for yaml in ${SCRIPTDIR}/deployment/*.yaml; do
+ name=$(awk '/clusterName:/ {print $2}' ${yaml})
+ if [[ ! -z ${name} ]]; then
+ local replicas=$(kubectl -n ${SITE_NAMESPACE} get kubeadmcontrolplane ${name} -o jsonpath='{.spec.replicas}')
+ if [[ $(kubectl --kubeconfig=${BUILDDIR}/${name}-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) != ${replicas} ]]; then
+ return 1
+ fi
+ fi
+ done
+}
+
+function site_wait_for_all_ready {
+ WAIT_FOR_INTERVAL=60s
+ WAIT_FOR_TRIES=30
+ wait_for _is_cluster_ready
+ for yaml in ${SCRIPTDIR}/deployment/*.yaml; do
+ name=$(awk '/clusterName:/ {print $2}' ${yaml})
+ clusterctl -n ${SITE_NAMESPACE} get kubeconfig ${name} >${BUILDDIR}/${name}-admin.conf
+ chmod 600 ${BUILDDIR}/${name}-admin.conf
+ done
+ wait_for _is_control_plane_ready
+}
+
+function site_insert_control_plane_network_identity_into_ssh_config {
+ # This enables logging into the control plane machines from this
+ # machine without specifying the identity file on the command line
+
+ if [[ ! $(which ipcalc) ]]; then
+ apt-get install -y ipcalc
+ fi
+
+ # Create ssh config if it doesn't exist
+ mkdir -p ${HOME}/.ssh && chmod 700 ${HOME}/.ssh
+ touch ${HOME}/.ssh/config
+ chmod 600 ${HOME}/.ssh/config
+ # Add the entry for the control plane network, host value in ssh
+ # config is a wildcard
+ for yaml in ${SCRIPTDIR}/deployment/*.yaml; do
+ name=$(awk '/name:/ {NAME=$2} /chart: deploy\/cluster/ {print NAME; exit}' ${yaml})
+ if [[ ! -z ${name} ]]; then
+ endpoint=$(helm -n ${SITE_NAMESPACE} get values -a ${name} | awk '/controlPlaneEndpoint:/ {print $2}')
+ prefix=$(helm -n ${SITE_NAMESPACE} get values -a ${name} | awk '/controlPlanePrefix:/ {print $2}')
+ host=$(ipcalc ${endpoint}/${prefix} | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
+ if [[ $(grep -c "Host ${host}" ${HOME}/.ssh/config) != 0 ]]; then
+ sed -i -e '/Host '"${host}"'/,+3 d' ${HOME}/.ssh/config
+ fi
+ cat <<EOF >>${HOME}/.ssh/config
+Host ${host}
+ IdentityFile ${SCRIPTDIR}/id_rsa
+ StrictHostKeyChecking no
+ UserKnownHostsFile /dev/null
+EOF
+ fi
+ done
+ # Add the identity to authorized keys on this host to enable ssh
+ # logins via its control plane address
+ authorized_key=$(cat ${SCRIPTDIR}/id_rsa.pub)
+ sed -i -e '\!'"${authorized_key}"'!d' ${HOME}/.ssh/authorized_keys
+ cat ${SCRIPTDIR}/id_rsa.pub >> ~/.ssh/authorized_keys
+}
+
+function _is_cluster_deleted {
+ for yaml in ${SCRIPTDIR}/deployment/*.yaml; do
+ name=$(awk '/clusterName:/ {print $2}' ${yaml})
+ ! kubectl -n ${SITE_NAMESPACE} get cluster ${name}
+ done
+}
+
+function site_wait_for_all_deleted {
+ WAIT_FOR_INTERVAL=60s
+ WAIT_FOR_TRIES=30
+ wait_for _is_cluster_deleted
+}
SITE_BRANCH=${SITE_BRANCH:-"master"}
SITE_PATH=${SITE_PATH:-"deploy/site/pod11/deployment"}
-function deploy {
- # TODO Replace ICN test key with real key
- flux_create_site ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH} ${FLUX_SOPS_KEY_NAME}
-}
-
-function clean {
- kubectl -n flux-system delete kustomization $(flux_site_kustomization_name ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH})
-}
-
-function is_cluster_ready {
- [[ $(kubectl -n ${SITE_NAMESPACE} get cluster icn -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
-}
-
-function is_control_plane_ready {
- # Checking the Cluster resource status is not sufficient, it
- # reports the control plane as ready before the nodes forming the
- # control plane are ready
- local -r replicas=$(kubectl -n ${SITE_NAMESPACE} get kubeadmcontrolplane icn -o jsonpath='{.spec.replicas}')
- [[ $(kubectl --kubeconfig=${BUILDDIR}/icn-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) == ${replicas} ]]
-}
-
-function wait_for_all_ready {
- WAIT_FOR_INTERVAL=60s
- WAIT_FOR_TRIES=30
- wait_for is_cluster_ready
- clusterctl -n ${SITE_NAMESPACE} get kubeconfig icn >${BUILDDIR}/icn-admin.conf
- chmod 600 ${BUILDDIR}/icn-admin.conf
- wait_for is_control_plane_ready
-}
-
-function is_cluster_deleted {
- ! kubectl -n ${SITE_NAMESPACE} get cluster icn
-}
-
-function wait_for_all_deleted {
- WAIT_FOR_INTERVAL=60s
- WAIT_FOR_TRIES=30
- wait_for is_cluster_deleted
-}
-
case $1 in
- "clean") clean ;;
- "deploy") deploy ;;
- "wait") wait_for_all_ready ;;
- "wait-clean") wait_for_all_deleted ;;
+ "clean") site_clean ;;
+ "deploy")
+ # TODO Replace ICN test key, $FLUX_SOPS_PRIVATE_KEY, with real key
+ site_deploy ;;
+ "wait") site_wait_for_all_ready ;;
+ "wait-clean") site_wait_for_all_deleted ;;
*) cat <<EOF
Usage: $(basename $0) COMMAND
clean - Remove the site
deploy - Deploy the site
wait - Wait for the site to be ready
+ wait-clean - Wait for the site to be removed
EOF
;;
esac
done
}
+function create_gpg_key {
+ local -r key_name=$1
+
+ # Create an rsa4096 key that does not expire
+ gpg --batch --full-generate-key <<EOF
+%no-protection
+Key-Type: 1
+Key-Length: 4096
+Subkey-Type: 1
+Subkey-Length: 4096
+Expire-Date: 0
+Name-Real: ${key_name}
+EOF
+}
+
+function export_gpg_private_key {
+ gpg --export-secret-keys --armor "$(_gpg_key_fp $1)"
+}
+
function build_source {
create_gpg_key ${FLUX_SOPS_KEY_NAME}
# ONLY FOR TEST ENVIRONMENT: save the private key used
SITE_BRANCH=${SITE_BRANCH:-"master"}
SITE_PATH=${SITE_PATH:-"deploy/site/vm-mc/deployment"}
-function deploy {
- gpg --import ${FLUX_SOPS_PRIVATE_KEY}
- flux_create_site ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH} ${FLUX_SOPS_KEY_NAME}
-}
-
-function clean {
- kubectl -n flux-system delete kustomization $(flux_site_kustomization_name ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH})
-}
-
-function is_cluster_ready {
- for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
- name=$(awk '/clusterName:/ {print $2}' ${yaml})
- if [[ $(kubectl -n ${SITE_NAMESPACE} get cluster ${name} -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
- return 1
- fi
- done
-}
-
-function is_control_plane_ready {
- # Checking the Cluster resource status is not sufficient, it
- # reports the control plane as ready before the nodes forming the
- # control plane are ready
- for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
- name=$(awk '/clusterName:/ {print $2}' ${yaml})
- local replicas=$(kubectl -n ${SITE_NAMESPACE} get kubeadmcontrolplane ${name} -o jsonpath='{.spec.replicas}')
- if [[ $(kubectl --kubeconfig=${BUILDDIR}/${name}-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) != ${replicas} ]]; then
- return 1
- fi
- done
-}
-
-function insert_control_plane_network_identity_into_ssh_config {
- # This enables logging into the control plane machines from this
- # machine without specifying the identify file on the command line
-
- if [[ ! $(which ipcalc) ]]; then
- apt-get install -y ipcalc
- fi
-
- # Create ssh config if it doesn't exist
- mkdir -p ${HOME}/.ssh && chmod 700 ${HOME}/.ssh
- touch ${HOME}/.ssh/config
- chmod 600 ${HOME}/.ssh/config
- # Add the entry for the control plane network, host value in ssh
- # config is a wildcard
- for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
- name=$(awk '/name:/ {print $2; exit}' ${yaml})
- endpoint=$(helm -n ${SITE_NAMESPACE} get values -a ${name} | awk '/controlPlaneEndpoint:/ {print $2}')
- prefix=$(helm -n ${SITE_NAMESPACE} get values -a ${name} | awk '/controlPlanePrefix:/ {print $2}')
- host=$(ipcalc ${endpoint}/${prefix} | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
- if [[ $(grep -c "Host ${host}" ${HOME}/.ssh/config) != 0 ]]; then
- sed -i -e '/Host '"${host}"'/,+3 d' ${HOME}/.ssh/config
- fi
- cat <<EOF >>${HOME}/.ssh/config
-Host ${host}
- IdentityFile ${SCRIPTDIR}/id_rsa
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
-EOF
- done
- # Add the identity to authorized keys on this host to enable ssh
- # logins via its control plane address
- authorized_key=$(cat ${SCRIPTDIR}/id_rsa.pub)
- sed -i -e '\!'"${authorized_key}"'!d' ${HOME}/.ssh/authorized_keys
- cat ${SCRIPTDIR}/id_rsa.pub >> ~/.ssh/authorized_keys
-}
-
-function wait_for_all_ready {
- WAIT_FOR_INTERVAL=60s
- WAIT_FOR_TRIES=30
- wait_for is_cluster_ready
- for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
- name=$(awk '/clusterName:/ {print $2}' ${yaml})
- clusterctl -n ${SITE_NAMESPACE} get kubeconfig ${name} >${BUILDDIR}/${name}-admin.conf
- chmod 600 ${BUILDDIR}/${name}-admin.conf
- done
- wait_for is_control_plane_ready
- insert_control_plane_network_identity_into_ssh_config
-}
-
-function is_cluster_deleted {
- for yaml in ${SCRIPTDIR}/deployment/cluster-*.yaml; do
- name=$(awk '/clusterName:/ {print $2}' ${yaml})
- ! kubectl -n ${SITE_NAMESPACE} get cluster ${name}
- done
-}
-
-function wait_for_all_deleted {
- WAIT_FOR_INTERVAL=60s
- WAIT_FOR_TRIES=30
- wait_for is_cluster_deleted
-}
-
case $1 in
- "clean") clean ;;
- "deploy") deploy ;;
- "wait") wait_for_all_ready ;;
- "wait-clean") wait_for_all_deleted ;;
+ "clean") site_clean ;;
+ "deploy")
+ gpg --import ${FLUX_SOPS_PRIVATE_KEY}
+ site_deploy
+ ;;
+ "wait")
+ site_wait_for_all_ready
+ site_insert_control_plane_network_identity_into_ssh_config
+ ;;
+ "wait-clean") site_wait_for_all_deleted ;;
*) cat <<EOF
Usage: $(basename $0) COMMAND
clean - Remove the site
deploy - Deploy the site
wait - Wait for the site to be ready
+ wait-clean - Wait for the site to be removed
EOF
;;
esac
SITE_BRANCH=${SITE_BRANCH:-"master"}
SITE_PATH=${SITE_PATH:-"deploy/site/vm/deployment"}
-function deploy {
- gpg --import ${FLUX_SOPS_PRIVATE_KEY}
- flux_create_site ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH} ${FLUX_SOPS_KEY_NAME}
-}
-
-function clean {
- kubectl -n flux-system delete kustomization $(flux_site_kustomization_name ${SITE_REPO} ${SITE_BRANCH} ${SITE_PATH})
-}
-
-function is_cluster_ready {
- [[ $(kubectl -n ${SITE_NAMESPACE} get cluster icn -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
-}
-
-function is_control_plane_ready {
- # Checking the Cluster resource status is not sufficient, it
- # reports the control plane as ready before the nodes forming the
- # control plane are ready
- local -r replicas=$(kubectl -n ${SITE_NAMESPACE} get kubeadmcontrolplane icn -o jsonpath='{.spec.replicas}')
- [[ $(kubectl --kubeconfig=${BUILDDIR}/icn-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c True) == ${replicas} ]]
-}
-
-function insert_control_plane_network_identity_into_ssh_config {
- # This enables logging into the control plane machines from this
- # machine without specifying the identify file on the command line
-
- if [[ ! $(which ipcalc) ]]; then
- apt-get install -y ipcalc
- fi
-
- # Create ssh config if it doesn't exist
- mkdir -p ${HOME}/.ssh && chmod 700 ${HOME}/.ssh
- touch ${HOME}/.ssh/config
- chmod 600 ${HOME}/.ssh/config
- # Add the entry for the control plane network, host value in ssh
- # config is a wildcard
- endpoint=$(helm -n ${SITE_NAMESPACE} get values -a cluster-icn | awk '/controlPlaneEndpoint:/ {print $2}')
- prefix=$(helm -n ${SITE_NAMESPACE} get values -a cluster-icn | awk '/controlPlanePrefix:/ {print $2}')
- host=$(ipcalc ${endpoint}/${prefix} | awk '/Network:/ {sub(/\.0.*/,".*"); print $2}')
- if [[ $(grep -c "Host ${host}" ${HOME}/.ssh/config) != 0 ]]; then
- sed -i -e '/Host '"${host}"'/,+3 d' ${HOME}/.ssh/config
- fi
- cat <<EOF >>${HOME}/.ssh/config
-Host ${host}
- IdentityFile ${SCRIPTDIR}/id_rsa
- StrictHostKeyChecking no
- UserKnownHostsFile /dev/null
-EOF
- # Add the identity to authorized keys on this host to enable ssh
- # logins via its control plane address
- authorized_key=$(cat ${SCRIPTDIR}/id_rsa.pub)
- sed -i -e '\!'"${authorized_key}"'!d' ${HOME}/.ssh/authorized_keys
- cat ${SCRIPTDIR}/id_rsa.pub >> ~/.ssh/authorized_keys
-}
-
-function wait_for_all_ready {
- WAIT_FOR_INTERVAL=60s
- WAIT_FOR_TRIES=30
- wait_for is_cluster_ready
- clusterctl -n ${SITE_NAMESPACE} get kubeconfig icn >${BUILDDIR}/icn-admin.conf
- chmod 600 ${BUILDDIR}/icn-admin.conf
- wait_for is_control_plane_ready
- insert_control_plane_network_identity_into_ssh_config
-}
-
-function is_cluster_deleted {
- ! kubectl -n ${SITE_NAMESPACE} get cluster icn
-}
-
-function wait_for_all_deleted {
- WAIT_FOR_INTERVAL=60s
- WAIT_FOR_TRIES=30
- wait_for is_cluster_deleted
-}
-
case $1 in
- "clean") clean ;;
- "deploy") deploy ;;
- "wait") wait_for_all_ready ;;
- "wait-clean") wait_for_all_deleted ;;
+ "clean") site_clean ;;
+ "deploy")
+ gpg --import ${FLUX_SOPS_PRIVATE_KEY}
+ site_deploy ;;
+ "wait")
+ site_wait_for_all_ready
+ site_insert_control_plane_network_identity_into_ssh_config
+ ;;
+ "wait-clean") site_wait_for_all_deleted ;;
*) cat <<EOF
Usage: $(basename $0) COMMAND
clean - Remove the site
deploy - Deploy the site
wait - Wait for the site to be ready
+ wait-clean - Wait for the site to be removed
EOF
;;
esac