bpa_op_all: bm_all bpa_op_install
-.PHONY: all bm_preinstall bm_install
+bashate:
+	bashate -i E006 `find . -name '*.sh'`
+
+.PHONY: all bm_preinstall bm_install bashate
popd
#Copy bpa operator directory to the right path
-kubectl create -f $PWD/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml
+kubectl create -f $PWD/deploy/crds/bpa_v1alpha1_provisioning_crd.yaml
echo $GOPATH
-mkdir -p $GOPATH/src/github.com/ && cp -r $PWD $GOPATH/src/github.com/bpa-operator
+mkdir -p $GOPATH/src/github.com/ && \
+cp -r $PWD $GOPATH/src/github.com/bpa-operator
pushd $GOPATH/src/github.com/bpa-operator
operator-sdk up local --kubeconfig $HOME/.kube/config
popd
bindir=/usr/local/bin
am__append_1="drivers/crypto/qat/qat_dh895xcc/qat_dh895xcc.ko\
- drivers/crypto/qat/qat_dh895xccvf/qat_dh895xccvf.ko"
+ drivers/crypto/qat/qat_dh895xccvf/qat_dh895xccvf.ko"
am__append_2="qat_895xcc.bin qat_895xcc_mmp.bin"
am__append_3="dh895xcc_dev0.conf dh895xcc_dev1.conf dh895xccvf_dev0.conf.vm"
# Kernel modules list
KO_MODULES_LIST="drivers/crypto/qat/qat_common/intel_qat.ko \
- drivers/crypto/qat/qat_c62x/qat_c62x.ko \
- drivers/crypto/qat/qat_c62xvf/qat_c62xvf.ko \
- drivers/crypto/qat/qat_d15xx/qat_d15xx.ko \
- drivers/crypto/qat/qat_d15xxvf/qat_d15xxvf.ko \
- drivers/crypto/qat/qat_c3xxx/qat_c3xxx.ko \
- drivers/crypto/qat/qat_c3xxxvf/qat_c3xxxvf.ko $am__append_1"
+ drivers/crypto/qat/qat_c62x/qat_c62x.ko \
+ drivers/crypto/qat/qat_c62xvf/qat_c62xvf.ko \
+ drivers/crypto/qat/qat_d15xx/qat_d15xx.ko \
+ drivers/crypto/qat/qat_d15xxvf/qat_d15xxvf.ko \
+ drivers/crypto/qat/qat_c3xxx/qat_c3xxx.ko \
+ drivers/crypto/qat/qat_c3xxxvf/qat_c3xxxvf.ko $am__append_1"
# Firmwares list
BIN_LIST="qat_c3xxx.bin qat_c3xxx_mmp.bin qat_c62x.bin \
- qat_c62x_mmp.bin qat_mmp.bin qat_d15xx.bin qat_d15xx_mmp.bin \
- $am__append_2"
+ qat_c62x_mmp.bin qat_mmp.bin qat_d15xx.bin qat_d15xx_mmp.bin \
+ $am__append_2"
CONFIG_LIST="c3xxx_dev0.conf \
- c3xxxvf_dev0.conf.vm \
- c6xx_dev0.conf \
- c6xx_dev1.conf \
- c6xx_dev2.conf \
- c6xxvf_dev0.conf.vm \
- d15xx_dev0.conf \
- d15xxpf_dev0.conf \
- d15xxvf_dev0.conf.vm \
- $am__append_3"
+ c3xxxvf_dev0.conf.vm \
+ c6xx_dev0.conf \
+ c6xx_dev1.conf \
+ c6xx_dev2.conf \
+ c6xxvf_dev0.conf.vm \
+ d15xx_dev0.conf \
+ d15xxpf_dev0.conf \
+ d15xxvf_dev0.conf.vm \
+ $am__append_3"
QAT_DH895XCC_NUM_VFS=32
QAT_DHC62X_NUM_VFS=16
$MV $ROOT/etc/d15xx*.conf $ROOT/etc/qat_conf_backup/ 2>/dev/null;
$MV $ROOT/etc/c3xxx*.conf $ROOT/etc/qat_conf_backup/ 2>/dev/null;
-for ((dev=0; dev<$numDh895xDevicesP; dev++))
-do
+for ((dev=0; dev<$numDh895xDevicesP; dev++)); do
$INSTALL -D -m 640 dh895xcc_dev0.conf $ROOT/etc/dh895xcc_dev$dev.conf;
- for ((vf_dev = 0; vf_dev<$QAT_DH895XCC_NUM_VFS; vf_dev++))
- do
+ for ((vf_dev = 0; vf_dev<$QAT_DH895XCC_NUM_VFS; vf_dev++)); do
vf_dev_num=$(($dev * $QAT_DH895XCC_NUM_VFS + $vf_dev));
$INSTALL -D -m 640 dh895xccvf_dev0.conf.vm $ROOT/etc/dh895xccvf_dev$vf_dev_num.conf;
done;
done;
-for ((dev=0; dev<$numC62xDevicesP; dev++))
-do
+for ((dev=0; dev<$numC62xDevicesP; dev++)); do
$INSTALL -D -m 640 c6xx_dev$(($dev%3)).conf $ROOT/etc/c6xx_dev$dev.conf;
- for ((vf_dev = 0; vf_dev<$QAT_DHC62X_NUM_VFS; vf_dev++))
- do
+ for ((vf_dev = 0; vf_dev<$QAT_DHC62X_NUM_VFS; vf_dev++)); do
vf_dev_num=$(($dev * $QAT_DHC62X_NUM_VFS + $vf_dev));
$INSTALL -D -m 640 c6xxvf_dev0.conf.vm $ROOT/etc/c6xxvf_dev$vf_dev_num.conf;
done;
done;
-for ((dev=0; dev<$numD15xxDevicesP; dev++))
-do
+for ((dev=0; dev<$numD15xxDevicesP; dev++)); do
$INSTALL -D -m 640 d15xx_dev$(($dev%3)).conf $ROOT/etc/d15xx_dev$dev.conf;
- for ((vf_dev = 0; vf_dev<$QAT_DHD15XX_NUM_VFS; vf_dev++))
- do
+ for ((vf_dev = 0; vf_dev<$QAT_DHD15XX_NUM_VFS; vf_dev++)); do
vf_dev_num=$(($dev * $QAT_DHD15XX_NUM_VFS + $vf_dev));
$INSTALL -D -m 640 d15xxvf_dev0.conf.vm $ROOT/etc/d15xxvf_dev$vf_dev_num.conf;
done;
done;
-for ((dev=0; dev<$numC3xxxDevicesP; dev++))
-do
+for ((dev=0; dev<$numC3xxxDevicesP; dev++)); do
$INSTALL -D -m 640 c3xxx_dev0.conf $ROOT/etc/c3xxx_dev$dev.conf;
- for ((vf_dev = 0; vf_dev<$QAT_DHC3XXX_NUM_VFS; vf_dev++))
- do
+ for ((vf_dev = 0; vf_dev<$QAT_DHC3XXX_NUM_VFS; vf_dev++)); do
vf_dev_num=$(($dev * $QAT_DHC3XXX_NUM_VFS + $vf_dev));
$INSTALL -D -m 640 c3xxxvf_dev0.conf.vm $ROOT/etc/c3xxxvf_dev$vf_dev_num.conf;
done;
#!/bin/bash
if [ ! -d $PWD/multicloud-k8s ]; then
- git clone https://github.com/onap/multicloud-k8s.git
+ git clone https://github.com/onap/multicloud-k8s.git
fi
exit 1
fi
-function get_default_inteface_ipaddress() {
+function get_default_inteface_ipaddress {
local _ip=$1
local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
}
create_ssh_key() {
- #ssh key for compute node to communicate back to bootstrap server
- mkdir -p $BUILD_DIR/ssh_key
- ssh-keygen -C "compute.icn.akraino.lfedge.org" -f $BUILD_DIR/ssh_key/id_rsa
- cat $BUILD_DIR/ssh_key/id_rsa.pub >> $HOME/.ssh/authorized_keys
+ #ssh key for compute node to communicate back to bootstrap server
+ mkdir -p $BUILD_DIR/ssh_key
+ ssh-keygen -C "compute.icn.akraino.lfedge.org" -f $BUILD_DIR/ssh_key/id_rsa
+ cat $BUILD_DIR/ssh_key/id_rsa.pub >> $HOME/.ssh/authorized_keys
}
set_compute_key() {
cat << EOF
write_files:
- path: /opt/ssh_id_rsa
- owner: root:root
- permissions: '0600'
- content: |
+ owner: root:root
+ permissions: '0600'
+ content: |
$_SSH_LOCAL_KEY
EOF
}
provision_compute_node() {
- IMAGE_URL=http://172.22.0.1/images/${BM_IMAGE}
- IMAGE_CHECKSUM=http://172.22.0.1/images/${BM_IMAGE}.md5sum
-
- if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
- go get github.com/metal3-io/baremetal-operator
- fi
-
- go run $GOPATH/src/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go \
- -address "ipmi://$COMPUTE_IPMI_ADDRESS" \
- -user "$COMPUTE_IPMI_USER" \
- -password "$COMPUTE_IPMI_PASSWORD" \
- "$COMPUTE_NODE_NAME" > $COMPUTE_NODE_NAME-bm-node.yaml
-
- printf " image:" >> $COMPUTE_NODE_NAME-bm-node.yaml
- printf "\n url: ""%s" "$IMAGE_URL" >> $COMPUTE_NODE_NAME-bm-node.yaml
- printf "\n checksum: ""%s" "$IMAGE_CHECKSUM" >> $COMPUTE_NODE_NAME-bm-node.yaml
- printf "\n userData:" >> $COMPUTE_NODE_NAME-bm-node.yaml
- printf "\n name: ""%s" "$COMPUTE_NODE_NAME""-user-data" >> $COMPUTE_NODE_NAME-bm-node.yaml
- printf "\n namespace: metal3\n" >> $COMPUTE_NODE_NAME-bm-node.yaml
- kubectl apply -f $COMPUTE_NODE_NAME-bm-node.yaml -n metal3
+ IMAGE_URL=http://172.22.0.1/images/${BM_IMAGE}
+ IMAGE_CHECKSUM=http://172.22.0.1/images/${BM_IMAGE}.md5sum
+
+ if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
+ go get github.com/metal3-io/baremetal-operator
+ fi
+
+ go run $GOPATH/src/github.com/metal3-io/baremetal-operator/cmd/make-bm-worker/main.go \
+ -address "ipmi://$COMPUTE_IPMI_ADDRESS" \
+ -user "$COMPUTE_IPMI_USER" \
+ -password "$COMPUTE_IPMI_PASSWORD" \
+ "$COMPUTE_NODE_NAME" > $COMPUTE_NODE_NAME-bm-node.yaml
+
+ printf " image:" >> $COMPUTE_NODE_NAME-bm-node.yaml
+ printf "\n url: ""%s" "$IMAGE_URL" >> $COMPUTE_NODE_NAME-bm-node.yaml
+ printf "\n checksum: ""%s" "$IMAGE_CHECKSUM" >> $COMPUTE_NODE_NAME-bm-node.yaml
+ printf "\n userData:" >> $COMPUTE_NODE_NAME-bm-node.yaml
+ printf "\n name: ""%s" "$COMPUTE_NODE_NAME""-user-data" >> $COMPUTE_NODE_NAME-bm-node.yaml
+ printf "\n namespace: metal3\n" >> $COMPUTE_NODE_NAME-bm-node.yaml
+ kubectl apply -f $COMPUTE_NODE_NAME-bm-node.yaml -n metal3
}
deprovision_compute_node() {
- kubectl patch baremetalhost $COMPUTE_NODE_NAME -n metal3 --type merge \
+ kubectl patch baremetalhost $COMPUTE_NODE_NAME -n metal3 --type merge \
-p '{"spec":{"image":{"url":"","checksum":""}}}'
}
get_default_inteface_ipaddress default_addr
cat << EOF
- path: /root/.ssh/config
- owner: root:root
- permissions: '0600'
- content: |
+ owner: root:root
+ permissions: '0600'
+ content: |
Host bootstrapmachine $default_addr
HostName $default_addr
IdentityFile /opt/ssh_id_rsa
User $USER
- path: /etc/apt/sources.list
- owner: root:root
- permissions: '0665'
- content: |
- deb [trusted=yes] ssh://$USER@$default_addr:$LOCAL_APT_REPO ./
+ owner: root:root
+ permissions: '0665'
+ content: |
+ deb [trusted=yes] ssh://$USER@$default_addr:$LOCAL_APT_REPO ./
EOF
}
create_userdata() {
- printf "#cloud-config\n" > userdata.yaml
- if [ -n "$COMPUTE_NODE_PASSWORD" ]; then
- printf "password: ""%s" "$COMPUTE_NODE_PASSWORD" >> userdata.yaml
- printf "\nchpasswd: {expire: False}\n" >> userdata.yaml
- printf "ssh_pwauth: True\n" >> userdata.yaml
- fi
-
- if [ -n "$COMPUTE_NODE_FQDN" ]; then
- printf "fqdn: ""%s" "$COMPUTE_NODE_FQDN" >> userdata.yaml
- printf "\n" >> userdata.yaml
- fi
- printf "disable_root: false\n" >> userdata.yaml
- printf "ssh_authorized_keys:\n - " >> userdata.yaml
-
- if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
- yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
- fi
-
- cat $HOME/.ssh/id_rsa.pub >> userdata.yaml
- printf "\n" >> userdata.yaml
+ printf "#cloud-config\n" > userdata.yaml
+ if [ -n "$COMPUTE_NODE_PASSWORD" ]; then
+ printf "password: ""%s" "$COMPUTE_NODE_PASSWORD" >> userdata.yaml
+ printf "\nchpasswd: {expire: False}\n" >> userdata.yaml
+ printf "ssh_pwauth: True\n" >> userdata.yaml
+ fi
+
+ if [ -n "$COMPUTE_NODE_FQDN" ]; then
+ printf "fqdn: ""%s" "$COMPUTE_NODE_FQDN" >> userdata.yaml
+ printf "\n" >> userdata.yaml
+ fi
+ printf "disable_root: false\n" >> userdata.yaml
+ printf "ssh_authorized_keys:\n - " >> userdata.yaml
+
+ if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
+ yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
+ fi
+
+ cat $HOME/.ssh/id_rsa.pub >> userdata.yaml
+ printf "\n" >> userdata.yaml
}
apply_userdata_credential() {
- cat <<EOF > ./$COMPUTE_NODE_NAME-user-data.yaml
+ cat <<EOF > ./$COMPUTE_NODE_NAME-user-data.yaml
apiVersion: v1
data:
- userData: $(base64 -w 0 userdata.yaml)
+ userData: $(base64 -w 0 userdata.yaml)
kind: Secret
metadata:
- name: $COMPUTE_NODE_NAME-user-data
- namespace: metal3
+ name: $COMPUTE_NODE_NAME-user-data
+ namespace: metal3
type: Opaque
EOF
- kubectl apply -n metal3 -f $COMPUTE_NODE_NAME-user-data.yaml
+ kubectl apply -n metal3 -f $COMPUTE_NODE_NAME-user-data.yaml
}
launch_baremetal_operator() {
- if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
+ if [ ! -d $GOPATH/src/github.com/metal3-io/baremetal-operator ]; then
go get github.com/metal3-io/baremetal-operator
fi
- pushd $GOPATH/src/github.com/metal3-io/baremetal-operator
- make deploy
- popd
-
+ pushd $GOPATH/src/github.com/metal3-io/baremetal-operator
+ make deploy
+ popd
+
}
if [ "$1" == "launch" ]; then
if [ "$1" == "provision" ]; then
create_userdata
- apply_userdata_credential
- provision_compute_node
+ apply_userdata_credential
+ provision_compute_node
exit 0
fi
exit 1
fi
-function install_essential_packages() {
+function install_essential_packages {
apt-get update
apt-get -y install \
- crudini \
- curl \
- dnsmasq \
- figlet \
- nmap \
- patch \
- psmisc \
- python-pip \
- python-requests \
- python-setuptools \
- vim \
- wget \
- git \
- software-properties-common
+ crudini \
+ curl \
+ dnsmasq \
+ figlet \
+ nmap \
+ patch \
+ psmisc \
+ python-pip \
+ python-requests \
+ python-setuptools \
+ vim \
+ wget \
+ git \
+ software-properties-common
- add-apt-repository ppa:longsleep/golang-backports
- apt-get update
- apt-get install golang-go
+ add-apt-repository ppa:longsleep/golang-backports
+ apt-get update
+ apt-get install golang-go
}
-function install_ironic_packages() {
+function install_ironic_packages {
apt-get update
apt-get -y install \
- jq \
- nodejs \
- python-ironicclient \
- python-ironic-inspector-client \
- python-lxml \
- python-netaddr \
- python-openstackclient \
- unzip \
- genisoimage
+ jq \
+ nodejs \
+ python-ironicclient \
+ python-ironic-inspector-client \
+ python-lxml \
+ python-netaddr \
+ python-openstackclient \
+ unzip \
+ genisoimage
- if [ "$1" == "offline" ]; then
- pip install --no-index
- --find-links=file:$PIP_CACHE_DIR locat yq
- return
- fi
+ if [ "$1" == "offline" ]; then
+ pip install --no-index \
+ --find-links=file:$PIP_CACHE_DIR lolcat yq
+ return
+ fi
pip install \
- lolcat \
- yq
+ lolcat \
+ yq
}
-function install_docker_packages() {
+function install_docker_packages {
apt-get remove docker \
- docker-engine \
- docker.io \
- containerd \
- runc
+ docker-engine \
+ docker.io \
+ containerd \
+ runc
apt-get update
apt-get -y install \
- apt-transport-https \
- ca-certificates \
- curl \
- gnupg-agent \
- software-properties-common
- if [ "$1" != "offline" ]; then
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
- add-apt-repository \
- "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
- $(lsb_release -cs) \
- stable"
- apt-get update
- fi
+ apt-transport-https \
+ ca-certificates \
+ curl \
+ gnupg-agent \
+ software-properties-common
+ if [ "$1" != "offline" ]; then
+ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
+ add-apt-repository \
+ "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
+ $(lsb_release -cs) \
+ stable"
+ apt-get update
+ fi
apt-get -y install docker-ce=18.06.0~ce~3-0~ubuntu
}
-function install_podman_packages() {
- if [ "$1" != "offline" ]; then
- add-apt-repository -y ppa:projectatomic/ppa
- apt-get update
- fi
+function install_podman_packages {
+ if [ "$1" != "offline" ]; then
+ add-apt-repository -y ppa:projectatomic/ppa
+ apt-get update
+ fi
apt-get -y install podman
}
-function install_kubernetes_packages() {
- if [ "$1" != "offline" ]; then
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
- bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+function install_kubernetes_packages {
+ if [ "$1" != "offline" ]; then
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+ bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF'
- apt-get update
- fi
- apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
- apt-mark hold kubelet kubeadm kubectl
+ apt-get update
+ fi
+ apt-get install -y kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00
+ apt-mark hold kubelet kubeadm kubectl
}
install() {
- install_essential_packages
- install_ironic_packages $1
- install_docker_packages $1
- install_podman_packages $1
- install_kubernetes_packages $1
+ install_essential_packages
+ install_ironic_packages $1
+ install_docker_packages $1
+ install_podman_packages $1
+ install_kubernetes_packages $1
}
if ["$1" == "-o"]; then
- install offline
- exit 0
+ install offline
+ exit 0
fi
install
exit 1
fi
-function check_inteface_ip() {
- local interface=$1
- local ipaddr=$2
+function check_inteface_ip {
+ local interface=$1
+ local ipaddr=$2
if [ ! $(ip addr show dev $interface) ]; then
exit 1
fi
}
-function configure_kubelet() {
- swapoff -a
- #Todo addition kubelet configuration
+function configure_kubelet {
+ swapoff -a
+ #Todo addition kubelet configuration
}
-function configure_kubeadm() {
- #Todo error handing
- if [ "$1" == "offline" ]; then
- for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
- docker load --input $CONTAINER_IMAGES_DIR/$images.tar;
- done
+function configure_kubeadm {
+ #Todo error handing
+ if [ "$1" == "offline" ]; then
+ for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+ docker load --input $CONTAINER_IMAGES_DIR/$images.tar;
+ done
- docker load --input $CONTAINER_IMAGES_DIR/pause.tar
- docker load --input $CONTAINER_IMAGES_DIR/etcd.tar
- docker load --input $CONTAINER_IMAGES_DIR/coredns.tar
+ docker load --input $CONTAINER_IMAGES_DIR/pause.tar
+ docker load --input $CONTAINER_IMAGES_DIR/etcd.tar
+ docker load --input $CONTAINER_IMAGES_DIR/coredns.tar
return
fi
- kubeadm config images pull --kubernetes-version=$KUBE_VERSION
+ kubeadm config images pull --kubernetes-version=$KUBE_VERSION
}
-function configure_ironic_interfaces() {
- #Todo later to change the CNI networking for podman networking
- # Add firewall rules to ensure the IPA ramdisk can reach httpd, Ironic and the Inspector API on the host
- if [ "$IRONIC_PROVISIONING_INTERFACE" ]; then
- check_inteface_ip $IRONIC_PROVISIONING_INTERFACE $IRONIC_PROVISIONING_INTERFACE_IP
- else
- exit 1
+function configure_ironic_interfaces {
+ #Todo later to change the CNI networking for podman networking
+ # Add firewall rules to ensure the IPA ramdisk can reach httpd, Ironic and the Inspector API on the host
+ if [ "$IRONIC_PROVISIONING_INTERFACE" ]; then
+ check_inteface_ip $IRONIC_PROVISIONING_INTERFACE $IRONIC_PROVISIONING_INTERFACE_IP
+ else
+ exit 1
- fi
+ fi
- if [ "$IRONIC_IPMI_INTERFACE" ]; then
+ if [ "$IRONIC_IPMI_INTERFACE" ]; then
check_inteface_ip $IRONIC_IPMI_INTERFACE $IRONIC_IPMI_INTERFACE_IP
else
exit 1
fi
- for port in 80 5050 6385 ; do
- if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT > /dev/null 2>&1; then
- sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT
- fi
- done
-
- # Allow ipmi to the bmc processes
- if ! sudo iptables -C INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT 2>/dev/null ; then
- sudo iptables -I INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT
- fi
-
- #Allow access to dhcp and tftp server for pxeboot
- for port in 67 69 ; do
- if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT 2>/dev/null ; then
- sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT
- fi
- done
+ for port in 80 5050 6385 ; do
+ if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT > /dev/null 2>&1; then
+ sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p tcp -m tcp --dport $port -j ACCEPT
+ fi
+ done
+
+ # Allow ipmi to the bmc processes
+ if ! sudo iptables -C INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT 2>/dev/null ; then
+ sudo iptables -I INPUT -i $IRONIC_IPMI_INTERFACE -p udp -m udp --dport 6230:6235 -j ACCEPT
+ fi
+
+ #Allow access to dhcp and tftp server for pxeboot
+ for port in 67 69 ; do
+ if ! sudo iptables -C INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT 2>/dev/null ; then
+ sudo iptables -I INPUT -i $IRONIC_PROVISIONING_INTERFACE -p udp --dport $port -j ACCEPT
+ fi
+ done
}
-function configure_ironic_offline() {
- if [ ! -d $CONTAINER_IMAGES_DIR ] && [ ! -d $BUILD_DIR ]; then
- exit 1
- fi
-
- for image in ironic-inspector-image ironic-image podman-pause \
- baremetal-operator socat; do
- if [ ! -f "$CONTAINER_IMAGES_DIR/$image" ]; then
- exit 1
- fi
- done
-
- if [ ! -f "$BUILD_DIR/ironic-python-agent.initramfs"] && [ ! -f \
- "$BUILD_DIR/ironic-python-agent.kernel" ] && [ ! -f
- "$BUILD_DIR/$BM_IMAGE" ]; then
- exit 1
- fi
-
- podman load --input $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar
- podman load --input $CONTAINER_IMAGES_DIR/ironic-image.tar
- podman load --input $CONTAINER_IMAGES_DIR/podman-pause.tar
-
- docker load --input $CONTAINER_IMAGES_DIR/baremetal-operator.tar
- docker load --input $CONTAINER_IMAGES_DIR/socat.tar
-
- mkdir -p "$IRONIC_DATA_DIR/html/images"
-
- cp $BUILD_DIR/ironic-python-agent.initramfs $IRONIC_DATA_DIR/html/images/
- cp $BUILD_DIR/ironic-python-agent.kernel $IRONIC_DATA_DIR/html/images/
- cp $BUILD_DIR/$BM_IMAGE $IRONIC_DATA_DIR/html/images/
- md5sum $BUILD_DIR/$BM_IMAGE | awk '{print $1}' > $BUILD_DIR/${BM_IMAGE}.md5sum
+function configure_ironic_offline {
+ if [ ! -d $CONTAINER_IMAGES_DIR ] && [ ! -d $BUILD_DIR ]; then
+ exit 1
+ fi
+
+ for image in ironic-inspector-image ironic-image podman-pause \
+ baremetal-operator socat; do
+ if [ ! -f "$CONTAINER_IMAGES_DIR/$image" ]; then
+ exit 1
+ fi
+ done
+
+ if [ ! -f "$BUILD_DIR/ironic-python-agent.initramfs" ] && [ ! -f \
+ "$BUILD_DIR/ironic-python-agent.kernel" ] && [ ! -f \
+ "$BUILD_DIR/$BM_IMAGE" ]; then
+ exit 1
+ fi
+
+ podman load --input $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar
+ podman load --input $CONTAINER_IMAGES_DIR/ironic-image.tar
+ podman load --input $CONTAINER_IMAGES_DIR/podman-pause.tar
+
+ docker load --input $CONTAINER_IMAGES_DIR/baremetal-operator.tar
+ docker load --input $CONTAINER_IMAGES_DIR/socat.tar
+
+ mkdir -p "$IRONIC_DATA_DIR/html/images"
+
+ cp $BUILD_DIR/ironic-python-agent.initramfs $IRONIC_DATA_DIR/html/images/
+ cp $BUILD_DIR/ironic-python-agent.kernel $IRONIC_DATA_DIR/html/images/
+ cp $BUILD_DIR/$BM_IMAGE $IRONIC_DATA_DIR/html/images/
+ md5sum $BUILD_DIR/$BM_IMAGE | awk '{print $1}' > $BUILD_DIR/${BM_IMAGE}.md5sum
}
-function configure_ironic() {
- if [ "$1" == "offline" ]; then
- configure_ironic_offline
- return
- fi
-
- podman pull $IRONIC_IMAGE
- podman pull $IRONIC_INSPECTOR_IMAGE
-
- mkdir -p "$IRONIC_DATA_DIR/html/images"
- pushd $IRONIC_DATA_DIR/html/images
-
- if [ ! -f ironic-python-agent.initramfs ]; then
- curl --insecure --compressed -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
- fi
-
- if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
- curl -o ${BM_IMAGE} --insecure --compressed -O -L ${BM_IMAGE_URL}
- md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
- fi
- popd
+function configure_ironic {
+ if [ "$1" == "offline" ]; then
+ configure_ironic_offline
+ return
+ fi
+
+ podman pull $IRONIC_IMAGE
+ podman pull $IRONIC_INSPECTOR_IMAGE
+
+ mkdir -p "$IRONIC_DATA_DIR/html/images"
+ pushd $IRONIC_DATA_DIR/html/images
+
+ if [ ! -f ironic-python-agent.initramfs ]; then
+ curl --insecure --compressed -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+ fi
+
+ if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
+ curl -o ${BM_IMAGE} --insecure --compressed -O -L ${BM_IMAGE_URL}
+ md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+ fi
+ popd
}
-function configure() {
- configure_kubeadm $1
- configure_kubelet
- configure_ironic_interfaces
- configure_ironic $1
+function configure {
+ configure_kubeadm $1
+ configure_kubelet
+ configure_ironic_interfaces
+ configure_ironic $1
}
if [ "$1" == "-o" ]; then
exit 1
fi
-function get_default_inteface_ipaddress() {
- local _ip=$1
- local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
- local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
- eval $_ip="'$_ipv4address'"
+function get_default_inteface_ipaddress {
+ local _ip=$1
+ local _default_interface=$(awk '$2 == 00000000 { print $1 }' /proc/net/route)
+ local _ipv4address=$(ip addr show dev $_default_interface | awk '$1 == "inet" { sub("/.*", "", $2); print $2 }')
+ eval $_ip="'$_ipv4address'"
}
-function check_cni_network() {
- #since bootstrap cluster is a single node cluster,
- #podman and bootstap cluster have same network configuration to avoid the cni network conf conflicts
- if [ ! -d "/etc/cni/net.d" ]; then
- mkdir -p "/etc/cni/net.d"
- fi
+function check_cni_network {
+ #since bootstrap cluster is a single node cluster,
+ #podman and bootstap cluster have same network configuration to avoid the cni network conf conflicts
+ if [ ! -d "/etc/cni/net.d" ]; then
+ mkdir -p "/etc/cni/net.d"
+ fi
- if [ ! -f "/etc/cni/net.d/87-podman-bridge.conflist" ]; then
- if [ "$1" == "offline" ]; then
- cp $BUILD_DIR/87-podman-bridge.conflist /etc/cni/net.d/
- return
- fi
+ if [ ! -f "/etc/cni/net.d/87-podman-bridge.conflist" ]; then
+ if [ "$1" == "offline" ]; then
+ cp $BUILD_DIR/87-podman-bridge.conflist /etc/cni/net.d/
+ return
+ fi
- if !(wget $PODMAN_CNI_CONFLIST -P /etc/cni/net.d/); then
- exit 1
- fi
- fi
+ if !(wget $PODMAN_CNI_CONFLIST -P /etc/cni/net.d/); then
+ exit 1
+ fi
+ fi
}
-function create_k8s_regular_user() {
- if [ ! -d "$HOME/.kube" ]; then
- mkdir -p $HOME/.kube
- fi
+function create_k8s_regular_user {
+ if [ ! -d "$HOME/.kube" ]; then
+ mkdir -p $HOME/.kube
+ fi
- if [ ! -f /etc/kubernetes/admin.conf]; then
- exit 1
- fi
+ if [ ! -f /etc/kubernetes/admin.conf ]; then
+ exit 1
+ fi
- cp -rf /etc/kubernetes/admin.conf $HOME/.kube/config
- chown $(id -u):$(id -g) $HOME/.kube/config
+ cp -rf /etc/kubernetes/admin.conf $HOME/.kube/config
+ chown $(id -u):$(id -g) $HOME/.kube/config
}
-function check_k8s_node_status(){
- echo 'checking bootstrap cluster single node status'
- node_status="False"
-
- for i in {1..5}
- do
- check_node=$(kubectl get node -o \
- jsonpath='{.items[0].status.conditions[?(@.reason == "KubeletReady")].status}')
- if [ $check_node != "" ]; then
- node_status=${check_node}
- fi
-
- if [ $node_status == "True" ]; then
- break
- fi
-
- sleep 3
- done
-
- if [ $node_status != "True" ]; then
- echo "bootstrap cluster single node status is not ready"
- exit 1
- fi
+function check_k8s_node_status {
+ echo 'checking bootstrap cluster single node status'
+ node_status="False"
+
+ for i in {1..5}; do
+ check_node=$(kubectl get node -o \
+ jsonpath='{.items[0].status.conditions[?(@.reason == "KubeletReady")].status}')
+ if [ $check_node != "" ]; then
+ node_status=${check_node}
+ fi
+
+ if [ $node_status == "True" ]; then
+ break
+ fi
+
+ sleep 3
+ done
+
+ if [ $node_status != "True" ]; then
+ echo "bootstrap cluster single node status is not ready"
+ exit 1
+ fi
}
-function install_podman() {
- # set password for mariadb
- mariadb_password=$(echo $(date;hostname)|sha256sum |cut -c-20)
+function install_podman {
+ # set password for mariadb
+ mariadb_password=$(echo $(date;hostname)|sha256sum |cut -c-20)
- # Create pod
- podman pod create -n ironic-pod
+ # Create pod
+ podman pod create -n ironic-pod
- # Start dnsmasq, http, mariadb, and ironic containers using same image
- podman run -d --net host --privileged --name dnsmasq --pod ironic-pod \
- -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/rundnsmasq ${IRONIC_IMAGE}
+ # Start dnsmasq, http, mariadb, and ironic containers using same image
+ podman run -d --net host --privileged --name dnsmasq --pod ironic-pod \
+ -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/rundnsmasq ${IRONIC_IMAGE}
- podman run -d --net host --privileged --name httpd --pod ironic-pod \
- -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runhttpd ${IRONIC_IMAGE}
+ podman run -d --net host --privileged --name httpd --pod ironic-pod \
+ -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runhttpd ${IRONIC_IMAGE}
- podman run -d --net host --privileged --name mariadb --pod ironic-pod \
- -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runmariadb \
- --env MARIADB_PASSWORD=$mariadb_password ${IRONIC_IMAGE}
+ podman run -d --net host --privileged --name mariadb --pod ironic-pod \
+ -v $IRONIC_DATA_DIR:/shared --entrypoint /bin/runmariadb \
+ --env MARIADB_PASSWORD=$mariadb_password ${IRONIC_IMAGE}
- podman run -d --net host --privileged --name ironic --pod ironic-pod \
- --env MARIADB_PASSWORD=$mariadb_password \
- -v $IRONIC_DATA_DIR:/shared ${IRONIC_IMAGE}
+ podman run -d --net host --privileged --name ironic --pod ironic-pod \
+ --env MARIADB_PASSWORD=$mariadb_password \
+ -v $IRONIC_DATA_DIR:/shared ${IRONIC_IMAGE}
- # Start Ironic Inspector
- podman run -d --net host --privileged --name ironic-inspector \
- --pod ironic-pod "${IRONIC_INSPECTOR_IMAGE}"
+ # Start Ironic Inspector
+ podman run -d --net host --privileged --name ironic-inspector \
+ --pod ironic-pod "${IRONIC_INSPECTOR_IMAGE}"
}
-function remove_k8s_noschedule_taint() {
- #Bootstrap cluster is a single node
- nodename=$(kubectl get node -o jsonpath='{.items[0].metadata.name}')
- if !(kubectl taint node $nodename node-role.kubernetes.io/master:NoSchedule-); then
- exit 1
- fi
+function remove_k8s_noschedule_taint {
+ #Bootstrap cluster is a single node
+ nodename=$(kubectl get node -o jsonpath='{.items[0].metadata.name}')
+ if !(kubectl taint node $nodename node-role.kubernetes.io/master:NoSchedule-); then
+ exit 1
+ fi
}
-function install_k8s_single_node() {
- get_default_inteface_ipaddress apiserver_advertise_addr
- kubeadm_init="kubeadm init --kubernetes-version=$KUBE_VERSION \
- --pod-network-cidr=$POD_NETWORK_CIDR \
- --apiserver-advertise-address=$apiserver_advertise_addr"
- if !(${kubeadm_init}); then
- exit 1
- fi
+function install_k8s_single_node {
+ get_default_inteface_ipaddress apiserver_advertise_addr
+ kubeadm_init="kubeadm init --kubernetes-version=$KUBE_VERSION \
+ --pod-network-cidr=$POD_NETWORK_CIDR \
+ --apiserver-advertise-address=$apiserver_advertise_addr"
+ if !(${kubeadm_init}); then
+ exit 1
+ fi
}
-function install() {
- #install_kubernetes
- install_k8s_single_node
- check_cni_network $1
- create_k8s_regular_user
- check_k8s_node_status
- remove_k8s_noschedule_taint
-
- #install_podman
- #Todo - error handling mechanism
- install_podman
+function install {
+ #install_kubernetes
+ install_k8s_single_node
+ check_cni_network $1
+ create_k8s_regular_user
+ check_k8s_node_status
+ remove_k8s_noschedule_taint
+
+ #install_podman
+ #Todo - error handling mechanism
+ install_podman
}
if [ "$1" == "-o" ]; then
exit 1
fi
-function download_essential_packages() {
+function download_essential_packages {
apt-get update
- for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
- python-pip python-requests python-setuptools vim wget; do
- apt-get -d install $package -y
- done
+ for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
+ python-pip python-requests python-setuptools vim wget; do
+ apt-get -d install $package -y
+ done
}
-function build_baremetal_operator_images() {
- if [ ! -d "$BUILD_DIR/baremetal-operator"]; then
- return
- fi
+function build_baremetal_operator_images {
+ if [ ! -d "$BUILD_DIR/baremetal-operator" ]; then
+ return
+ fi
- pushd $BUILD_DIR/baremetal-operator
- docker build -t $IRONIC_BAREMETAL_IMAGE . -f build/Dockerfile
- docker save --output \
- $CONTAINER_IMAGES_DIR/baremetal-operator.tar $IRONIC_BAREMETAL_IMAGE
- popd
+ pushd $BUILD_DIR/baremetal-operator
+ docker build -t $IRONIC_BAREMETAL_IMAGE . -f build/Dockerfile
+ docker save --output \
+ $CONTAINER_IMAGES_DIR/baremetal-operator.tar $IRONIC_BAREMETAL_IMAGE
+ popd
- docker pull $IRONIC_BAREMETAL_SOCAT_IMAGE
- docker save --output $CONTAINER_IMAGES_DIR/socat.tar $IRONIC_BAREMETAL_SOCAT_IMAGE
+ docker pull $IRONIC_BAREMETAL_SOCAT_IMAGE
+ docker save --output $CONTAINER_IMAGES_DIR/socat.tar $IRONIC_BAREMETAL_SOCAT_IMAGE
}
-function build_ironic_images() {
- for images in ironic-image ironic-inspector-image; do
- if [ -d "$BUILD_DIR/$images" ]; then
- pushd $BUILD_DIR/$images
- podman build -t $images .
- popd
- fi
- done
-
- if podman images -q localhost/ironic-inspector-image ; then
- podman tag localhost/ironic-inspector-image $IRONIC_INSPECTOR_IMAGE
- podman save --output \
- $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar \
- $IRONIC_INSPECTOR_IMAGE
- fi
-
- if podman images -q localhost/ironic-image ; then
+function build_ironic_images {
+ for images in ironic-image ironic-inspector-image; do
+ if [ -d "$BUILD_DIR/$images" ]; then
+ pushd $BUILD_DIR/$images
+ podman build -t $images .
+ popd
+ fi
+ done
+
+ if podman images -q localhost/ironic-inspector-image ; then
+ podman tag localhost/ironic-inspector-image $IRONIC_INSPECTOR_IMAGE
+ podman save --output \
+ $CONTAINER_IMAGES_DIR/ironic-inspector-image.tar \
+ $IRONIC_INSPECTOR_IMAGE
+ fi
+
+ if podman images -q localhost/ironic-image ; then
podman tag localhost/ironic-inspector-image $IRONIC_IMAGE
- podman save --output $CONTAINER_IMAGES_DIR/ironic-image.tar \
- $IRONIC_IMAGE
+ podman save --output $CONTAINER_IMAGES_DIR/ironic-image.tar \
+ $IRONIC_IMAGE
fi
-
- podman pull k8s.gcr.io/pause:3.1
- podman save --output $CONTAINER_IMAGES_DIR/podman-pause.tar \
- k8s.gcr.io/pause:3.1
- #build_baremetal_operator_images
+ podman pull k8s.gcr.io/pause:3.1
+ podman save --output $CONTAINER_IMAGES_DIR/podman-pause.tar \
+ k8s.gcr.io/pause:3.1
+
+ #build_baremetal_operator_images
}
-function download_container_images() {
- check_docker
- pushd $CONTAINER_IMAGES_DIR
- #docker images for Kubernetes
- for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
- docker pull k8s.gcr.io/$images:v1.15.0;
- docker save --output $images.tar k8s.gcr.io/$images;
- done
-
- docker pull k8s.gcr.io/pause:3.1
- docker save --output pause.tar k8s.gcr.io/pause
-
- docker pull k8s.gcr.io/etcd:3.3.10
- docker save --output etcd.tar k8s.gcr.io/etcd
-
- docker pull k8s.gcr.io/coredns:1.3.1
- docker save --output coredns.tar k8s.gcr.io/coredns
-
- #podman images for Ironic
- check_podman
- build_ironic_images
- #podman pull $IRONIC_IMAGE
- #podman save --output ironic.tar $IRONIC_IMAGE
- #podman pull $IRONIC_INSPECTOR_IMAGE
- #podman save --output ironic-inspector.tar $IRONIC_INSPECTOR_IMAGE
- popd
+function download_container_images {
+ check_docker
+ pushd $CONTAINER_IMAGES_DIR
+ #docker images for Kubernetes
+ for images in kube-apiserver kube-controller-manager kube-scheduler kube-proxy; do
+ docker pull k8s.gcr.io/$images:v1.15.0;
+ docker save --output $images.tar k8s.gcr.io/$images;
+ done
+
+ docker pull k8s.gcr.io/pause:3.1
+ docker save --output pause.tar k8s.gcr.io/pause
+
+ docker pull k8s.gcr.io/etcd:3.3.10
+ docker save --output etcd.tar k8s.gcr.io/etcd
+
+ docker pull k8s.gcr.io/coredns:1.3.1
+ docker save --output coredns.tar k8s.gcr.io/coredns
+
+ #podman images for Ironic
+ check_podman
+ build_ironic_images
+ #podman pull $IRONIC_IMAGE
+ #podman save --output ironic.tar $IRONIC_IMAGE
+ #podman pull $IRONIC_INSPECTOR_IMAGE
+ #podman save --output ironic-inspector.tar $IRONIC_INSPECTOR_IMAGE
+ popd
}
-function download_build_packages() {
- check_curl
- pushd $BUILD_DIR
- if [ ! -f ironic-python-agent.initramfs ]; then
- curl --insecure --compressed \
- -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
- fi
-
- if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
- curl -o ${BM_IMAGE} --insecure --compressed -O -L ${BM_IMAGE_URL}
- md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+function download_build_packages {
+ check_curl
+ pushd $BUILD_DIR
+ if [ ! -f ironic-python-agent.initramfs ]; then
+ curl --insecure --compressed \
+ -L https://images.rdoproject.org/master/rdo_trunk/current-tripleo-rdo/ironic-python-agent.tar | tar -xf -
+ fi
+
+ if [[ "$BM_IMAGE_URL" && "$BM_IMAGE" ]]; then
+ curl -o ${BM_IMAGE} --insecure --compressed -L ${BM_IMAGE_URL}
+ md5sum ${BM_IMAGE} | awk '{print $1}' > ${BM_IMAGE}.md5sum
+ fi
+
+ if [ ! -f 87-podman-bridge.conflist ]; then
+ curl --insecure --compressed -O -L $PODMAN_CNI_CONFLIST
+ fi
+
+ if [ ! -d baremetal-operator ]; then
+ git clone https://github.com/metal3-io/baremetal-operator.git
+ pushd ./baremetal-operator
+ git checkout -b icn_baremetal_operator 11ea02ab5cab8b3ab14972ae7c0e70206bba00b5
+ popd
fi
- if [ ! -f 87-podman-bridge.conflist ]; then
- curl --insecure --compressed -O -L $PODMAN_CNI_CONFLIST
- fi
-
- if [ ! -d baremetal-operator ]; then
- git clone https://github.com/metal3-io/baremetal-operator.git
- pushd ./baremetal-operator
- git checkout -b icn_baremetal_operator 11ea02ab5cab8b3ab14972ae7c0e70206bba00b5
- popd
- fi
-
- if [ ! -d ironic-inspector-image ]; then
- git clone https://github.com/metal3-io/ironic-inspector-image.git
- pushd ./ironic-inspector-image
- git checkout -b icn_ironic_inspector_image 25431bd5b7fc87c6f3cfb8b0431fe66b86bbab0e
- popd
- fi
-
- if [ ! -d ironic-image ]; then
- git clone https://github.com/metal3-io/ironic-image.git
- pushd ./ironic-image
- git checkout -b icn_ironic_image 329eb4542f0d8d0f0e9cf0d7e550e33b07efe7fb
- popd
- fi
+ if [ ! -d ironic-inspector-image ]; then
+ git clone https://github.com/metal3-io/ironic-inspector-image.git
+ pushd ./ironic-inspector-image
+ git checkout -b icn_ironic_inspector_image 25431bd5b7fc87c6f3cfb8b0431fe66b86bbab0e
+ popd
+ fi
+
+ if [ ! -d ironic-image ]; then
+ git clone https://github.com/metal3-io/ironic-image.git
+ pushd ./ironic-image
+ git checkout -b icn_ironic_image 329eb4542f0d8d0f0e9cf0d7e550e33b07efe7fb
+ popd
+ fi
}
-function check_pip() {
- if ! which pip ; then
- apt-get install python-pip -y
- fi
+function check_pip {
+ if ! which pip ; then
+ apt-get install python-pip -y
+ fi
}
-function check_curl() {
- if ! which curl ; then
+function check_curl {
+ if ! which curl ; then
apt-get install curl -y
fi
}
-function check_apt_tools() {
- if ! which add-apt-repository ; then
- apt-get install software-properties-common -y
- fi
+function check_apt_tools {
+ if ! which add-apt-repository ; then
+ apt-get install software-properties-common -y
+ fi
}
-function download_ironic_packages() {
- for package in jq nodejs python-ironicclient \
- python-ironic-inspector-client python-lxml python-netaddr \
- python-openstackclient unzip genisoimage; do
- apt-get -d install $package -y
- done
-
- check_pip
+function download_ironic_packages {
+ for package in jq nodejs python-ironicclient \
+ python-ironic-inspector-client python-lxml python-netaddr \
+ python-openstackclient unzip genisoimage; do
+ apt-get -d install $package -y
+ done
+
+ check_pip
pip download lolcat yq -d $PIP_CACHE_DIR
}
-function check_docker() {
- if which docker ; then
- return
- fi
+function check_docker {
+ if which docker ; then
+ return
+ fi
apt-get remove -y docker \
docker-engine \
apt-get install docker-ce=18.06.0~ce~3-0~ubuntu -y
}
-function check_podman() {
- if which podman; then
- return
- fi
+function check_podman {
+ if which podman; then
+ return
+ fi
add-apt-repository -y ppa:projectatomic/ppa
- apt-get update
+ apt-get update
apt-get install podman -y
}
-function download_docker_packages() {
+function download_docker_packages {
apt-get remove -y docker \
docker-engine \
docker.io \
containerd \
runc \
- docker-ce
+ docker-ce
apt-get update
- for package in apt-transport-https ca-certificates gnupg-agent \
- software-properties-common; do
- apt-get -d install $package -y
- done
+ for package in apt-transport-https ca-certificates gnupg-agent \
+ software-properties-common; do
+ apt-get -d install $package -y
+ done
- check_curl
- check_apt_tools
+ check_curl
+ check_apt_tools
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
apt-get -d install docker-ce=18.06.0~ce~3-0~ubuntu -y
}
-function download_podman_packages() {
+function download_podman_packages {
apt-get update
add-apt-repository -y ppa:projectatomic/ppa
apt-get -d install podman -y
}
-function download_kubernetes_packages() {
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
- bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+function download_kubernetes_packages {
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+ bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF'
- apt-get update
- apt-get install -d kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 -y
+ apt-get update
+ apt-get install -d kubelet=1.15.0-00 kubeadm=1.15.0-00 kubectl=1.15.0-00 -y
}
-function clean_apt_cache() {
- pushd /var/cache/apt/archives
+function clean_apt_cache {
+ pushd /var/cache/apt/archives
+
+ if [ $(ls -1q . | wc -l ) -ge 3 ]; then
+ rm !("lock"|"partial")
+ fi
+ popd
- if [ $(ls -1q . | wc -l ) -ge 3 ]; then
- $(rm !("lock"|"partial"))
- fi
- popd
-
}
-function mv_apt_cache() {
+function mv_apt_cache {
pushd /var/cache/apt/archives
if [ $(ls -1q . | wc -l ) -gt 2 ]; then
popd
}
-function check_dir() {
+function check_dir {
if [ ! -d $1 ]; then
mkdir -p $1
fi
}
-function clean_dir() {
+function clean_dir {
pushd $1
if [ $(ls -1q . | wc -l ) -ne 0 ]; then
}
clean_apt_cache
-check_dir $LOCAL_APT_REPO
-clean_dir $LOCAL_APT_REPO
+check_dir $LOCAL_APT_REPO
+clean_dir $LOCAL_APT_REPO
check_dir $PIP_CACHE_DIR
clean_dir $PIP_CACHE_DIR
check_dir $BUILD_DIR
exit 1
fi
-function clean_essential_packages() {
+function clean_essential_packages {
apt-get update
- for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
- python-pip python-requests python-setuptools vim wget; do
- apt-get remove $package -y
- done
+ for package in crudini curl dnsmasq figlet golang nmap patch psmisc \
+ python-pip python-requests python-setuptools vim wget; do
+ apt-get remove $package -y
+ done
- apt-get autoremove -y
- rm -rf /etc/apt/sources.list.d/*
+ apt-get autoremove -y
+ rm -rf /etc/apt/sources.list.d/*
}
-function check_prerequisite() {
+function check_prerequisite {
if !(which pip); then
apt-get install python-pip -y
fi
fi
}
-function clean_ironic_packages() {
- for package in jq nodejs python-ironicclient \
- python-ironic-inspector-client python-lxml python-netaddr \
- python-openstackclient unzip genisoimage; do
- apt-get remove $package -y
- done
+function clean_ironic_packages {
+ for package in jq nodejs python-ironicclient \
+ python-ironic-inspector-client python-lxml python-netaddr \
+ python-openstackclient unzip genisoimage; do
+ apt-get remove $package -y
+ done
}
-function clean_docker_packages() {
+function clean_docker_packages {
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
stable"
apt-get update
apt-get remove docker-ce -y
- for package in apt-transport-https ca-certificates gnupg-agent \
+ for package in apt-transport-https ca-certificates gnupg-agent \
software-properties-common; do
apt-get remove $package -y
done
- apt-get remove -y docker \
+ apt-get remove -y docker \
docker-engine \
docker.io \
containerd \
runc \
docker-ce
- apt-get update
+ apt-get update
}
-function clean_podman_packages() {
+function clean_podman_packages {
apt-get update
add-apt-repository -y ppa:projectatomic/ppa
apt-get remove podman -y
}
-function clean_kubernetes_packages() {
- #Just to make sure kubernetes packages are removed during the download
- curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
- bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
+function clean_kubernetes_packages {
+ #Just to make sure kubernetes packages are removed during the download
+ curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+ bash -c 'cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF'
- apt-get update
- apt-get remove kubelet kubeadm kubectl -y
+ apt-get update
+ apt-get remove kubelet kubeadm kubectl -y
}
-function clean_apt_cache() {
- shopt -s extglob
- pushd /var/cache/apt/archives
+function clean_apt_cache {
+ shopt -s extglob
+ pushd /var/cache/apt/archives
+
+ if [ $(ls -1q . | wc -l ) -ge 3 ]; then
+ rm !("lock"|"partial")
+ fi
+ popd
- if [ $(ls -1q . | wc -l ) -ge 3 ]; then
- $(rm !("lock"|"partial"))
- fi
- popd
-
}
-function mv_apt_cache() {
- shopt -s extglob
+function mv_apt_cache {
+ shopt -s extglob
pushd /var/cache/apt/archives
if [ $(ls -1q . | wc -l ) -gt 2 ]; then
popd
}
-function check_dir() {
+function check_dir {
if [ ! -d $1 ]; then
mkdir -p $1
fi
}
-function clean_dir() {
- shopt -s extglob
+function clean_dir {
+ shopt -s extglob
pushd $1
if [ $(ls -1q . | wc -l ) -ne 0 ]; then
filename=$module-$VER.tar.gz
if [ ! -e $filename ]; then
if [ ! -e $SHARE_FOLDER/$filename ]; then
- echo "Cannot install module $module ..."
- continue
- else
+ echo "Cannot install module $module ..."
+ continue
+ else
cp $SHARE_FOLDER/$filename .
fi
fi
tar xvzf $filename
- if [ -d $module ]; then
+ if [ -d $module ]; then
echo "Installing module $module ..."
- pushd $module
+ pushd $module
bash ./install.sh
popd
- rm -rf $module
+ rm -rf $module
fi
-done
+done
# Call scripts to collect everything from Internet,
# all the collected files need to be put under ICN_PATH
-for collect_sh in `find icn/ -name collect_*.sh | sort`
-do
- collect_parent=`dirname $collect_sh`
- pushd $collect_parent
+for collect_sh in `find icn/ -name "collect_*.sh" | sort`; do
+ collect_parent=`dirname $collect_sh`
+ pushd $collect_parent
bash `basename $collect_sh` $ICN_PATH
- popd
+ popd
done
mkdir -p build
if [ ! -f "build/ubuntu-18.04.2-server-amd64.iso" ];then
- curl "http://old-releases.ubuntu.com/releases/18.04.2/ubuntu-18.04.2-server-amd64.iso" \
+ curl "http://old-releases.ubuntu.com/releases/18.04.2/ubuntu-18.04.2-server-amd64.iso" \
-o build/ubuntu-18.04.2-server-amd64.iso
else
- echo "Not download official ISO, using existing one"
+ echo "Not download official ISO, using existing one"
fi
mkdir -p build/iso
cp -rf icn build/ubuntu/
mkisofs -R -J -T -v -no-emul-boot -boot-load-size 4 -boot-info-table \
- -b isolinux/isolinux.bin -c isolinux/boot.cat -o icn-ubuntu-18.04.iso build/ubuntu/
+ -b isolinux/isolinux.bin -c isolinux/boot.cat -o icn-ubuntu-18.04.iso build/ubuntu/