deploy/ironic/logs/
deploy/baremetal-operator/logs/
deploy/cert-manager/logs/
-deploy/metal3/scripts/machines-values.yaml
+deploy/metal3/scripts/machine-*-values.yaml
deploy/cluster-api/logs/
-deploy/clusters/logs/
+deploy/cluster/logs/
env/metal3/logs/
-deploy/clusters/addons/
+deploy/cluster/addons/
build/
.vagrant/
deploy/kata/logs/
--- /dev/null
+apiVersion: v2
+name: cluster
+version: 0.1.0
+description: A Cluster API bare metal cluster
+type: application
+sources:
+- https://gerrit.akraino.org/r/icn
# Flannel
curl -sL https://raw.githubusercontent.com/coreos/flannel/${FLANNEL_VERSION}/Documentation/kube-flannel.yml -o ${SCRIPTDIR}/addons/flannel.yaml
cat <<EOF >${SCRIPTDIR}/templates/flannel-addon.yaml
-{{- range \$clusterName, \$cluster := .Values.clusters }}
-{{- if eq \$cluster.cni "flannel" }}
+{{- if eq .Values.cni "flannel" }}
---
$(kubectl create configmap flannel-addon --from-file=${SCRIPTDIR}/addons/flannel.yaml -o yaml --dry-run=client)
{{- end }}
-{{- end }}
EOF
- sed -i -e 's/ name: flannel-addon/ name: {{ $clusterName }}-flannel-addon/' ${SCRIPTDIR}/templates/flannel-addon.yaml
- sed -i -e 's/10.244.0.0\/16/{{ $cluster.podCidr }}/' ${SCRIPTDIR}/templates/flannel-addon.yaml
+ sed -i -e 's/ name: flannel-addon/ name: {{ .Values.clusterName }}-flannel-addon/' ${SCRIPTDIR}/templates/flannel-addon.yaml
+ sed -i -e 's/10.244.0.0\/16/{{ .Values.podCidr }}/' ${SCRIPTDIR}/templates/flannel-addon.yaml
# Flux
flux install --export >${SCRIPTDIR}/addons/flux-system.yaml
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
- name: {{ $cluster.flux.repositoryName }}
+ name: {{ .Values.flux.repositoryName }}
namespace: flux-system
spec:
gitImplementation: go-git
interval: 1m0s
ref:
- branch: {{ $cluster.flux.branch }}
+ branch: {{ .Values.flux.branch }}
timeout: 20s
- url: {{ $cluster.flux.url }}
+ url: {{ .Values.flux.url }}
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
- name: {{ $clusterName }}-flux-sync
+ name: {{ .Values.clusterName }}-flux-sync
namespace: flux-system
spec:
interval: 10m0s
- path: {{ $cluster.flux.path }}
+ path: {{ .Values.flux.path }}
prune: true
sourceRef:
kind: GitRepository
- name: {{ $cluster.flux.repositoryName }}
+ name: {{ .Values.flux.repositoryName }}
EOF
cat <<EOF >${SCRIPTDIR}/templates/flux-addon.yaml
-{{- range \$clusterName, \$cluster := .Values.clusters }}
-{{- if \$cluster.flux }}
+{{- if .Values.flux }}
---
$(kubectl create configmap flux-addon --from-file=${SCRIPTDIR}/addons/flux-system.yaml,${SCRIPTDIR}/addons/sync.yaml -o yaml --dry-run=client)
{{- end }}
-{{- end }}
EOF
- sed -i -e 's/ name: flux-addon/ name: {{ $clusterName }}-flux-addon/' ${SCRIPTDIR}/templates/flux-addon.yaml
+ sed -i -e 's/ name: flux-addon/ name: {{ .Values.clusterName }}-flux-addon/' ${SCRIPTDIR}/templates/flux-addon.yaml
# PodSecurityPolicy is being replaced in future versions of K8s.
# The recommended practice is described by K8s at
apiGroup: rbac.authorization.k8s.io
EOF
cat <<EOF >${SCRIPTDIR}/templates/podsecurity-addon.yaml
-{{- range \$clusterName, \$cluster := .Values.clusters }}
---
$(kubectl create configmap podsecurity-addon --from-file=${SCRIPTDIR}/addons/podsecurity.yaml -o yaml --dry-run=client)
-{{- end }}
EOF
- sed -i -e 's/ name: podsecurity-addon/ name: {{ $clusterName }}-podsecurity-addon/' ${SCRIPTDIR}/templates/podsecurity-addon.yaml
+ sed -i -e 's/ name: podsecurity-addon/ name: {{ .Values.clusterName }}-podsecurity-addon/' ${SCRIPTDIR}/templates/podsecurity-addon.yaml
}
--- /dev/null
+Thank you for installing the {{ .Chart.Name }} chart of Akraino ICN.
+
+Your release is named {{ .Release.Name }}.
+
+To learn more about this release, try:
+
+ $ helm status {{ .Release.Name }}
+ $ helm get all {{ .Release.Name }}
+
+To get a brief description of this cluster and its resources, try:
+
+ $ clusterctl describe cluster {{ .Values.clusterName }}
+
+To view the status of this cluster's control plane, try:
+
+ $ kubectl get kubeadmcontrolplane {{ .Values.clusterName }}
+
+Once the first control plane node is up and running, retrieve this
+cluster's kubeconfig with:
+
+ $ clusterctl get kubeconfig {{ .Values.clusterName }} > {{ .Values.clusterName }}.kubeconfig
-{{- define "clusters.containerRuntime" -}}
+{{- define "cluster.containerRuntime" -}}
{{- if eq .containerRuntime "containerd" -}}
- path: /usr/local/bin/install-container-runtime.sh
permissions: '0777'
-{{- define "clusters.keepalived" -}}
+{{- define "cluster.keepalived" -}}
{{- if .keepalived -}}
- path: /etc/keepalived/keepalived.conf
content: |
-{{- range $clusterName, $cluster := .Values.clusters }}
---
apiVersion: cluster.x-k8s.io/v1alpha4
kind: Cluster
metadata:
labels:
- cluster.x-k8s.io/cluster-name: {{ $clusterName }}
- {{- toYaml $cluster.clusterLabels | nindent 4 }}
- name: {{ $clusterName }}
+ cluster.x-k8s.io/cluster-name: {{ .Values.clusterName }}
+ {{- toYaml .Values.clusterLabels | nindent 4 }}
+ name: {{ .Values.clusterName }}
spec:
clusterNetwork:
pods:
cidrBlocks:
- - {{ $cluster.podCidr }}
+ - {{ .Values.podCidr }}
services:
cidrBlocks:
- 10.244.0.0/18
controlPlaneRef:
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
kind: KubeadmControlPlane
- name: {{ $clusterName }}
+ name: {{ .Values.clusterName }}
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3Cluster
- name: {{ $clusterName }}
-{{- end }}
+ name: {{ .Values.clusterName }}
--- /dev/null
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha4
+kind: ClusterResourceSet
+metadata:
+ name: {{ .Values.clusterName }}-crs
+spec:
+ clusterSelector:
+ matchLabels:
+ cluster.x-k8s.io/cluster-name: {{ .Values.clusterName }}
+ resources:
+{{- if eq .Values.cni "flannel" }}
+ - name: {{ .Values.clusterName }}-flannel-addon
+ kind: ConfigMap
+{{- end }}
+{{- if .Values.flux }}
+ - name: {{ .Values.clusterName }}-flux-addon
+ kind: ConfigMap
+{{- end }}
+ - name: {{ .Values.clusterName }}-podsecurity-addon
+ kind: ConfigMap
-{{- range $clusterName, $cluster := .Values.clusters }}
-{{- if eq $cluster.cni "flannel" }}
+{{- if eq .Values.cni "flannel" }}
---
apiVersion: v1
data:
}
net-conf.json: |
{
- "Network": "{{ $cluster.podCidr }}",
+ "Network": "{{ .Values.podCidr }}",
"Backend": {
"Type": "vxlan"
}
kind: ConfigMap
metadata:
creationTimestamp: null
- name: {{ $clusterName }}-flannel-addon
-{{- end }}
+ name: {{ .Values.clusterName }}-flannel-addon
{{- end }}
-{{- range $clusterName, $cluster := .Values.clusters }}
-{{- if $cluster.flux }}
+{{- if .Values.flux }}
---
apiVersion: v1
data:
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
- name: {{ $cluster.flux.repositoryName }}
+ name: {{ .Values.flux.repositoryName }}
namespace: flux-system
spec:
gitImplementation: go-git
interval: 1m0s
ref:
- branch: {{ $cluster.flux.branch }}
+ branch: {{ .Values.flux.branch }}
timeout: 20s
- url: {{ $cluster.flux.url }}
+ url: {{ .Values.flux.url }}
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
- name: {{ $clusterName }}-flux-sync
+ name: {{ .Values.clusterName }}-flux-sync
namespace: flux-system
spec:
interval: 10m0s
- path: {{ $cluster.flux.path }}
+ path: {{ .Values.flux.path }}
prune: true
sourceRef:
kind: GitRepository
- name: {{ $cluster.flux.repositoryName }}
+ name: {{ .Values.flux.repositoryName }}
kind: ConfigMap
metadata:
creationTimestamp: null
- name: {{ $clusterName }}-flux-addon
-{{- end }}
+ name: {{ .Values.clusterName }}-flux-addon
{{- end }}
-{{- range $clusterName, $cluster := .Values.clusters }}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
kind: KubeadmConfigTemplate
metadata:
- name: {{ $clusterName }}-workers
+ name: {{ .Values.clusterName }}-workers
spec:
template:
spec:
kube-reserved: cpu=100m,memory=256Mi
name: '{{ "{{" }} ds.meta_data.name {{ "}}" }}'
preKubeadmCommands:
-{{- if $cluster.networks }}
+{{- if .Values.networks }}
# Without touching up /etc/hosts, kubeadm may pick the wrong
# (i.e. provisioning network) address for the node IP
- - sed -i "1i $(ip -4 addr show dev {{ $cluster.networks.baremetal.interface }} | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) $(hostname)" /etc/hosts
+ - sed -i "1i $(ip -4 addr show dev {{ .Values.networks.baremetal.interface }} | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) $(hostname)" /etc/hosts
{{- end }}
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
- add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- apt update -y
- apt-get install -y ca-certificates
- /usr/local/bin/install-container-runtime.sh
- - apt-get install -y kubelet={{ $cluster.kubeVersion }} kubeadm={{ $cluster.kubeVersion }} kubectl={{ $cluster.kubeVersion }}
+ - apt-get install -y kubelet={{ .Values.kubeVersion }} kubeadm={{ .Values.kubeVersion }} kubectl={{ .Values.kubeVersion }}
- systemctl enable --now kubelet
postKubeadmCommands:
- /usr/local/bin/harden_os.sh
# on files in /var/run, which won't persist after a reboot
- /usr/local/bin/set_kernel_cmdline.sh
files:
-{{ include "clusters.containerRuntime" $cluster | indent 6 }}
+{{ include "cluster.containerRuntime" .Values | indent 6 }}
- path: /etc/systemd/system/containerd.service.d/override.conf
content: |
{{ $.Files.Get "resources/override.conf" | indent 10 }}
content: |
{{ $.Files.Get "resources/set_kernel_cmdline.sh" | indent 10 }}
users:
- - name: {{ $cluster.userData.name }}
+ - name: {{ .Values.userData.name }}
shell: /bin/bash
lockPassword: False # Necessary to allow password login
- passwd: {{ $cluster.userData.hashedPassword }}
+ passwd: {{ .Values.userData.hashedPassword }}
sshAuthorizedKeys:
- - {{ $cluster.userData.sshAuthorizedKey }}
+ - {{ .Values.userData.sshAuthorizedKey }}
sudo: "ALL=(ALL) NOPASSWD:ALL"
groups: sudo # Necessary to allow SSH logins (see /etc/ssh/sshd_config)
- name: root
sshAuthorizedKeys:
- - {{ $cluster.userData.sshAuthorizedKey }}
-{{- end }}
+ - {{ .Values.userData.sshAuthorizedKey }}
-{{- range $clusterName, $cluster := .Values.clusters }}
---
apiVersion: controlplane.cluster.x-k8s.io/v1alpha4
kind: KubeadmControlPlane
metadata:
- name: {{ $clusterName }}
+ name: {{ .Values.clusterName }}
spec:
kubeadmConfigSpec:
clusterConfiguration:
node-labels: metal3.io/uuid={{ "{{" }} ds.meta_data.uuid {{ "}}" }}
name: '{{ "{{" }} ds.meta_data.name {{ "}}" }}'
preKubeadmCommands:
-{{- if $cluster.networks }}
+{{- if .Values.networks }}
# Without touching up /etc/hosts, kubeadm may pick the wrong
# (i.e. provisioning network) address for the node IP
- - sed -i "1i $(ip -4 addr show dev {{ $cluster.networks.baremetal.interface }} | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) $(hostname)" /etc/hosts
+ - sed -i "1i $(ip -4 addr show dev {{ .Values.networks.baremetal.interface }} | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | head -1) $(hostname)" /etc/hosts
{{- end }}
- curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
- add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
- add-apt-repository "deb https://apt.kubernetes.io/ kubernetes-xenial main"
- apt update -y
- apt-get install -y ca-certificates
-{{- if $cluster.keepalived }}
+{{- if .Values.keepalived }}
- apt-get install -y keepalived
- systemctl enable --now keepalived
{{- end }}
- /usr/local/bin/install-container-runtime.sh
- - apt-get install -y kubelet={{ $cluster.kubeVersion }} kubeadm={{ $cluster.kubeVersion }} kubectl={{ $cluster.kubeVersion }}
+ - apt-get install -y kubelet={{ .Values.kubeVersion }} kubeadm={{ .Values.kubeVersion }} kubectl={{ .Values.kubeVersion }}
- systemctl enable --now kubelet
postKubeadmCommands:
- mkdir -p /home/ubuntu/.kube
# files in /var/run, which won't persist after a reboot
- /usr/local/bin/set_kernel_cmdline.sh
files:
-{{ include "clusters.keepalived" $cluster | indent 4 }}
-{{ include "clusters.containerRuntime" $cluster | indent 4 }}
+{{ include "cluster.keepalived" .Values | indent 4 }}
+{{ include "cluster.containerRuntime" .Values | indent 4 }}
- path: /etc/systemd/system/containerd.service.d/override.conf
content: |
{{ $.Files.Get "resources/override.conf" | indent 8 }}
content: |
{{ $.Files.Get "resources/set_kernel_cmdline.sh" | indent 8 }}
users:
- - name: {{ $cluster.userData.name }}
+ - name: {{ .Values.userData.name }}
shell: /bin/bash
lockPassword: False # Necessary to allow password login
- passwd: {{ $cluster.userData.hashedPassword }}
+ passwd: {{ .Values.userData.hashedPassword }}
sshAuthorizedKeys:
- - {{ $cluster.userData.sshAuthorizedKey }}
+ - {{ .Values.userData.sshAuthorizedKey }}
sudo: "ALL=(ALL) NOPASSWD:ALL"
groups: sudo # Necessary to allow SSH logins (see /etc/ssh/sshd_config)
- name: root
sshAuthorizedKeys:
- - {{ $cluster.userData.sshAuthorizedKey }}
+ - {{ .Values.userData.sshAuthorizedKey }}
machineTemplate:
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3MachineTemplate
- name: {{ $clusterName }}-controlplane
+ name: {{ .Values.clusterName }}-controlplane
nodeDrainTimeout: 0s
- replicas: {{ $cluster.numControlPlaneMachines }}
+ replicas: {{ .Values.numControlPlaneMachines }}
rolloutStrategy:
rollingUpdate:
maxSurge: 1
type: RollingUpdate
- version: {{ $cluster.k8sVersion }}
-{{- end }}
+ version: {{ .Values.k8sVersion }}
--- /dev/null
+---
+apiVersion: cluster.x-k8s.io/v1alpha4
+kind: MachineDeployment
+metadata:
+ labels:
+ cluster.x-k8s.io/cluster-name: {{ .Values.clusterName }}
+ name: {{ .Values.clusterName }}
+spec:
+ clusterName: {{ .Values.clusterName }}
+ replicas: {{ .Values.numWorkerMachines }}
+ selector:
+ matchLabels:
+ cluster.x-k8s.io/cluster-name: {{ .Values.clusterName }}
+ template:
+ metadata:
+ labels:
+ cluster.x-k8s.io/cluster-name: {{ .Values.clusterName }}
+ spec:
+ bootstrap:
+ configRef:
+ apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
+ kind: KubeadmConfigTemplate
+ name: {{ .Values.clusterName }}-workers
+ clusterName: {{ .Values.clusterName }}
+ infrastructureRef:
+ apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
+ kind: Metal3MachineTemplate
+ name: {{ .Values.clusterName }}-workers
+ nodeDrainTimeout: 0s
+ version: {{ .Values.k8sVersion }}
-{{- range $clusterName, $cluster := .Values.clusters }}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3Cluster
metadata:
- name: {{ $clusterName }}
+ name: {{ .Values.clusterName }}
spec:
controlPlaneEndpoint:
- host: {{ $cluster.controlPlaneEndpoint }}
+ host: {{ .Values.controlPlaneEndpoint }}
port: 6443
noCloudProvider: true
-{{- end }}
-{{- range $clusterName, $cluster := .Values.clusters }}
-{{- if $cluster.networks }}
+{{- if .Values.networks }}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
kind: Metal3DataTemplate
metadata:
- name: {{ $clusterName }}-nodepool
+ name: {{ .Values.clusterName }}-nodepool
spec:
- clusterName: {{ $clusterName }}
+ clusterName: {{ .Values.clusterName }}
networkData:
links:
ethernets:
-{{- range $name, $network := $cluster.networks }}
+{{- range $name, $network := .Values.networks }}
- id: {{ $name }}_nic
macAddress:
fromHostInterface: {{ $network.interface }}
{{- end }}
networks:
ipv4DHCP:
-{{- range $name, $network := $cluster.networks }}
+{{- range $name, $network := .Values.networks }}
- id: {{ $name }}
link: {{ $name }}_nic
{{- end }}
{{- end }}
-{{- end }}
--- /dev/null
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
+kind: Metal3MachineTemplate
+metadata:
+ name: {{ .Values.clusterName }}-controlplane
+spec:
+ nodeReuse: false
+ template:
+ spec:
+ automatedCleaningMode: metadata
+{{- if .Values.controlPlaneHostSelector }}
+ hostSelector:
+ {{- toYaml .Values.controlPlaneHostSelector | nindent 8 }}
+{{- end }}
+{{- if .Values.networks }}
+ dataTemplate:
+ name: {{ .Values.clusterName }}-nodepool
+{{- end }}
+ image:
+ checksum: http://172.22.0.1:6180/images/{{ .Values.imageName }}.md5sum
+ url: http://172.22.0.1:6180/images/{{ .Values.imageName }}
--- /dev/null
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
+kind: Metal3MachineTemplate
+metadata:
+ name: {{ .Values.clusterName }}-workers
+spec:
+ nodeReuse: false
+ template:
+ spec:
+ automatedCleaningMode: metadata
+{{- if .Values.workersHostSelector }}
+ hostSelector:
+ {{- toYaml .Values.workersHostSelector | nindent 8 }}
+{{- end }}
+{{- if .Values.networks }}
+ dataTemplate:
+ name: {{ .Values.clusterName }}-nodepool
+{{- end }}
+ image:
+ checksum: http://172.22.0.1:6180/images/{{ .Values.imageName }}.md5sum
+ url: http://172.22.0.1:6180/images/{{ .Values.imageName }}
-{{- range $clusterName, $cluster := .Values.clusters }}
---
apiVersion: v1
data:
kind: ConfigMap
metadata:
creationTimestamp: null
- name: {{ $clusterName }}-podsecurity-addon
-{{- end }}
+ name: {{ .Values.clusterName }}-podsecurity-addon
--- /dev/null
+# clusterName is the name of the cluster.
+clusterName: cluster-1
+
+# clusterLabels is a dictionary of labels. The provider and site
+# labels are shown as examples only; additional labels may be
+# provided.
+clusterLabels:
+ provider: icn
+ site: test-site
+
+# numControlPlaneMachines is the number of control plane nodes.
+numControlPlaneMachines: 1
+
+# numWorkerMachines is the number of worker nodes.
+numWorkerMachines: 1
+
+# controlPlaneEndpoint is the address of the control plane endpoint.
+# With a highly-available control plane this would typically be a
+# load-balanced virtual IP, however other configurations are possible
+# as shown below.
+controlPlaneEndpoint: 192.168.151.254
+# controlPlanePrefix is the network prefix length (in bits) of the
+# control plane endpoint.
+controlPlanePrefix: 24
+
+# keepalived is one mechanism to provide a virtual control plane
+# endpoint. keepalived uses the VRRP protocol to assign the control
+# plane endpoint among the control plane nodes.
+#keepalived:
+# # The interface must be the same as the baremetal interface.
+# interface: ens6
+# routerId: 3
+
+# controlPlaneHostSelector uses labels added to the BareMetalHost
+# resources to select specific machines for the control plane. Using
+# this mechanism for example, one could assign a static address of a
+# known machine to the control plane endpoint.
+#controlPlaneHostSelector:
+# matchLabels:
+# machine: machine-1
+# workersHostSelector uses labels added to the BareMetalHost
+# resources to select specific machines for workers.
+#workersHostSelector:
+# matchLabels:
+# machine: machine-2
+
+# networks can be used when DHCP is present and the network
+# configuration of each machine may be provided with a template
+# containing the names of the baremetal and provisioning interfaces.
+#networks:
+# baremetal:
+# interface: ens6
+# provisioning:
+# interface: ens5
+
+# userData is used to provide cloud-init data for machines in the
+# cluster. See
+# https://cloudinit.readthedocs.io/en/latest/topics/modules.html#users-and-groups
+# for more information.
+userData:
+ name: ubuntu
+  # hashedPassword was created with `mkpasswd --method=SHA-512
+  # --rounds 10000 "mypasswd"`.
+ hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
+ # sshAuthorizedKey key will also be authorized to login as the root
+ # user.
+ sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
+
+# flux provides bootstrapping configuration of the cluster. When
+# enabled, the Flux controllers will be installed into the cluster and
+# begin reconciling the resources located at the specified location.
+flux:
+ repositoryName: icn
+ url: https://gerrit.akraino.org/r/icn
+ branch: master
+  # path is the path in the repository to the resources to be
+  # applied to the cluster.
+ path: ./deploy/site/e2etest
+
+# containerRuntime may be containerd or docker.
+containerRuntime: containerd
+
+# podCidr is the POD CIDR.
+podCidr: 10.244.64.0/18
+
+# cni is the cluster CNI. The only currently supported CNI is
+# flannel.
+cni: flannel
+
+# All the version info is captured in one block here. Care must
+# be taken to ensure that the OS, Kubernetes, and CRI (containerd
+# or docker) versions are all compatible.
+#
+# Refer to the below for further information:
+# - https://github.com/kubernetes/kubernetes/blob/master/build/dependencies.yaml
+# - https://download.docker.com/linux/ubuntu/dists/focal/stable/binary-amd64/Packages
+#
+# imageName is the OS image.
+imageName: focal-server-cloudimg-amd64.img
+# k8sVersion is the version of Kubernetes installed.
+k8sVersion: v1.21.6
+# kubeVersion is the version of the kubelet, kubeadm, and kubectl
+# packages.
+kubeVersion: 1.21.6-00
+# containerdVersion is the version of containerd installed.
+containerdVersion: 1.4.11-1
+# dockerVersion is the version of docker installed.
+dockerVersion: 5:20.10.10~3-0~ubuntu-focal
+++ /dev/null
-apiVersion: v2
-name: clusters
-type: application
-version: 0.1.0
+++ /dev/null
-# The dictionary of clusters to create.
-clusters:
- # The cluster name.
- ha-dhcp:
-
- # Example provider and site labels; additional labels may be
- # provided.
- clusterLabels:
- provider: icn
- site: test-site
-
- # The number of control plane nodes.
- numControlPlaneMachines: 3
-
- # The number of worker nodes.
- numWorkerMachines: 2
-
- # The control plane endpoint of the cluster. This is a virtual IP
- # managed by keepalived.
- controlPlaneEndpoint: 192.168.151.254
- controlPlanePrefix: 24
-
- # keepalived uses the VRRP protocol to assign the control plane
- # endpoint among the control plane nodes.
- keepalived:
- # This interface must be the same as the baremetal interface.
- interface: ens6
- routerId: 3
-
- # Since DHCP is present, the network configuration of each machine
- # may be provided with a template containing the names of the
- # baremetal and provisioning interfaces.
- networks:
- baremetal:
- interface: ens6
- provisioning:
- interface: ens5
-
- # The user account created in all the machines.
- userData:
- name: ubuntu
- # mkpasswd --method=SHA-512 --rounds 10000 "mypasswd"
- hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
- # This key will also be authorized to login as the root user
- sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
-
- # Flux bootstrapping of the cluster. When enabled, the Flux
- # controllers will be installed into the cluster and begin
- # reconciling the resources located at the specified location.
- flux:
- repositoryName: icn
- url: https://gerrit.akraino.org/r/icn
- branch: master
- # The path at the repository to the resources to be applied to the
- # cluster
- path: ./deploy/test-site/ha-dhcp
-
- # The containerRuntime may be containerd or docker.
- containerRuntime: containerd
-
- # The POD CIDR.
- podCidr: 10.244.64.0/18
-
- # The only currently supported CNI is flannel.
- cni: flannel
-
- # All the version info is captured in one block here. Care must
- # be taken to ensure that the OS, Kubernetes, and CRI (containerd
- # or docker) versions are all compatible.
- #
- # Refer to the below for further information:
- # - https://github.com/kubernetes/kubernetes/blob/master/build/dependencies.yaml
- # - https://download.docker.com/linux/ubuntu/dists/focal/stable/binary-amd64/Packages
- #
- # The OS image.
- imageName: focal-server-cloudimg-amd64.img
- # The version of Kubernetes installed.
- k8sVersion: v1.21.6
- # The version of the kubelet, kubeadm, and kubectl packages.
- kubeVersion: 1.21.6-00
- # The version of the CRI installed.
- containerdVersion: 1.4.11-1
- dockerVersion: 5:20.10.10~3-0~ubuntu-focal
+++ /dev/null
-# The dictionary of clusters to create.
-clusters:
- # The cluster name.
- static:
-
- # Example provider and site labels; additional labels may be
- # provided.
- clusterLabels:
- provider: icn
- site: test-site
-
- # The number of control plane nodes.
- numControlPlaneMachines: 1
-
- # The number of worker nodes.
- numWorkerMachines: 1
-
- # The control plane endpoint is set to the statically configured
- # baremetal network address of node1. The host selector is
- # defined below to ensure that node1 is the control plane.
- controlPlaneEndpoint: 10.10.110.23
- controlPlanePrefix: 24
-
- # Labels added to the BareMetalHost resources may be used here to
- # select specific machines for control plane or workers. This is
- # required here to ensure that node1 will host the control plane
- # endpoint.
- controlPlaneHostSelector:
- matchLabels:
- machine: node1
- workersHostSelector:
- matchLabels:
- machine: node2
-
- # The user account created in all the machines.
- userData:
- name: ubuntu
- # mkpasswd --method=SHA-512 --rounds 10000 "mypasswd"
- hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
- # This key will also be authorized to login as the root user
- sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
-
- # Flux bootstrapping of the cluster. When enabled, the Flux
- # controllers will be installed into the cluster and begin
- # reconciling the resources located at the specified location.
- flux:
- repositoryName: icn
- url: https://gerrit.akraino.org/r/icn
- branch: master
- # The path at the repository to the resources to be applied to the
- # cluster
- path: ./deploy/test-site/static
-
- # The containerRuntime may be containerd or docker.
- containerRuntime: containerd
-
- # The POD CIDR.
- podCidr: 10.244.64.0/18
-
- # The only currently supported CNI is flannel.
- cni: flannel
-
- # All the version info is captured in one block here. Care must
- # be taken to ensure that the OS, Kubernetes, and CRI (containerd
- # or docker) versions are all compatible.
- #
- # Refer to the below for further information:
- # - https://github.com/kubernetes/kubernetes/blob/master/build/dependencies.yaml
- # - https://download.docker.com/linux/ubuntu/dists/focal/stable/binary-amd64/Packages
- #
- # The OS image.
- imageName: focal-server-cloudimg-amd64.img
- # The version of Kubernetes installed.
- k8sVersion: v1.21.6
- # The version of the kubelet, kubeadm, and kubectl packages.
- kubeVersion: 1.21.6-00
- # The version of the CRI installed.
- containerdVersion: 1.4.11-1
- dockerVersion: 5:20.10.10~3-0~ubuntu-focal
+++ /dev/null
-{{- range $clusterName, $cluster := .Values.clusters }}
----
-apiVersion: addons.cluster.x-k8s.io/v1alpha4
-kind: ClusterResourceSet
-metadata:
- name: {{ $clusterName }}-crs
-spec:
- clusterSelector:
- matchLabels:
- cluster.x-k8s.io/cluster-name: {{ $clusterName }}
- resources:
-{{- if eq $cluster.cni "flannel" }}
- - name: {{ $clusterName }}-flannel-addon
- kind: ConfigMap
-{{- end }}
-{{- if $cluster.flux }}
- - name: {{ $clusterName }}-flux-addon
- kind: ConfigMap
-{{- end }}
- - name: {{ $clusterName }}-podsecurity-addon
- kind: ConfigMap
-{{- end }}
+++ /dev/null
-{{- range $clusterName, $cluster := .Values.clusters }}
----
-apiVersion: cluster.x-k8s.io/v1alpha4
-kind: MachineDeployment
-metadata:
- labels:
- cluster.x-k8s.io/cluster-name: {{ $clusterName }}
- name: {{ $clusterName }}
-spec:
- clusterName: {{ $clusterName }}
- replicas: {{ $cluster.numWorkerMachines }}
- selector:
- matchLabels:
- cluster.x-k8s.io/cluster-name: {{ $clusterName }}
- template:
- metadata:
- labels:
- cluster.x-k8s.io/cluster-name: {{ $clusterName }}
- spec:
- bootstrap:
- configRef:
- apiVersion: bootstrap.cluster.x-k8s.io/v1alpha4
- kind: KubeadmConfigTemplate
- name: {{ $clusterName }}-workers
- clusterName: {{ $clusterName }}
- infrastructureRef:
- apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
- kind: Metal3MachineTemplate
- name: {{ $clusterName }}-workers
- nodeDrainTimeout: 0s
- version: {{ $cluster.k8sVersion }}
-{{- end }}
+++ /dev/null
-{{- range $clusterName, $cluster := .Values.clusters }}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
-kind: Metal3MachineTemplate
-metadata:
- name: {{ $clusterName }}-controlplane
-spec:
- nodeReuse: false
- template:
- spec:
- automatedCleaningMode: metadata
-{{- if $cluster.controlPlaneHostSelector }}
- hostSelector:
- {{- toYaml $cluster.controlPlaneHostSelector | nindent 8 }}
-{{- end }}
-{{- if $cluster.networks }}
- dataTemplate:
- name: {{ $clusterName }}-nodepool
-{{- end }}
- image:
- checksum: http://172.22.0.1:6180/images/{{ $cluster.imageName }}.md5sum
- url: http://172.22.0.1:6180/images/{{ $cluster.imageName }}
-{{- end }}
+++ /dev/null
-{{- range $clusterName, $cluster := .Values.clusters }}
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1alpha5
-kind: Metal3MachineTemplate
-metadata:
- name: {{ $clusterName }}-workers
-spec:
- nodeReuse: false
- template:
- spec:
- automatedCleaningMode: metadata
-{{- if $cluster.workersHostSelector }}
- hostSelector:
- {{- toYaml $cluster.workersHostSelector | nindent 8 }}
-{{- end }}
-{{- if $cluster.networks }}
- dataTemplate:
- name: {{ $clusterName }}-nodepool
-{{- end }}
- image:
- checksum: http://172.22.0.1:6180/images/{{ $cluster.imageName }}.md5sum
- url: http://172.22.0.1:6180/images/{{ $cluster.imageName }}
-{{- end }}
--- /dev/null
+apiVersion: v2
+name: machine
+version: 0.1.0
+description: A Metal3 BareMetalHost
+type: application
+sources:
+- https://gerrit.akraino.org/r/icn
--- /dev/null
+Thank you for installing the {{ .Chart.Name }} chart of Akraino ICN.
+
+Your release is named {{ .Release.Name }}.
+
+To learn more about this release, try:
+
+ $ helm status {{ .Release.Name }}
+ $ helm get all {{ .Release.Name }}
-{{- define "machines.networkData" -}}
+{{- define "machine.networkData" -}}
{{- if .networks -}}
{
"links": [
-{{- define "machines.userData" -}}
+{{- define "machine.userData" -}}
{{- if .userData -}}
#cloud-config
{{- if and .userData.name .userData.hashedPassword }}
--- /dev/null
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.machineName }}-bmc-secret
+type: Opaque
+data:
+ username: {{ .Values.bmcUsername | b64enc }}
+ password: {{ .Values.bmcPassword | b64enc }}
+{{- if .Values.networks }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.machineName }}-network-data
+type: Opaque
+data:
+ networkData: {{ include "machine.networkData" .Values | b64enc }}
+{{- end }}
+{{- if .Values.userData }}
+---
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ .Values.machineName }}-user-data
+type: Opaque
+data:
+ userData: {{ include "machine.userData" .Values | b64enc }}
+{{- end }}
+---
+apiVersion: metal3.io/v1alpha1
+kind: BareMetalHost
+metadata:
+ name: {{ .Values.machineName }}
+{{- if .Values.machineLabels }}
+ labels:
+ {{- toYaml .Values.machineLabels | nindent 4 }}
+{{- end }}
+spec:
+ online: true
+{{- if .Values.bootMACAddress }}
+ bootMACAddress: {{ .Values.bootMACAddress }}
+{{- end }}
+ bmc:
+ address: {{ .Values.bmcAddress }}
+ credentialsName: {{ .Values.machineName }}-bmc-secret
+{{- if .Values.networks }}
+ networkData:
+ name: {{ .Values.machineName }}-network-data
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+{{- if .Values.userData }}
+ userData:
+ name: {{ .Values.machineName }}-user-data
+ namespace: {{ .Release.Namespace }}
+{{- end }}
+ rootDeviceHints:
+ minSizeGigabytes: 48
+{{- if .Values.imageName }}
+ image:
+ url: http://172.22.0.1:6180/images/{{ .Values.imageName }}
+ checksum: http://172.22.0.1:6180/images/{{ .Values.imageName }}.md5sum
+{{- end }}
--- /dev/null
+# machineName is the host name of the machine.
+machineName: machine-1
+
+# machineLabels is a dictionary of labels. The machine label is shown
+# as an example only; additional labels may be provided.
+#machineLabels:
+# machine: machine-1
+
+# bmcAddress is the address of the machine's bare metal controller.
+bmcAddress: ipmi://10.10.110.11
+# bmcUsername is the username used to access the machine's bare metal
+# controller.
+bmcUsername: admin
+# bmcPassword is the password used to access the machine's bare metal
+# controller.
+bmcPassword: password
+
+# bootMACAddress is the MAC address of the NIC that will PXE boot.
+# This is only required when using virtual machines.
+#bootMACAddress: 00:1e:67:fe:f4:1a
+
+# networks is used to provide per-machine network configuration.
+# Where feasible (such as when DHCP is available or static IP pools
+# can be used), the use of templates at the cluster level is
+# encouraged instead.
+#networks:
+# baremetal:
+# macAddress: 00:1e:67:fe:f4:19
+# # type is either ipv4 or ipv4_dhcp
+# type: ipv4
+# # ipAddress is only valid for type ipv4
+# ipAddress: 10.10.110.21/24
+# # gateway is only valid for type ipv4
+# gateway: 10.10.110.1
+# # nameservers is an array of DNS servers; only valid for type ipv4
+# nameservers: ["8.8.8.8"]
+# provisioning:
+# macAddress: 00:1e:67:fe:f4:1a
+# type: ipv4_dhcp
+# private:
+# macAddress: 00:1e:67:f8:6a:40
+# type: ipv4
+# ipAddress: 10.10.112.2/24
+# storage:
+# macAddress: 00:1e:67:f8:6a:41
+# type: ipv4
+# ipAddress: 10.10.113.2/24
+
+# userData is used to provide per-machine cloud-init data. Again,
+# where feasible, the use of templates at the cluster level is
+# encouraged instead.
+#userData:
+# name: ubuntu
+# hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
+# sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
+# fqdn: machine-1.akraino.icn.org
+
+# Legacy ICN support only, do not use with R6 or later
+#imageName: focal-server-cloudimg-amd64.img
+++ /dev/null
-apiVersion: v2
-name: machines
-type: application
-version: 0.1.0
+++ /dev/null
-machines:
- machine-1:
- bmcUsername: admin
- bmcPassword: password
- bmcAddress: ipmi://192.168.151.1:6230
-
- # Optional
- bootMACAddress: 52:54:00:2b:bc:3a
-
- # Optional
- imageName: focal-server-cloudimg-amd64.img
-
- # Optional
- networks:
- baremetal:
- macAddress: 52:54:00:da:c9:7b
- type: ipv4_dhcp
- provisioning:
- macAddress: 52:54:00:2b:bc:3a
- type: ipv4_dhcp
-
- # Optional
- userData:
- name: ubuntu
- hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
- sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
- fqdn: machine-1.akraino.icn.org
+++ /dev/null
-{{- range $name, $machine := .Values.machines }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ $name }}-bmc-secret
-type: Opaque
-data:
- username: {{ $machine.bmcUsername | b64enc }}
- password: {{ $machine.bmcPassword | b64enc }}
-{{- if $machine.networks }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ $name }}-network-data
-type: Opaque
-data:
- networkData: {{ include "machines.networkData" $machine | b64enc }}
-{{- end }}
-{{- if $machine.userData }}
----
-apiVersion: v1
-kind: Secret
-metadata:
- name: {{ $name }}-user-data
-type: Opaque
-data:
- userData: {{ include "machines.userData" $machine | b64enc }}
-{{- end }}
----
-apiVersion: metal3.io/v1alpha1
-kind: BareMetalHost
-metadata:
- name: {{ $name }}
-spec:
- online: true
-{{- if $machine.bootMACAddress }}
- bootMACAddress: {{ $machine.bootMACAddress }}
-{{- end }}
- bmc:
- address: {{ $machine.bmcAddress }}
- credentialsName: {{ $name }}-bmc-secret
-{{- if $machine.imageName }}
- image:
- url: http://172.22.0.1:6180/images/{{ $machine.imageName }}
- checksum: http://172.22.0.1:6180/images/{{ $machine.imageName }}.md5sum
-{{- end }}
-{{- if $machine.networks }}
- networkData:
- name: {{ $name }}-network-data
- namespace: {{ $.Release.Namespace }}
-{{- end }}
-{{- if $machine.userData }}
- userData:
- name: {{ $name }}-user-data
- namespace: {{ $.Release.Namespace }}
-{{- end }}
- rootDeviceHints:
- minSizeGigabytes: 48
-{{- end }}
exit 1
fi
- printf " userData:\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf "userData:\n" >>${SCRIPTDIR}/${name}-values.yaml
if [ -n "$username" ]; then
- printf " name: ${username}\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf " name: ${username}\n" >>${SCRIPTDIR}/${name}-values.yaml
fi
if [ -n "$password" ]; then
passwd=$(mkpasswd --method=SHA-512 --rounds 4096 "$password")
- printf " hashedPassword: ${passwd}\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf " hashedPassword: ${passwd}\n" >>${SCRIPTDIR}/${name}-values.yaml
fi
if [ -n "$COMPUTE_NODE_FQDN" ]; then
- printf " fqdn: ${COMPUTE_NODE_FQDN}\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf " fqdn: ${COMPUTE_NODE_FQDN}\n" >>${SCRIPTDIR}/${name}-values.yaml
fi
if [ ! -f $HOME/.ssh/id_rsa.pub ]; then
yes y | ssh-keygen -t rsa -N "" -f $HOME/.ssh/id_rsa
fi
- printf " sshAuthorizedKey: $(cat $HOME/.ssh/id_rsa.pub)\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf " sshAuthorizedKey: $(cat $HOME/.ssh/id_rsa.pub)\n" >>${SCRIPTDIR}/${name}-values.yaml
}
create_networkdata() {
name="$1"
- node_networkdata $name >>${SCRIPTDIR}/machines-values.yaml
+ node_networkdata $name >>${SCRIPTDIR}/${name}-values.yaml
}
function make_bm_hosts {
while IFS=',' read -r name ipmi_username ipmi_password ipmi_address boot_mac os_username os_password os_image_name; do
- printf " ${name}:\n" >>${SCRIPTDIR}/machines-values.yaml
- printf " bmcUsername: ${ipmi_username}\n" >>${SCRIPTDIR}/machines-values.yaml
- printf " bmcPassword: ${ipmi_password}\n" >>${SCRIPTDIR}/machines-values.yaml
- printf " bmcAddress: ipmi://${ipmi_address}\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf "machineName: ${name}\n" >${SCRIPTDIR}/${name}-values.yaml
+ printf "bmcUsername: ${ipmi_username}\n" >>${SCRIPTDIR}/${name}-values.yaml
+ printf "bmcPassword: ${ipmi_password}\n" >>${SCRIPTDIR}/${name}-values.yaml
+ printf "bmcAddress: ipmi://${ipmi_address}\n" >>${SCRIPTDIR}/${name}-values.yaml
if [[ ! -z ${boot_mac} ]]; then
- printf " bootMACAddress: ${boot_mac}\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf "bootMACAddress: ${boot_mac}\n" >>${SCRIPTDIR}/${name}-values.yaml
fi
- printf " imageName: ${BM_IMAGE}\n" >>${SCRIPTDIR}/machines-values.yaml
+ printf "imageName: ${BM_IMAGE}\n" >>${SCRIPTDIR}/${name}-values.yaml
create_userdata $name $os_username $os_password
create_networkdata $name
+
+ helm -n metal3 install ${name} ${SCRIPTDIR}/../../machine --create-namespace -f ${SCRIPTDIR}/${name}-values.yaml
+
done
}
done
}
+function clean_bm_hosts {
+ while IFS=',' read -r name ipmi_username ipmi_password ipmi_address boot_mac os_username os_password os_image_name; do
+ helm -n metal3 uninstall ${name}
+ rm -rf ${SCRIPTDIR}/${name}-values.yaml
+ done
+}
+
function clean_all {
- helm -n metal3 uninstall machines
- rm -f ${SCRIPTDIR}/machines-values.yaml
+ list_nodes | clean_bm_hosts
if [ -f $IRONIC_DATA_DIR/nodes.json ]; then
rm -rf $IRONIC_DATA_DIR/nodes.json
fi
}
function apply_bm_hosts {
- printf "machines:\n" >${SCRIPTDIR}/machines-values.yaml
list_nodes | make_bm_hosts
- helm -n metal3 install machines ${SCRIPTDIR}/../../machines --create-namespace -f ${SCRIPTDIR}/machines-values.yaml
}
function deprovision_all_hosts {
--- /dev/null
+clusterName: e2etest
+clusterLabels:
+ site: vm
+keepalived:
+ interface: ens6
+ routerId: 3
+networks:
+ baremetal:
+ interface: ens6
+ provisioning:
+ interface: ens5
+userData:
+ sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
+flux:
+ path: ./deploy/site/vm/e2etest
+++ /dev/null
-clusters:
- e2etest:
- clusterLabels:
- provider: icn
- site: vm
- numControlPlaneMachines: 1
- numWorkerMachines: 1
- controlPlaneEndpoint: 192.168.151.254
- controlPlanePrefix: 24
- keepalived:
- interface: ens6
- routerId: 3
- networks:
- baremetal:
- interface: ens6
- provisioning:
- interface: ens5
- userData:
- name: ubuntu
- hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
- sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
- flux:
- repositoryName: icn
- url: https://gerrit.akraino.org/r/icn
- branch: master
- path: ./deploy/site/vm/e2etest/
- containerRuntime: containerd
- podCidr: 10.244.64.0/18
- cni: flannel
- imageName: focal-server-cloudimg-amd64.img
- k8sVersion: v1.21.6
- kubeVersion: 1.21.6-00
- containerdVersion: 1.4.11-1
- dockerVersion: 5:20.10.10~3-0~ubuntu-focal
SSH_AUTHORIZED_KEY=$(cat ${HOME}/.ssh/id_rsa.pub)
# Use ! instead of usual / to avoid escaping / in
# SSH_AUTHORIZED_KEY
- sed -e 's!sshAuthorizedKey: .*!sshAuthorizedKey: '"${SSH_AUTHORIZED_KEY}"'!' ${SCRIPTDIR}/clusters-values.yaml >${BUILDDIR}/clusters-values.yaml
+ sed -e 's!sshAuthorizedKey: .*!sshAuthorizedKey: '"${SSH_AUTHORIZED_KEY}"'!' ${SCRIPTDIR}/cluster-e2etest-values.yaml >${BUILDDIR}/cluster-e2etest-values.yaml
+}
+
+function release_name {
+ local -r values_path=$1
+ name=$(basename ${values_path})
+ echo ${name%-values.yaml}
}
function deploy {
- helm -n metal3 install machines ${SCRIPTDIR}/../../machines --create-namespace -f ${BUILDDIR}/machines-values.yaml
- helm -n metal3 install clusters ${SCRIPTDIR}/../../clusters --create-namespace -f ${BUILDDIR}/clusters-values.yaml
+ for values in build/site/vm/machine-*-values.yaml; do
+ helm -n metal3 install $(release_name ${values}) ${SCRIPTDIR}/../../machine --create-namespace -f ${values}
+ done
+ helm -n metal3 install cluster-e2etest ${SCRIPTDIR}/../../cluster --create-namespace -f ${BUILDDIR}/cluster-e2etest-values.yaml
}
function clean {
- helm -n metal3 uninstall clusters
- helm -n metal3 uninstall machines
+ helm -n metal3 uninstall cluster-e2etest
+ for values in build/site/vm/machine-*-values.yaml; do
+ helm -n metal3 uninstall $(release_name ${values})
+ done
}
function is_cluster_ready {
exit 1
fi
- printf " networks:\n"
+ printf "networks:\n"
for network in $(cat $NODES_FILE | jq -r --arg name "$name" '.nodes[] | select(.name==$name) | .net.networks[].id'); do
link=$(networkdata_networks_field $name $network "link")
type=$(networkdata_networks_field $name $network "type")
gateway=$(networkdata_networks_field $name $network "gateway")
dns_nameservers=$(networkdata_networks_field $name $network "dns_nameservers")
- printf " ${network}:\n"
- printf " macAddress: ${mac}\n"
- printf " type: ${type}\n"
+ printf " ${network}:\n"
+ printf " macAddress: ${mac}\n"
+ printf " type: ${type}\n"
if [[ $ip_address != "null" ]]; then
- printf " ipAddress: ${ip_address}\n"
+ printf " ipAddress: ${ip_address}\n"
fi
if [[ $gateway != "null" ]]; then
- printf " gateway: ${gateway}\n"
+ printf " gateway: ${gateway}\n"
fi
if [[ $dns_nameservers != "null" ]]; then
- printf " nameservers: ${dns_nameservers}\n"
+ printf " nameservers: ${dns_nameservers}\n"
fi
done
}
ipmi_port=$((6230+index-1))
boot_mac=$(virsh -c qemu:///system dumpxml "${site}-${name}" | xmlstarlet sel -t -v "//interface[source/@network='${site}-provisioning']/mac/@address")
-if [[ ${index} == 1 ]]; then
- mkdir -p build/site/${site}
- cat <<EOF >build/site/${site}/machines-values.yaml
-machines:
-EOF
-fi
-cat <<EOF >>build/site/${site}/machines-values.yaml
- machine-${index}:
- bootMACAddress: ${boot_mac}
- bmcAddress: ipmi://${ipmi_host}:${ipmi_port}
- bmcUsername: admin
- bmcPassword: password
+mkdir -p build/site/${site}
+cat <<EOF >build/site/${site}/machine-${index}-values.yaml
+machineName: machine-${index}
+bootMACAddress: ${boot_mac}
+bmcAddress: ipmi://${ipmi_host}:${ipmi_port}
+bmcUsername: admin
+bmcPassword: password
EOF