function deploy {
export EXP_CLUSTER_RESOURCE_SET=true
- clusterctl init --infrastructure=metal3
+ clusterctl init --infrastructure=metal3:${CAPM3_VERSION}
}
function clean {
{{- end }}
EOF
sed -i -e 's/ name: flux-addon/ name: {{ $clusterName }}-flux-addon/' ${SCRIPTDIR}/templates/flux-addon.yaml
+
+ # PodSecurityPolicy is being replaced in future versions of K8s.
+ # The recommended practice is described by K8s at
+ # - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#recommended-practice
+ # - https://kubernetes.io/docs/concepts/security/pod-security-standards/
+ # and provides three levels: privileged, baseline, and restricted.
+ #
+ # The question to answer here is how to reconcile the K8s levels
+ # against the Akraino security requirements.
+ #
+ # For the time being, the below populates the cluster with the K8s
+ # recommended levels and provides an additional policy (icn) bound
+ # to the system:authenticated group to meet the Akraino
+ # requirements.
+ cat <<EOF >${SCRIPTDIR}/addons/podsecurity.yaml
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/privileged-psp.yaml)
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/baseline-psp.yaml)
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/restricted-psp.yaml)
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/privileged-psp.yaml |
+ sed -e 's/ name: privileged/ name: icn/' |
+ sed -e '/^ allowedCapabilities:/,/^ [!-]/d')
+ allowedCapabilities:
+ - 'NET_ADMIN'
+ - 'SYS_ADMIN'
+ - 'SYS_NICE'
+ - 'SYS_PTRACE'
+ requiredDropCapabilities:
+ - 'NET_RAW'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: psp:privileged
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: psp:baseline
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - baseline
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: psp:icn
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - icn
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: psp:restricted
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+ - policy
+ resourceNames:
+ - restricted
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: psp:privileged:nodes
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+- kind: Group
+ name: system:nodes
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: psp:privileged:kube-system
+ namespace: kube-system
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+subjects:
+- kind: Group
+ name: system:serviceaccounts:kube-system
+ apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: psp:icn:any
+roleRef:
+ kind: ClusterRole
+ name: psp:icn
+ apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: Group
+ name: system:authenticated
+ apiGroup: rbac.authorization.k8s.io
+EOF
+ cat <<EOF >${SCRIPTDIR}/templates/podsecurity-addon.yaml
+{{- range \$clusterName, \$cluster := .Values.clusters }}
+---
+$(kubectl create configmap podsecurity-addon --from-file=${SCRIPTDIR}/addons/podsecurity.yaml -o yaml --dry-run=client)
+{{- end }}
+EOF
+ sed -i -e 's/ name: podsecurity-addon/ name: {{ $clusterName }}-podsecurity-addon/' ${SCRIPTDIR}/templates/podsecurity-addon.yaml
+
}
case $1 in
# The user account created in all the machines.
userData:
name: ubuntu
- # mkpasswd --method=SHA-512 --rounds 4096 "mypasswd"
- hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+ # mkpasswd --method=SHA-512 --rounds 10000 "mypasswd"
+ hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
# This key will also be authorized to login as the root user
sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
--- /dev/null
+#!/usr/bin/env bash
+set -eux -o pipefail
+
+# Remove visibility of /version
+kubectl --kubeconfig=/etc/kubernetes/admin.conf replace -f - <<EOF
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "false"
+ labels:
+ kubernetes.io/bootstrapping: rbac-defaults
+ name: system:public-info-viewer
+rules:
+- nonResourceURLs:
+ - /healthz
+ - /livez
+ - /readyz
+ verbs:
+ - get
+EOF
+
+# Opt out of automatic mounting of SA token
+kubectl --kubeconfig=/etc/kubernetes/admin.conf replace -f - <<EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: default
+automountServiceAccountToken: false
+EOF
--- /dev/null
+#!/usr/bin/env bash
+set -eux -o pipefail
+
+# append: add a line to a file only if no matching line is already
+# present, making repeated hardening runs idempotent.
+#   $1: line to add; also used verbatim as the grep search pattern
+#   $2: target file (must exist; grep errors on a missing file)
+# NOTE(review): $line serves double duty as literal file content and as
+# a grep BRE pattern — callers escape glob chars (e.g. '\*') so the
+# pattern matches, which means the escaped form is what gets written to
+# the file; confirm the consumer (e.g. usbguard rules.conf) accepts it.
+function append {
+ local -r line=$1
+ local -r file=$2
+ if [[ $(grep -c "${line}" ${file}) == 0 ]]; then
+ echo "${line}" >>${file}
+ fi
+}
+
+# replace_or_append: replace every line matching an ERE pattern with a
+# fixed replacement line, or append the line to the file if nothing
+# matched. Used to force a config key to a known value.
+#   $1: extended regex identifying the line(s) to replace
+#   $2: replacement line (inserted literally into the sed 's' command;
+#       must not contain unescaped '/' or sed metacharacters)
+#   $3: target file (edited in place)
+function replace_or_append {
+ local -r pattern=$1
+ local -r line=$2
+ local -r file=$3
+ # sed's 'w' flag writes each substituted line to the changelog file;
+ # an empty changelog afterwards means the pattern matched nothing.
+ sed -i -E '/'"${pattern}"'/ s/.*/'"${line}"'/w /tmp/changelog.txt' ${file}
+ if [[ ! -s /tmp/changelog.txt ]]; then
+ echo "${line}" >>${file}
+ fi
+}
+
+# replace_or_insert_before: replace every line matching an ERE pattern
+# with a fixed line; if nothing matched, insert the line immediately
+# before the first line matching the anchor pattern instead.
+#   $1: extended regex identifying the line(s) to replace
+#   $2: replacement/insertion line (inserted literally into sed/awk;
+#       must not contain unescaped quoting or metacharacters)
+#   $3: anchor regex — the new line is printed before matches of this
+#   $4: target file (edited in place via a .bak copy for the awk pass)
+function replace_or_insert_before {
+ local -r pattern=$1
+ local -r line=$2
+ local -r before=$3
+ local -r file=$4
+ # sed's 'w' flag records substituted lines; an empty changelog means
+ # no match, so fall through to the awk insertion path.
+ sed -i -E '/'"${pattern}"'/ s/.*/'"${line}"'/w /tmp/changelog.txt' ${file}
+ if [[ ! -s /tmp/changelog.txt ]]; then
+ cp ${file} ${file}.bak
+ # Print the new line before each anchor match; trailing '1' prints
+ # every input line unchanged.
+ awk '/'"${before}"'/ {print "'"${line}"'"}1' ${file}.bak >${file}
+ rm ${file}.bak
+ fi
+}
+
+# replace_or_insert_after: replace every line matching an ERE pattern
+# with a fixed line; if nothing matched, insert the line immediately
+# after the first line matching the anchor pattern instead.
+#   $1: extended regex identifying the line(s) to replace
+#   $2: replacement/insertion line (inserted literally into sed/awk;
+#       must not contain unescaped quoting or metacharacters)
+#   $3: anchor regex — the new line is printed after matches of this
+#   $4: target file (edited in place via a .bak copy for the awk pass)
+function replace_or_insert_after {
+ local -r pattern=$1
+ local -r line=$2
+ local -r after=$3
+ local -r file=$4
+ # sed's 'w' flag records substituted lines; an empty changelog means
+ # no match, so fall through to the awk insertion path.
+ sed -i -E '/'"${pattern}"'/ s/.*/'"${line}"'/w /tmp/changelog.txt' ${file}
+ if [[ ! -s /tmp/changelog.txt ]]; then
+ cp ${file} ${file}.bak
+ # Echo the anchor line, then the new line; 'next' skips the default
+ # print for the anchor; trailing '1' prints all other lines as-is.
+ awk '/'"${after}"'/ {print; print "'"${line}"'"; next}1' ${file}.bak >${file}
+ rm ${file}.bak
+ fi
+}
+
+# Check for GRUB boot password
+# Set user and password in GRUB configuration
+# Password hash generated with grub-mkpasswd-pbkdf2, password: root
+# TODO This is currently disabled as it interferes with the reboot in set_kernel_cmdline.sh
+# cat <<END >>/etc/grub.d/00_header
+# cat <<EOF
+# set superusers="root"
+# password_pbkdf2 root grub.pbkdf2.sha512.10000.E4F52CBE09DFC3C338A314E9EDC8AA682BB2832A35FF2FF9E1D12D30EB3D58E9DDE023F88B8A82CD7BF5FC8138500CD0E67174EBA6EFACF98635A693C5AD4BB9.BB41DC42C8E2C68723B94F14F5F1E43845054A7D443C80F074E9B41C44927FEA2832B0E23C83E6B7C5E1D740B67756FA3093DA9A99B2E461A20F4831BBB289AF
+# EOF
+# END
+# update-grub
+
+# Check password hashing methods
+# Check /etc PAM and configure algorithm rounds
+sed -i -E 's/^(password\s+.*sha512)$/\1 rounds=10000/' /etc/pam.d/common-password
+echo "Passwords in /etc/shadow must be encrypted with new values"
+
+# Check group password hashing rounds
+# Configure minimum encryption algorithm rounds in /etc/login.defs
+replace_or_insert_after '^\s*SHA_CRYPT_MIN_ROUNDS\s+' 'SHA_CRYPT_MIN_ROUNDS 10000' '^#\s+SHA_CRYPT_MIN_ROUNDS' /etc/login.defs
+# Configure maximum encryption algorithm rounds in /etc/login.defs
+replace_or_insert_after '^\s*SHA_CRYPT_MAX_ROUNDS\s+' 'SHA_CRYPT_MAX_ROUNDS 10000' '^#\s+SHA_CRYPT_MAX_ROUNDS' /etc/login.defs
+
+# Checking user password aging
+# Set PASS_MAX_DAYS option in /etc/login.defs
+# PASS_MAX_DAYS of 99999 is considered unconfigured by lynis
+replace_or_insert_before '^\s*PASS_MAX_DAYS\s+' 'PASS_MAX_DAYS 99000' '^PASS_MIN_DAYS' /etc/login.defs
+
+# Default umask values
+# Set default umask in /etc/login.defs to more strict
+replace_or_append '^\s*UMASK\s+' 'UMASK 027' /etc/login.defs
+
+# Check for presence of USBGuard
+# Ensure USBGuard is installed
+apt-get -y install usbguard
+# TODO USB hubs and HID devices must be enabled for BMC Console Redirection
+# Authorize USB hubs in USBGuard daemon
+append 'allow with-interface equals { 09:00:\* }' /etc/usbguard/rules.conf
+# Authorize multi-function Human Interface Devices
+append 'allow with-interface equals { 03:\*:\* 03:\*:\* }' /etc/usbguard/rules.conf
+# Set PresentControllerPolicy to apply-policy in USBGuard daemon
+sed -i -E 's/^PresentControllerPolicy\s*=\s*keep/PresentControllerPolicy=apply-policy/' /etc/usbguard/usbguard-daemon.conf
+chmod 0600 /etc/usbguard/rules.conf
+systemctl restart usbguard
+
+# Checking for debsums utility
+# Install debsums utility
+apt-get -y install debsums
+
+# Check SSH specific defined options
+# Disable AllowTcpForwarding
+replace_or_append '^\s*AllowTcpForwarding\s+' 'AllowTcpForwarding no' /etc/ssh/sshd_config
+# Set ClientAliveCountMax to 2
+replace_or_append '^\s*ClientAliveCountMax\s+' 'ClientAliveCountMax 2' /etc/ssh/sshd_config
+# Set MaxAuthTries to 3
+replace_or_append '^\s*MaxAuthTries\s+' 'MaxAuthTries 3' /etc/ssh/sshd_config
+# Set MaxSessions to 2
+# TODO MaxSessions of 2 prevents lynis from running under bluval
+replace_or_append '^\s*MaxSessions\s+' 'MaxSessions 10' /etc/ssh/sshd_config
+# Server Port is left at the default 22 for now (hardening target is 2222)
+# TODO lynis, etc. robot files need to be updated to handle a different port
+replace_or_append '^\s*Port\s+' 'Port 22' /etc/ssh/sshd_config
+# Client Port is left at the default 22 for now (hardening target is 2222)
+# TODO lynis, etc. robot files need to be updated to handle a different port
+replace_or_append '^\s*Port\s+' ' Port 22' /etc/ssh/ssh_config
+# Disable TCPKeepAlive
+replace_or_append '^\s*TCPKeepAlive\s+' 'TCPKeepAlive no' /etc/ssh/sshd_config
+# Restrict SSH to administrators
+replace_or_append '^\s*AllowGroups\s+' 'AllowGroups root sudo' /etc/ssh/sshd_config
+# Restart SSH
+systemctl restart ssh
+
+# Check sysctl key pairs in scan profile
+cat <<EOF >/etc/sysctl.d/99-zzz-icn.conf
+fs.suid_dumpable = 0
+kernel.core_uses_pid = 1
+kernel.dmesg_restrict = 1
+kernel.kptr_restrict = 2
+kernel.sysrq = 0
+net.ipv4.conf.all.accept_redirects = 0
+# TODO forwarding required by k8s
+# net.ipv4.conf.all.forwarding = 0
+net.ipv4.conf.all.log_martians = 1
+net.ipv4.conf.all.rp_filter = 1
+net.ipv4.conf.all.send_redirects = 0
+net.ipv4.conf.default.accept_redirects = 0
+net.ipv4.conf.default.accept_source_route = 0
+net.ipv4.conf.default.log_martians = 1
+net.ipv6.conf.all.accept_redirects = 0
+net.ipv6.conf.default.accept_redirects = 0
+EOF
+sysctl --system
+
+# Check compiler permissions
+# Uninstall compilers
+apt-get -y remove gcc binutils
# The user account created in all the machines.
userData:
name: ubuntu
- # mkpasswd --method=SHA-512 --rounds 4096 "mypasswd"
- hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+ # mkpasswd --method=SHA-512 --rounds 10000 "mypasswd"
+ hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
# This key will also be authorized to login as the root user
sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
- name: {{ $clusterName }}-flux-addon
kind: ConfigMap
{{- end }}
+ - name: {{ $clusterName }}-podsecurity-addon
+ kind: ConfigMap
{{- end }}
- apt-get install -y kubelet={{ $cluster.kubeVersion }} kubeadm={{ $cluster.kubeVersion }} kubectl={{ $cluster.kubeVersion }}
- systemctl enable --now kubelet
postKubeadmCommands:
+ - /usr/local/bin/harden_os.sh
# This must be done after kubeadm as the cabpk provider relies
# on files in /var/run, which won't persist after a reboot
- /usr/local/bin/set_kernel_cmdline.sh
- path: /etc/systemd/system/containerd.service.d/override.conf
content: |
{{ $.Files.Get "resources/override.conf" | indent 10 }}
+ - path: /usr/local/bin/harden_os.sh
+ permissions: '0777'
+ content: |
+{{ $.Files.Get "resources/harden_os.sh" | indent 10 }}
- path: /usr/local/bin/set_kernel_cmdline.sh
permissions: '0777'
content: |
sshAuthorizedKeys:
- {{ $cluster.userData.sshAuthorizedKey }}
sudo: "ALL=(ALL) NOPASSWD:ALL"
+ groups: sudo # Necessary to allow SSH logins (see /etc/ssh/sshd_config)
- name: root
sshAuthorizedKeys:
- {{ $cluster.userData.sshAuthorizedKey }}
name: {{ $clusterName }}
spec:
kubeadmConfigSpec:
+ clusterConfiguration:
+ apiServer:
+ extraArgs:
+ enable-admission-plugins: NodeRestriction,PodSecurityPolicy
initConfiguration:
nodeRegistration:
kubeletExtraArgs:
- mkdir -p /home/ubuntu/.kube
- cp /etc/kubernetes/admin.conf /home/ubuntu/.kube/config
- chown ubuntu:ubuntu /home/ubuntu/.kube/config
+ - mkdir -p /root/.kube
+ - cp /etc/kubernetes/admin.conf /root/.kube/config
+ - /usr/local/bin/harden_os.sh
+ # Normally any bootstrap resources needed would be applied with a
+ # ClusterResourceSet. However instead of apply, replace must be
+ # used to harden K8s.
+ - /usr/local/bin/harden_k8s.sh
# This must be done after kubeadm as the cabpk provider relies on
# files in /var/run, which won't persist after a reboot
- /usr/local/bin/set_kernel_cmdline.sh
- path: /etc/systemd/system/containerd.service.d/override.conf
content: |
{{ $.Files.Get "resources/override.conf" | indent 8 }}
+ - path: /usr/local/bin/harden_os.sh
+ permissions: '0777'
+ content: |
+{{ $.Files.Get "resources/harden_os.sh" | indent 8 }}
+ - path: /usr/local/bin/harden_k8s.sh
+ permissions: '0777'
+ content: |
+{{ $.Files.Get "resources/harden_k8s.sh" | indent 8 }}
- path: /usr/local/bin/set_kernel_cmdline.sh
permissions: '0777'
content: |
sshAuthorizedKeys:
- {{ $cluster.userData.sshAuthorizedKey }}
sudo: "ALL=(ALL) NOPASSWD:ALL"
+ groups: sudo # Necessary to allow SSH logins (see /etc/ssh/sshd_config)
- name: root
sshAuthorizedKeys:
- {{ $cluster.userData.sshAuthorizedKey }}
--- /dev/null
+{{- range $clusterName, $cluster := .Values.clusters }}
+---
+apiVersion: v1
+data:
+ podsecurity.yaml: |
+ ---
+ apiVersion: policy/v1beta1
+ kind: PodSecurityPolicy
+ metadata:
+ name: privileged
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+ spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ allowedCapabilities:
+ - '*'
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: true
+ hostPID: true
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ ---
+ apiVersion: policy/v1beta1
+ kind: PodSecurityPolicy
+ metadata:
+ name: baseline
+ annotations:
+ # Optional: Allow the default AppArmor profile, requires setting the default.
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+ spec:
+ privileged: false
+ # The moby default capability set, minus NET_RAW
+ allowedCapabilities:
+ - 'CHOWN'
+ - 'DAC_OVERRIDE'
+ - 'FSETID'
+ - 'FOWNER'
+ - 'MKNOD'
+ - 'SETGID'
+ - 'SETUID'
+ - 'SETFCAP'
+ - 'SETPCAP'
+ - 'NET_BIND_SERVICE'
+ - 'SYS_CHROOT'
+ - 'KILL'
+ - 'AUDIT_WRITE'
+ # Allow all volume types except hostpath
+ volumes:
+ # 'core' volume types
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use.
+ - 'csi'
+ - 'persistentVolumeClaim'
+ - 'ephemeral'
+ # Allow all other non-hostpath volume types.
+ - 'awsElasticBlockStore'
+ - 'azureDisk'
+ - 'azureFile'
+ - 'cephFS'
+ - 'cinder'
+ - 'fc'
+ - 'flexVolume'
+ - 'flocker'
+ - 'gcePersistentDisk'
+ - 'gitRepo'
+ - 'glusterfs'
+ - 'iscsi'
+ - 'nfs'
+ - 'photonPersistentDisk'
+ - 'portworxVolume'
+ - 'quobyte'
+ - 'rbd'
+ - 'scaleIO'
+ - 'storageos'
+ - 'vsphereVolume'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ readOnlyRootFilesystem: false
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ # The PSP SELinux API cannot express the SELinux Pod Security Standards,
+ # so if using SELinux, you must choose a more restrictive default.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ ---
+ apiVersion: policy/v1beta1
+ kind: PodSecurityPolicy
+ metadata:
+ name: restricted
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
+ apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+ apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
+ spec:
+ privileged: false
+ # Required to prevent escalations to root.
+ allowPrivilegeEscalation: false
+ requiredDropCapabilities:
+ - ALL
+ # Allow core volume types.
+ volumes:
+ - 'configMap'
+ - 'emptyDir'
+ - 'projected'
+ - 'secret'
+ - 'downwardAPI'
+ # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use.
+ - 'csi'
+ - 'persistentVolumeClaim'
+ - 'ephemeral'
+ hostNetwork: false
+ hostIPC: false
+ hostPID: false
+ runAsUser:
+ # Require the container to run without root privileges.
+ rule: 'MustRunAsNonRoot'
+ seLinux:
+ # This policy assumes the nodes are using AppArmor rather than SELinux.
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ fsGroup:
+ rule: 'MustRunAs'
+ ranges:
+ # Forbid adding the root group.
+ - min: 1
+ max: 65535
+ readOnlyRootFilesystem: false
+ ---
+ apiVersion: policy/v1beta1
+ kind: PodSecurityPolicy
+ metadata:
+ name: icn
+ annotations:
+ seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+ spec:
+ privileged: true
+ allowPrivilegeEscalation: true
+ volumes:
+ - '*'
+ hostNetwork: true
+ hostPorts:
+ - min: 0
+ max: 65535
+ hostIPC: true
+ hostPID: true
+ runAsUser:
+ rule: 'RunAsAny'
+ seLinux:
+ rule: 'RunAsAny'
+ supplementalGroups:
+ rule: 'RunAsAny'
+ fsGroup:
+ rule: 'RunAsAny'
+ allowedCapabilities:
+ - 'NET_ADMIN'
+ - 'SYS_ADMIN'
+ - 'SYS_NICE'
+ - 'SYS_PTRACE'
+ requiredDropCapabilities:
+ - 'NET_RAW'
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: psp:privileged
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - privileged
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: psp:baseline
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - baseline
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: psp:icn
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - icn
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRole
+ metadata:
+ name: psp:restricted
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ rules:
+ - apiGroups:
+ - policy
+ resourceNames:
+ - restricted
+ resources:
+ - podsecuritypolicies
+ verbs:
+ - use
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: psp:privileged:nodes
+ namespace: kube-system
+ labels:
+ addonmanager.kubernetes.io/mode: Reconcile
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+ subjects:
+ - kind: Group
+ name: system:nodes
+ apiGroup: rbac.authorization.k8s.io
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: RoleBinding
+ metadata:
+ name: psp:privileged:kube-system
+ namespace: kube-system
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: psp:privileged
+ subjects:
+ - kind: Group
+ name: system:serviceaccounts:kube-system
+ apiGroup: rbac.authorization.k8s.io
+ ---
+ apiVersion: rbac.authorization.k8s.io/v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: psp:icn:any
+ roleRef:
+ kind: ClusterRole
+ name: psp:icn
+ apiGroup: rbac.authorization.k8s.io
+ subjects:
+ - kind: Group
+ name: system:authenticated
+ apiGroup: rbac.authorization.k8s.io
+kind: ConfigMap
+metadata:
+ creationTimestamp: null
+ name: {{ $clusterName }}-podsecurity-addon
+{{- end }}
kustomize build ${BUILDDIR}/webhook/base | KUBECONFIG=${cluster_kubeconfig} kubectl delete -f -
}
+# is_kata_deployed: predicate for wait_for — succeeds (kubectl exit 0)
+# once the kata-qemu RuntimeClass exists in the workload cluster, i.e.
+# the Kata containers deployment has landed. Relies on the kubeconfig
+# already extracted to ${BUILDDIR}/<cluster>.conf by the caller.
+function is_kata_deployed {
+ local -r cluster_name=${CLUSTER_NAME:-e2etest}
+ local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+ kubectl --kubeconfig=${cluster_kubeconfig} get runtimeclass/kata-qemu
+}
+
function test_kata {
# Create a temporary kubeconfig file for the tests
local -r cluster_name=${CLUSTER_NAME:-e2etest}
local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
clusterctl -n metal3 get kubeconfig ${cluster_name} >${cluster_kubeconfig}
+ # Ensure that Kata has been deployed first
+ wait_for is_kata_deployed
+
deploy_webhook ${cluster_name}
clone_kud_repository
pushd ${KUDPATH}/kud/tests
# Optional
userData:
name: ubuntu
- hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+ hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
fqdn: machine-1.akraino.icn.org
--- /dev/null
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
--- /dev/null
+# A simple chart to create a RoleBinding of a Namespace to a
+# PodSecurityPolicy. This can be used with the Flux "dependsOn"
+# feature to install the binding before deploying additional
+# HelmReleases into the Namespace.
+apiVersion: v2
+name: podsecurity
+type: application
+version: 0.1.0
--- /dev/null
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: {{ .Values.roleRef }}-{{ .Release.Namespace }}
+ namespace: {{ .Release.Namespace }}
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: {{ .Values.roleRef }}
+subjects:
+- kind: Group
+ name: system:serviceaccounts:{{ .Release.Namespace }}
+ apiGroup: rbac.authorization.k8s.io
--- /dev/null
+# Under ICN, roleRef may be psp:restricted, psp:baseline, or
+# psp:privileged
+roleRef: psp:baseline
interface: ens5
userData:
name: ubuntu
- hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+ hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
flux:
repositoryName: icn
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
targetNamespace: emco
install:
createNamespace: true
+ dependsOn:
+ - name: emco-podsecurity
values:
global:
repository: integratedcloudnative/
--- /dev/null
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: emco-podsecurity
+ namespace: flux-system
+spec:
+ interval: 5m
+ chart:
+ spec:
+ chart: deploy/podsecurity
+ sourceRef:
+ kind: GitRepository
+ name: icn
+ namespace: flux-system
+ interval: 1m
+ releaseName: emco-podsecurity
+ targetNamespace: emco
+ install:
+ createNamespace: true
+ values:
+ roleRef: psp:privileged
targetNamespace: emco
install:
createNamespace: true
+ dependsOn:
+ - name: emco-podsecurity
values:
global:
repository: integratedcloudnative/
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
--- /dev/null
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+ name: kud-podsecurity
+ namespace: flux-system
+spec:
+ interval: 5m
+ chart:
+ spec:
+ chart: deploy/podsecurity
+ sourceRef:
+ kind: GitRepository
+ name: icn
+ namespace: flux-system
+ interval: 1m
+ releaseName: kud-podsecurity
+ targetNamespace: kud
+ install:
+ createNamespace: true
+ values:
+ roleRef: psp:privileged
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
targetNamespace: kud
install:
createNamespace: true
+ dependsOn:
+ - name: kud-podsecurity
[[ $(kubectl -n metal3 get cluster e2etest -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
}
-function are_kustomizations_ready {
- [[ $(kubectl --kubeconfig=${BUILDDIR}/e2etest-admin.conf get Kustomization -n flux-system -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c -v True) == 0 ]]
-}
-
-function are_helmreleases_ready {
- [[ $(kubectl --kubeconfig=${BUILDDIR}/e2etest-admin.conf get HelmRelease -n flux-system -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c -v True) == 0 ]]
+# is_control_plane_ready: predicate for wait_for — succeeds when every
+# node labeled as control-plane reports a Ready condition of "True".
+# The jsonpath emits one status per node; grep -c -v True counts the
+# nodes that are NOT Ready, so zero means all control-plane nodes are up.
+function is_control_plane_ready {
+ [[ $(kubectl --kubeconfig=${BUILDDIR}/e2etest-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c -v True) == 0 ]]
+}
function wait_for_all_ready {
wait_for is_cluster_ready
clusterctl -n metal3 get kubeconfig e2etest >${BUILDDIR}/e2etest-admin.conf
chmod 600 ${BUILDDIR}/e2etest-admin.conf
- # TODO The following checks are not ideal: resources created by
- # operators aren't detected here, but this is the best that can be
- # currently done
- WAIT_FOR_INTERVAL=30s
- wait_for are_kustomizations_ready
- wait_for are_helmreleases_ready
+ wait_for is_control_plane_ready
}
case $1 in
#Cluster API version to use
CAPI_VERSION="v0.4.3"
+#Cluster API Provider for Metal3 (CAPM3) version to use
+CAPM3_VERSION="v0.5.1"
+
#The flux version to use
FLUX_VERSION="0.20.0"