Merge "Temporarily remove kubevirt from addons self-test"
author: Kuralamudhan Ramakrishnan <kuralamudhan.ramakrishnan@intel.com>
Tue, 7 Dec 2021 00:28:23 +0000 (00:28 +0000)
committer: Gerrit Code Review <gerrit@akraino.org>
Tue, 7 Dec 2021 00:28:23 +0000 (00:28 +0000)
31 files changed:
deploy/cluster-api/cluster-api.sh
deploy/clusters/clusters.sh
deploy/clusters/ha-dhcp-values.yaml
deploy/clusters/resources/harden_k8s.sh [new file with mode: 0644]
deploy/clusters/resources/harden_os.sh [new file with mode: 0644]
deploy/clusters/static-values.yaml
deploy/clusters/templates/clusterresourceset.yaml
deploy/clusters/templates/kubeadmconfigtemplate.yaml
deploy/clusters/templates/kubeadmcontrolplane.yaml
deploy/clusters/templates/podsecurity-addon.yaml [new file with mode: 0644]
deploy/kata/kata.sh
deploy/machines/example-values.yaml
deploy/podsecurity/.helmignore [new file with mode: 0644]
deploy/podsecurity/Chart.yaml [new file with mode: 0644]
deploy/podsecurity/templates/rolebinding.yaml [new file with mode: 0644]
deploy/podsecurity/values.yaml [new file with mode: 0644]
deploy/site/vm/clusters-values.yaml
deploy/site/vm/e2etest/cdi-operator-release.yaml
deploy/site/vm/e2etest/cpu-manager-release.yaml
deploy/site/vm/e2etest/emco-db-release.yaml
deploy/site/vm/e2etest/emco-podsecurity-release.yaml [new file with mode: 0644]
deploy/site/vm/e2etest/emco-tools-release.yaml
deploy/site/vm/e2etest/kubevirt-operator-release.yaml
deploy/site/vm/e2etest/kud-podsecurity.yaml [new file with mode: 0644]
deploy/site/vm/e2etest/multus-cni-release.yaml
deploy/site/vm/e2etest/node-feature-discovery-release.yaml
deploy/site/vm/e2etest/ovn4nfv-release.yaml
deploy/site/vm/e2etest/qat-device-plugin-release.yaml
deploy/site/vm/e2etest/sriov-network-operator-release.yaml
deploy/site/vm/vm.sh
env/lib/common.sh

index f1e48d9..b1f533c 100755 (executable)
@@ -9,7 +9,7 @@ source $LIBDIR/common.sh
 
 function deploy {
     export EXP_CLUSTER_RESOURCE_SET=true
-    clusterctl init --infrastructure=metal3
+    clusterctl init --infrastructure=metal3:${CAPM3_VERSION}
 }
 
 function clean {
index b10d2ca..73f63d8 100755 (executable)
@@ -68,6 +68,154 @@ $(kubectl create configmap flux-addon --from-file=${SCRIPTDIR}/addons/flux-syste
 {{- end }}
 EOF
     sed -i -e 's/  name: flux-addon/  name: {{ $clusterName }}-flux-addon/' ${SCRIPTDIR}/templates/flux-addon.yaml
+
+    # PodSecurityPolicy is being replaced in future versions of K8s.
+    # The recommended practice is described by K8s at
+    # - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#recommended-practice
+    # - https://kubernetes.io/docs/concepts/security/pod-security-standards/
+    # and provides three levels: privileged, baseline, and restricted.
+    #
+    # The question to answer here is how to reconcile the K8s levels
+    # against the Akraino security requirements.
+    #
+    # For the time being, the below populates the cluster with the K8s
+    # recommended levels and provides an additional policy (icn) bound
+    # to the system:authenticated group to meet the Akraino
+    # requirements.
+    cat <<EOF >${SCRIPTDIR}/addons/podsecurity.yaml
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/privileged-psp.yaml)
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/baseline-psp.yaml)
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/restricted-psp.yaml)
+---
+$(curl -sL https://raw.githubusercontent.com/kubernetes/website/main/content/en/examples/policy/privileged-psp.yaml |
+  sed -e 's/  name: privileged/  name: icn/' |
+  sed -e '/^  allowedCapabilities:/,/^  [!-]/d')
+  allowedCapabilities:
+    - 'NET_ADMIN'
+    - 'SYS_ADMIN'
+    - 'SYS_NICE'
+    - 'SYS_PTRACE'
+  requiredDropCapabilities:
+    - 'NET_RAW'
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: psp:privileged
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - privileged
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: psp:baseline
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - baseline
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: psp:icn
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - icn
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: psp:restricted
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - policy
+  resourceNames:
+  - restricted
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: psp:privileged:nodes
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:privileged
+subjects:
+- kind: Group
+  name: system:nodes
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: psp:privileged:kube-system
+  namespace: kube-system
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: psp:privileged
+subjects:
+- kind: Group
+  name: system:serviceaccounts:kube-system
+  apiGroup: rbac.authorization.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: psp:icn:any
+roleRef:
+  kind: ClusterRole
+  name: psp:icn
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: Group
+  name: system:authenticated
+  apiGroup: rbac.authorization.k8s.io
+EOF
+    cat <<EOF >${SCRIPTDIR}/templates/podsecurity-addon.yaml
+{{- range \$clusterName, \$cluster := .Values.clusters }}
+---
+$(kubectl create configmap podsecurity-addon --from-file=${SCRIPTDIR}/addons/podsecurity.yaml -o yaml --dry-run=client)
+{{- end }}
+EOF
+    sed -i -e 's/  name: podsecurity-addon/  name: {{ $clusterName }}-podsecurity-addon/' ${SCRIPTDIR}/templates/podsecurity-addon.yaml
+
 }
 
 case $1 in
index b001dad..e36ad63 100644 (file)
@@ -39,8 +39,8 @@ clusters:
     # The user account created in all the machines.
     userData:
       name: ubuntu
-      # mkpasswd --method=SHA-512 --rounds 4096 "mypasswd"
-      hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+      # mkpasswd --method=SHA-512 --rounds 10000 "mypasswd"
+      hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
       # This key will also be authorized to login as the root user
       sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
 
diff --git a/deploy/clusters/resources/harden_k8s.sh b/deploy/clusters/resources/harden_k8s.sh
new file mode 100644 (file)
index 0000000..7c7780b
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+set -eux -o pipefail
+
+# Remove visibility of /version
+kubectl --kubeconfig=/etc/kubernetes/admin.conf replace -f - <<EOF
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "false"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:public-info-viewer
+rules:
+- nonResourceURLs:
+  - /healthz
+  - /livez
+  - /readyz
+  verbs:
+  - get
+EOF
+
+# Opt out of automatic mounting of SA token
+kubectl --kubeconfig=/etc/kubernetes/admin.conf replace -f - <<EOF
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: default
+automountServiceAccountToken: false
+EOF
diff --git a/deploy/clusters/resources/harden_os.sh b/deploy/clusters/resources/harden_os.sh
new file mode 100644 (file)
index 0000000..8af1893
--- /dev/null
@@ -0,0 +1,143 @@
+#!/usr/bin/env bash
+set -eux -o pipefail
+
+function append {
+    local -r line=$1
+    local -r file=$2
+    if [[ $(grep -c "${line}" ${file}) == 0 ]]; then
+       echo "${line}" >>${file}
+    fi
+}
+
+function replace_or_append {
+    local -r pattern=$1
+    local -r line=$2
+    local -r file=$3
+    sed -i -E '/'"${pattern}"'/ s/.*/'"${line}"'/w /tmp/changelog.txt' ${file}
+    if [[ ! -s /tmp/changelog.txt ]]; then
+       echo "${line}" >>${file}
+    fi
+}
+
+function replace_or_insert_before {
+    local -r pattern=$1
+    local -r line=$2
+    local -r before=$3
+    local -r file=$4
+    sed -i -E '/'"${pattern}"'/ s/.*/'"${line}"'/w /tmp/changelog.txt' ${file}
+    if [[ ! -s /tmp/changelog.txt ]]; then
+       cp ${file} ${file}.bak
+       awk '/'"${before}"'/ {print "'"${line}"'"}1' ${file}.bak >${file}
+       rm ${file}.bak
+    fi
+}
+
+function replace_or_insert_after {
+    local -r pattern=$1
+    local -r line=$2
+    local -r after=$3
+    local -r file=$4
+    sed -i -E '/'"${pattern}"'/ s/.*/'"${line}"'/w /tmp/changelog.txt' ${file}
+    if [[ ! -s /tmp/changelog.txt ]]; then
+       cp ${file} ${file}.bak
+       awk '/'"${after}"'/ {print; print "'"${line}"'"; next}1' ${file}.bak >${file}
+       rm ${file}.bak
+    fi
+}
+
+# Check for GRUB boot password
+# Set user and password in GRUB configuration
+# Password hash generated with grub-mkpasswd-pbkdf2, password: root
+# TODO This is currently disabled as it interferes with the reboot in set_kernel_cmdline.sh
+# cat <<END >>/etc/grub.d/00_header
+# cat <<EOF
+# set superusers="root"
+# password_pbkdf2 root grub.pbkdf2.sha512.10000.E4F52CBE09DFC3C338A314E9EDC8AA682BB2832A35FF2FF9E1D12D30EB3D58E9DDE023F88B8A82CD7BF5FC8138500CD0E67174EBA6EFACF98635A693C5AD4BB9.BB41DC42C8E2C68723B94F14F5F1E43845054A7D443C80F074E9B41C44927FEA2832B0E23C83E6B7C5E1D740B67756FA3093DA9A99B2E461A20F4831BBB289AF
+# EOF
+# END
+# update-grub
+
+# Check password hashing methods
+# Check /etc PAM and configure algorithm rounds
+sed -i -E 's/^(password\s+.*sha512)$/\1 rounds=10000/' /etc/pam.d/common-password
+echo "Passwords in /etc/shadow must be encrypted with new values"
+
+# Check group password hashing rounds
+# Configure minimum encryption algorithm rounds in /etc/login.defs
+replace_or_insert_after '^\s*SHA_CRYPT_MIN_ROUNDS\s+' 'SHA_CRYPT_MIN_ROUNDS 10000' '^#\s+SHA_CRYPT_MIN_ROUNDS' /etc/login.defs
+# Configure maximum encryption algorithm rounds in /etc/login.defs
+replace_or_insert_after '^\s*SHA_CRYPT_MAX_ROUNDS\s+' 'SHA_CRYPT_MAX_ROUNDS 10000' '^#\s+SHA_CRYPT_MAX_ROUNDS' /etc/login.defs
+
+# Checking user password aging
+# Set PASS_MAX_DAYS option in /etc/login.defs
+# PASS_MAX_DAYS of 99999 is considered unconfigured by lynis
+replace_or_insert_before '^\s*PASS_MAX_DAYS\s+' 'PASS_MAX_DAYS 99000' '^PASS_MIN_DAYS' /etc/login.defs
+
+# Default umask values
+# Set default umask in /etc/login.defs to more strict
+replace_or_append '^\s*UMASK\s+' 'UMASK 027' /etc/login.defs
+
+# Check for presence of USBGuard
+# Ensure USBGuard is installed
+apt-get -y install usbguard
+# TODO USB hubs and HID device must be enabled for BMC Console Redirection
+# Authorize USB hubs in USBGuard daemon
+append 'allow with-interface equals { 09:00:\* }' /etc/usbguard/rules.conf
+# Authorize multi-function Human Interface Devices
+append 'allow with-interface equals { 03:\*:\* 03:\*:\* }' /etc/usbguard/rules.conf
+# Set PresentControllerPolicy to apply-policy in USBGuard daemon
+sed -i -E 's/^PresentControllerPolicy\s*=\s*keep/PresentControllerPolicy=apply-policy/' /etc/usbguard/usbguard-daemon.conf
+chmod 0600 /etc/usbguard/rules.conf
+systemctl restart usbguard
+
+# Checking for debsums utility
+# Install debsums utility
+apt-get -y install debsums
+
+# Check SSH specific defined options
+# Disable AllowTcpForwarding
+replace_or_append '^\s*AllowTcpForwarding\s+' 'AllowTcpForwarding no' /etc/ssh/sshd_config
+# Set ClientAliveCountMax to 2
+replace_or_append '^\s*ClientAliveCountMax\s+' 'ClientAliveCountMax 2' /etc/ssh/sshd_config
+# Set MaxAuthTries to 3
+replace_or_append '^\s*MaxAuthTries\s+' 'MaxAuthTries 3' /etc/ssh/sshd_config
+# Set MaxSessions to 2
+# TODO MaxSessions of 2 prevents lynis from running under bluval
+replace_or_append '^\s*MaxSessions\s+' 'MaxSessions 10' /etc/ssh/sshd_config
+# Set server Port to 2222
+# TODO lynis, etc. robot files need to be updated to handle a different port
+replace_or_append '^\s*Port\s+' 'Port 22' /etc/ssh/sshd_config
+# Set client Port to 2222
+# TODO lynis, etc. robot files need to be updated to handle a different port
+replace_or_append '^\s*Port\s+' '    Port 22' /etc/ssh/ssh_config
+# Disable TCPKeepAlive
+replace_or_append '^\s*TCPKeepAlive\s+' 'TCPKeepAlive no' /etc/ssh/sshd_config
+# Restrict SSH to administrators
+replace_or_append '^\s*AllowGroups\s+' 'AllowGroups root sudo' /etc/ssh/sshd_config
+# Restart SSH
+systemctl restart ssh
+
+# Check sysctl key pairs in scan profile
+cat <<EOF >/etc/sysctl.d/99-zzz-icn.conf
+fs.suid_dumpable = 0
+kernel.core_uses_pid = 1
+kernel.dmesg_restrict = 1
+kernel.kptr_restrict = 2
+kernel.sysrq = 0
+net.ipv4.conf.all.accept_redirects = 0
+# TODO forwarding required by k8s
+# net.ipv4.conf.all.forwarding = 0
+net.ipv4.conf.all.log_martians = 1
+net.ipv4.conf.all.rp_filter = 1
+net.ipv4.conf.all.send_redirects = 0
+net.ipv4.conf.default.accept_redirects = 0
+net.ipv4.conf.default.accept_source_route = 0
+net.ipv4.conf.default.log_martians = 1
+net.ipv6.conf.all.accept_redirects = 0
+net.ipv6.conf.default.accept_redirects = 0
+EOF
+sysctl --system
+
+# Check compiler permissions
+# Uninstall compilers
+apt-get -y remove gcc binutils
index 4df5373..f4ecfe0 100644 (file)
@@ -35,8 +35,8 @@ clusters:
     # The user account created in all the machines.
     userData:
       name: ubuntu
-      # mkpasswd --method=SHA-512 --rounds 4096 "mypasswd"
-      hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+      # mkpasswd --method=SHA-512 --rounds 10000 "mypasswd"
+      hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
       # This key will also be authorized to login as the root user
       sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
 
index 5146d3a..d12e253 100644 (file)
@@ -17,4 +17,6 @@ spec:
   - name: {{ $clusterName }}-flux-addon
     kind: ConfigMap
 {{- end }}
+  - name: {{ $clusterName }}-podsecurity-addon
+    kind: ConfigMap
 {{- end }}
index 2bfc97d..741bf69 100644 (file)
@@ -31,6 +31,7 @@ spec:
       - apt-get install -y kubelet={{ $cluster.kubeVersion }} kubeadm={{ $cluster.kubeVersion }} kubectl={{ $cluster.kubeVersion }}
       - systemctl enable --now kubelet
       postKubeadmCommands:
+      - /usr/local/bin/harden_os.sh
       # This must be done after kubeadm as the cabpk provider relies
       # on files in /var/run, which won't persist after a reboot
       - /usr/local/bin/set_kernel_cmdline.sh
@@ -39,6 +40,10 @@ spec:
       - path: /etc/systemd/system/containerd.service.d/override.conf
         content: |
 {{ $.Files.Get "resources/override.conf" | indent 10 }}
+      - path: /usr/local/bin/harden_os.sh
+        permissions: '0777'
+        content: |
+{{ $.Files.Get "resources/harden_os.sh" | indent 10 }}
       - path: /usr/local/bin/set_kernel_cmdline.sh
         permissions: '0777'
         content: |
@@ -51,6 +56,7 @@ spec:
         sshAuthorizedKeys:
         - {{ $cluster.userData.sshAuthorizedKey }}
         sudo: "ALL=(ALL) NOPASSWD:ALL"
+        groups: sudo # Necessary to allow SSH logins (see /etc/ssh/sshd_config)
       - name: root
         sshAuthorizedKeys:
         - {{ $cluster.userData.sshAuthorizedKey }}
index a3881b6..99c8bef 100644 (file)
@@ -6,6 +6,10 @@ metadata:
   name: {{ $clusterName }}
 spec:
   kubeadmConfigSpec:
+    clusterConfiguration:
+      apiServer:
+        extraArgs:
+          enable-admission-plugins: NodeRestriction,PodSecurityPolicy
     initConfiguration:
       nodeRegistration:
         kubeletExtraArgs:
@@ -43,6 +47,13 @@ spec:
     - mkdir -p /home/ubuntu/.kube
     - cp /etc/kubernetes/admin.conf /home/ubuntu/.kube/config
     - chown ubuntu:ubuntu /home/ubuntu/.kube/config
+    - mkdir -p /root/.kube
+    - cp /etc/kubernetes/admin.conf /root/.kube/config
+    - /usr/local/bin/harden_os.sh
+    # Normally any bootstrap resources needed would be applied with a
+    # ClusterResourceSet.  However instead of apply, replace must be
+    # used to harden K8s.
+    - /usr/local/bin/harden_k8s.sh
     # This must be done after kubeadm as the cabpk provider relies on
     # files in /var/run, which won't persist after a reboot
     - /usr/local/bin/set_kernel_cmdline.sh
@@ -52,6 +63,14 @@ spec:
     - path: /etc/systemd/system/containerd.service.d/override.conf
       content: |
 {{ $.Files.Get "resources/override.conf" | indent 8 }}
+    - path: /usr/local/bin/harden_os.sh
+      permissions: '0777'
+      content: |
+{{ $.Files.Get "resources/harden_os.sh" | indent 8 }}
+    - path: /usr/local/bin/harden_k8s.sh
+      permissions: '0777'
+      content: |
+{{ $.Files.Get "resources/harden_k8s.sh" | indent 8 }}
     - path: /usr/local/bin/set_kernel_cmdline.sh
       permissions: '0777'
       content: |
@@ -64,6 +83,7 @@ spec:
       sshAuthorizedKeys:
       - {{ $cluster.userData.sshAuthorizedKey }}
       sudo: "ALL=(ALL) NOPASSWD:ALL"
+      groups: sudo # Necessary to allow SSH logins (see /etc/ssh/sshd_config)
     - name: root
       sshAuthorizedKeys:
       - {{ $cluster.userData.sshAuthorizedKey }}
diff --git a/deploy/clusters/templates/podsecurity-addon.yaml b/deploy/clusters/templates/podsecurity-addon.yaml
new file mode 100644 (file)
index 0000000..55b9607
--- /dev/null
@@ -0,0 +1,301 @@
+{{- range $clusterName, $cluster := .Values.clusters }}
+---
+apiVersion: v1
+data:
+  podsecurity.yaml: |
+    ---
+    apiVersion: policy/v1beta1
+    kind: PodSecurityPolicy
+    metadata:
+      name: privileged
+      annotations:
+        seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    spec:
+      privileged: true
+      allowPrivilegeEscalation: true
+      allowedCapabilities:
+      - '*'
+      volumes:
+      - '*'
+      hostNetwork: true
+      hostPorts:
+      - min: 0
+        max: 65535
+      hostIPC: true
+      hostPID: true
+      runAsUser:
+        rule: 'RunAsAny'
+      seLinux:
+        rule: 'RunAsAny'
+      supplementalGroups:
+        rule: 'RunAsAny'
+      fsGroup:
+        rule: 'RunAsAny'
+    ---
+    apiVersion: policy/v1beta1
+    kind: PodSecurityPolicy
+    metadata:
+      name: baseline
+      annotations:
+        # Optional: Allow the default AppArmor profile, requires setting the default.
+        apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+        apparmor.security.beta.kubernetes.io/defaultProfileName:  'runtime/default'
+        seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    spec:
+      privileged: false
+      # The moby default capability set, minus NET_RAW
+      allowedCapabilities:
+        - 'CHOWN'
+        - 'DAC_OVERRIDE'
+        - 'FSETID'
+        - 'FOWNER'
+        - 'MKNOD'
+        - 'SETGID'
+        - 'SETUID'
+        - 'SETFCAP'
+        - 'SETPCAP'
+        - 'NET_BIND_SERVICE'
+        - 'SYS_CHROOT'
+        - 'KILL'
+        - 'AUDIT_WRITE'
+      # Allow all volume types except hostpath
+      volumes:
+        # 'core' volume types
+        - 'configMap'
+        - 'emptyDir'
+        - 'projected'
+        - 'secret'
+        - 'downwardAPI'
+        # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use.
+        - 'csi'
+        - 'persistentVolumeClaim'
+        - 'ephemeral'
+        # Allow all other non-hostpath volume types.
+        - 'awsElasticBlockStore'
+        - 'azureDisk'
+        - 'azureFile'
+        - 'cephFS'
+        - 'cinder'
+        - 'fc'
+        - 'flexVolume'
+        - 'flocker'
+        - 'gcePersistentDisk'
+        - 'gitRepo'
+        - 'glusterfs'
+        - 'iscsi'
+        - 'nfs'
+        - 'photonPersistentDisk'
+        - 'portworxVolume'
+        - 'quobyte'
+        - 'rbd'
+        - 'scaleIO'
+        - 'storageos'
+        - 'vsphereVolume'
+      hostNetwork: false
+      hostIPC: false
+      hostPID: false
+      readOnlyRootFilesystem: false
+      runAsUser:
+        rule: 'RunAsAny'
+      seLinux:
+        # This policy assumes the nodes are using AppArmor rather than SELinux.
+        # The PSP SELinux API cannot express the SELinux Pod Security Standards,
+        # so if using SELinux, you must choose a more restrictive default.
+        rule: 'RunAsAny'
+      supplementalGroups:
+        rule: 'RunAsAny'
+      fsGroup:
+        rule: 'RunAsAny'
+    ---
+    apiVersion: policy/v1beta1
+    kind: PodSecurityPolicy
+    metadata:
+      name: restricted
+      annotations:
+        seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default'
+        apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
+        apparmor.security.beta.kubernetes.io/defaultProfileName:  'runtime/default'
+    spec:
+      privileged: false
+      # Required to prevent escalations to root.
+      allowPrivilegeEscalation: false
+      requiredDropCapabilities:
+        - ALL
+      # Allow core volume types.
+      volumes:
+        - 'configMap'
+        - 'emptyDir'
+        - 'projected'
+        - 'secret'
+        - 'downwardAPI'
+        # Assume that ephemeral CSI drivers & persistentVolumes set up by the cluster admin are safe to use.
+        - 'csi'
+        - 'persistentVolumeClaim'
+        - 'ephemeral'
+      hostNetwork: false
+      hostIPC: false
+      hostPID: false
+      runAsUser:
+        # Require the container to run without root privileges.
+        rule: 'MustRunAsNonRoot'
+      seLinux:
+        # This policy assumes the nodes are using AppArmor rather than SELinux.
+        rule: 'RunAsAny'
+      supplementalGroups:
+        rule: 'MustRunAs'
+        ranges:
+          # Forbid adding the root group.
+          - min: 1
+            max: 65535
+      fsGroup:
+        rule: 'MustRunAs'
+        ranges:
+          # Forbid adding the root group.
+          - min: 1
+            max: 65535
+      readOnlyRootFilesystem: false
+    ---
+    apiVersion: policy/v1beta1
+    kind: PodSecurityPolicy
+    metadata:
+      name: icn
+      annotations:
+        seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
+    spec:
+      privileged: true
+      allowPrivilegeEscalation: true
+      volumes:
+      - '*'
+      hostNetwork: true
+      hostPorts:
+      - min: 0
+        max: 65535
+      hostIPC: true
+      hostPID: true
+      runAsUser:
+        rule: 'RunAsAny'
+      seLinux:
+        rule: 'RunAsAny'
+      supplementalGroups:
+        rule: 'RunAsAny'
+      fsGroup:
+        rule: 'RunAsAny'
+      allowedCapabilities:
+        - 'NET_ADMIN'
+        - 'SYS_ADMIN'
+        - 'SYS_NICE'
+        - 'SYS_PTRACE'
+      requiredDropCapabilities:
+        - 'NET_RAW'
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      name: psp:privileged
+      labels:
+        addonmanager.kubernetes.io/mode: Reconcile
+    rules:
+    - apiGroups:
+      - policy
+      resourceNames:
+      - privileged
+      resources:
+      - podsecuritypolicies
+      verbs:
+      - use
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      name: psp:baseline
+      labels:
+        addonmanager.kubernetes.io/mode: Reconcile
+    rules:
+    - apiGroups:
+      - policy
+      resourceNames:
+      - baseline
+      resources:
+      - podsecuritypolicies
+      verbs:
+      - use
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      name: psp:icn
+      labels:
+        addonmanager.kubernetes.io/mode: Reconcile
+    rules:
+    - apiGroups:
+      - policy
+      resourceNames:
+      - icn
+      resources:
+      - podsecuritypolicies
+      verbs:
+      - use
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRole
+    metadata:
+      name: psp:restricted
+      labels:
+        addonmanager.kubernetes.io/mode: Reconcile
+    rules:
+    - apiGroups:
+      - policy
+      resourceNames:
+      - restricted
+      resources:
+      - podsecuritypolicies
+      verbs:
+      - use
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      name: psp:privileged:nodes
+      namespace: kube-system
+      labels:
+        addonmanager.kubernetes.io/mode: Reconcile
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: ClusterRole
+      name: psp:privileged
+    subjects:
+    - kind: Group
+      name: system:nodes
+      apiGroup: rbac.authorization.k8s.io
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: RoleBinding
+    metadata:
+      name: psp:privileged:kube-system
+      namespace: kube-system
+    roleRef:
+      apiGroup: rbac.authorization.k8s.io
+      kind: ClusterRole
+      name: psp:privileged
+    subjects:
+    - kind: Group
+      name: system:serviceaccounts:kube-system
+      apiGroup: rbac.authorization.k8s.io
+    ---
+    apiVersion: rbac.authorization.k8s.io/v1
+    kind: ClusterRoleBinding
+    metadata:
+      name: psp:icn:any
+    roleRef:
+      kind: ClusterRole
+      name: psp:icn
+      apiGroup: rbac.authorization.k8s.io
+    subjects:
+    - kind: Group
+      name: system:authenticated
+      apiGroup: rbac.authorization.k8s.io
+kind: ConfigMap
+metadata:
+  creationTimestamp: null
+  name: {{ $clusterName }}-podsecurity-addon
+{{- end }}
index ec3ade4..75ca024 100755 (executable)
@@ -64,12 +64,21 @@ function clean_webhook {
     kustomize build ${BUILDDIR}/webhook/base | KUBECONFIG=${cluster_kubeconfig} kubectl delete -f -
 }
 
+function is_kata_deployed {
+    local -r cluster_name=${CLUSTER_NAME:-e2etest}
+    local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
+    kubectl --kubeconfig=${cluster_kubeconfig} get runtimeclass/kata-qemu
+}
+
 function test_kata {
     # Create a temporary kubeconfig file for the tests
     local -r cluster_name=${CLUSTER_NAME:-e2etest}
     local -r cluster_kubeconfig="${BUILDDIR}/${cluster_name}.conf"
     clusterctl -n metal3 get kubeconfig ${cluster_name} >${cluster_kubeconfig}
 
+    # Ensure that Kata has been deployed first
+    wait_for is_kata_deployed
+
     deploy_webhook ${cluster_name}
     clone_kud_repository
     pushd ${KUDPATH}/kud/tests
index 3c68b2d..3138baa 100644 (file)
@@ -22,6 +22,6 @@ machines:
     # Optional
     userData:
       name: ubuntu
-      hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+      hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
       sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
       fqdn: machine-1.akraino.icn.org
diff --git a/deploy/podsecurity/.helmignore b/deploy/podsecurity/.helmignore
new file mode 100644 (file)
index 0000000..0e8a0eb
--- /dev/null
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/deploy/podsecurity/Chart.yaml b/deploy/podsecurity/Chart.yaml
new file mode 100644 (file)
index 0000000..6dc32c3
--- /dev/null
@@ -0,0 +1,8 @@
+# A simple chart to create a RoleBinding of a Namespace to a
+# PodSecurityPolicy.  This can be used with the Flux "dependsOn"
+# feature to install the binding before deploying additional
+# HelmReleases into the Namespace.
+apiVersion: v2
+name: podsecurity
+type: application
+version: 0.1.0
diff --git a/deploy/podsecurity/templates/rolebinding.yaml b/deploy/podsecurity/templates/rolebinding.yaml
new file mode 100644 (file)
index 0000000..918439f
--- /dev/null
@@ -0,0 +1,14 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Values.roleRef }}-{{ .Release.Namespace }}
+  namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: {{ .Values.roleRef }}
+subjects:
+- kind: Group
+  name: system:serviceaccounts:{{ .Release.Namespace }}
+  apiGroup: rbac.authorization.k8s.io
diff --git a/deploy/podsecurity/values.yaml b/deploy/podsecurity/values.yaml
new file mode 100644 (file)
index 0000000..777e6c6
--- /dev/null
@@ -0,0 +1,3 @@
+# Under ICN, roleRef may be psp:restricted, psp:baseline, or
+# psp:privileged
+roleRef: psp:baseline
index dd8baaf..5a8d277 100644 (file)
@@ -17,7 +17,7 @@ clusters:
         interface: ens5
     userData:
       name: ubuntu
-      hashedPassword: $6$rounds=4096$acxyX2VqfHJSAc2$sgVf5uTHHPCX6u50NHnJmhIoqbcL9J12jlBAaWKvd3w8uYO0iXgcBrEhtvHLgSGU7dcU.eqm9JwXEYbbRjPAi1
+      hashedPassword: $6$rounds=10000$PJLOBdyTv23pNp$9RpaAOcibbXUMvgJScKK2JRQioXW4XAVFMRKqgCB5jC4QmtAdbA70DU2jTcpAd6pRdEZIaWFjLCNQMBmiiL40.
       sshAuthorizedKey: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrxu+fSrU51vgAO5zP5xWcTU8uLv4MkUZptE2m1BJE88JdQ80kz9DmUmq2AniMkVTy4pNeUW5PsmGJa+anN3MPM99CR9I37zRqy5i6rUDQgKjz8W12RauyeRMIBrbdy7AX1xasoTRnd6Ta47bP0egiFb+vUGnlTFhgfrbYfjbkJhVfVLCTgRw8Yj0NSK16YEyhYLbLXpix5udRpXSiFYIyAEWRCCsWJWljACr99P7EF82vCGI0UDGCCd/1upbUwZeTouD/FJBw9qppe6/1eaqRp7D36UYe3KzLpfHQNgm9AzwgYYZrD4tNN6QBMq/VUIuam0G1aLgG8IYRLs41HYkJ root@jump
     flux:
       repositoryName: icn
index 4f1a2e2..acef817 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index 25a25e0..5ac0e43 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index 3660206..8f15512 100644 (file)
@@ -18,6 +18,8 @@ spec:
   targetNamespace: emco
   install:
     createNamespace: true
+  dependsOn:
+  - name: emco-podsecurity
   values:
     global:
       repository: integratedcloudnative/
diff --git a/deploy/site/vm/e2etest/emco-podsecurity-release.yaml b/deploy/site/vm/e2etest/emco-podsecurity-release.yaml
new file mode 100644 (file)
index 0000000..20c6c20
--- /dev/null
@@ -0,0 +1,22 @@
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+  name: emco-podsecurity
+  namespace: flux-system
+spec:
+  interval: 5m
+  chart:
+    spec:
+      chart: deploy/podsecurity
+      sourceRef:
+        kind: GitRepository
+        name: icn
+        namespace: flux-system
+      interval: 1m
+  releaseName: emco-podsecurity
+  targetNamespace: emco
+  install:
+    createNamespace: true
+  values:
+    roleRef: psp:privileged
index e670d87..aec92a7 100644 (file)
@@ -18,6 +18,8 @@ spec:
   targetNamespace: emco
   install:
     createNamespace: true
+  dependsOn:
+  - name: emco-podsecurity
   values:
     global:
       repository: integratedcloudnative/
index c588c20..eb77f1c 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
diff --git a/deploy/site/vm/e2etest/kud-podsecurity.yaml b/deploy/site/vm/e2etest/kud-podsecurity.yaml
new file mode 100644 (file)
index 0000000..387c8e1
--- /dev/null
@@ -0,0 +1,22 @@
+---
+apiVersion: helm.toolkit.fluxcd.io/v2beta1
+kind: HelmRelease
+metadata:
+  name: kud-podsecurity
+  namespace: flux-system
+spec:
+  interval: 5m
+  chart:
+    spec:
+      chart: deploy/podsecurity
+      sourceRef:
+        kind: GitRepository
+        name: icn
+        namespace: flux-system
+      interval: 1m
+  releaseName: kud-podsecurity
+  targetNamespace: kud
+  install:
+    createNamespace: true
+  values:
+    roleRef: psp:privileged
index df2039a..d43f084 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index 8ebe339..02c1c29 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index 64e3bc2..c897be6 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index cb08481..6e2cc1e 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index 74ba035..44f8ffd 100644 (file)
@@ -18,3 +18,5 @@ spec:
   targetNamespace: kud
   install:
     createNamespace: true
+  dependsOn:
+  - name: kud-podsecurity
index 14f9319..2d89d17 100755 (executable)
@@ -30,12 +30,8 @@ function is_cluster_ready {
     [[ $(kubectl -n metal3 get cluster e2etest -o jsonpath='{.status.conditions[?(@.type=="Ready")].status}') == "True" ]]
 }
 
-function are_kustomizations_ready {
-    [[ $(kubectl --kubeconfig=${BUILDDIR}/e2etest-admin.conf get Kustomization -n flux-system -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c -v True) == 0 ]]
-}
-
-function are_helmreleases_ready {
-    [[ $(kubectl --kubeconfig=${BUILDDIR}/e2etest-admin.conf get HelmRelease -n flux-system -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c -v True) == 0 ]]
+function is_control_plane_ready {
+    [[ $(kubectl --kubeconfig=${BUILDDIR}/e2etest-admin.conf get nodes -l node-role.kubernetes.io/control-plane -o jsonpath='{range .items[*]}{.status.conditions[?(@.type=="Ready")].status}{"\n"}{end}' | grep -c -v True) == 0 ]]
 }
 
 function wait_for_all_ready {
@@ -43,12 +39,7 @@ function wait_for_all_ready {
     wait_for is_cluster_ready
     clusterctl -n metal3 get kubeconfig e2etest >${BUILDDIR}/e2etest-admin.conf
     chmod 600 ${BUILDDIR}/e2etest-admin.conf
-    # TODO The following checks are not ideal: resources created by
-    # operators aren't detected here, but this is the best that can be
-    # currently done
-    WAIT_FOR_INTERVAL=30s
-    wait_for are_kustomizations_ready
-    wait_for are_helmreleases_ready
+    wait_for is_control_plane_ready
 }
 
 case $1 in
index 809e67f..38088c3 100755 (executable)
@@ -40,6 +40,9 @@ KUSTOMIZE_VERSION="v4.3.0"
 #Cluster API version to use
 CAPI_VERSION="v0.4.3"
 
+#Cluster API Provider Metal3 (CAPM3) version to use
+CAPM3_VERSION="v0.5.1"
+
 #The flux version to use
 FLUX_VERSION="0.20.0"