diff --git a/src/tools/genpolicy/src/daemon_set.rs b/src/tools/genpolicy/src/daemon_set.rs
index 3e0a6ee44b72..19ce944b0271 100644
--- a/src/tools/genpolicy/src/daemon_set.rs
+++ b/src/tools/genpolicy/src/daemon_set.rs
@@ -147,4 +147,8 @@ impl yaml::K8sResource for DaemonSet {
             .clone()
             .or_else(|| Some(String::new()))
     }
+
+    fn get_process_fields(&self, process: &mut policy::KataProcess) {
+        yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+    }
 }
diff --git a/src/tools/genpolicy/src/deployment.rs b/src/tools/genpolicy/src/deployment.rs
index adc7f9778df5..4167b25a9f03 100644
--- a/src/tools/genpolicy/src/deployment.rs
+++ b/src/tools/genpolicy/src/deployment.rs
@@ -145,4 +145,8 @@ impl yaml::K8sResource for Deployment {
             .clone()
             .or_else(|| Some(String::new()))
     }
+
+    fn get_process_fields(&self, process: &mut policy::KataProcess) {
+        yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+    }
 }
diff --git a/src/tools/genpolicy/src/job.rs b/src/tools/genpolicy/src/job.rs
index e686d333c91c..63c3571776bd 100644
--- a/src/tools/genpolicy/src/job.rs
+++ b/src/tools/genpolicy/src/job.rs
@@ -110,4 +110,8 @@ impl yaml::K8sResource for Job {
         }
         false
     }
+
+    fn get_process_fields(&self, process: &mut policy::KataProcess) {
+        yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+    }
 }
diff --git a/src/tools/genpolicy/src/pod.rs b/src/tools/genpolicy/src/pod.rs
index e7f61132cf1d..19f8822395ca 100644
--- a/src/tools/genpolicy/src/pod.rs
+++ b/src/tools/genpolicy/src/pod.rs
@@ -94,7 +94,7 @@ pub struct PodSpec {
     topologySpreadConstraints: Option<Vec<TopologySpreadConstraint>>,

     #[serde(skip_serializing_if = "Option::is_none")]
-    securityContext: Option<PodSecurityContext>,
+    pub securityContext: Option<PodSecurityContext>,

     #[serde(skip_serializing_if = "Option::is_none")]
     priorityClassName: Option<String>,
@@ -312,9 +312,9 @@ struct SeccompProfile {

 /// See Reference / Kubernetes API / Workload Resources / Pod.
 #[derive(Clone, Debug, Serialize, Deserialize)]
-struct PodSecurityContext {
+pub struct PodSecurityContext {
     #[serde(skip_serializing_if = "Option::is_none")]
-    runAsUser: Option<i64>,
+    pub runAsUser: Option<i64>,

     // TODO: additional fields.
 }
@@ -893,11 +893,7 @@ impl yaml::K8sResource for Pod {
     }

     fn get_process_fields(&self, process: &mut policy::KataProcess) {
-        if let Some(context) = &self.spec.securityContext {
-            if let Some(uid) = context.runAsUser {
-                process.User.UID = uid.try_into().unwrap();
-            }
-        }
+        yaml::get_process_fields(process, &self.spec.securityContext);
     }
 }
diff --git a/src/tools/genpolicy/src/replica_set.rs b/src/tools/genpolicy/src/replica_set.rs
index 04d46e1b8191..06daedab85ca 100644
--- a/src/tools/genpolicy/src/replica_set.rs
+++ b/src/tools/genpolicy/src/replica_set.rs
@@ -108,4 +108,8 @@ impl yaml::K8sResource for ReplicaSet {
         }
         false
     }
+
+    fn get_process_fields(&self, process: &mut policy::KataProcess) {
+        yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+    }
 }
diff --git a/src/tools/genpolicy/src/replication_controller.rs b/src/tools/genpolicy/src/replication_controller.rs
index 0c7583d81613..6fea847c7d8f 100644
--- a/src/tools/genpolicy/src/replication_controller.rs
+++ b/src/tools/genpolicy/src/replication_controller.rs
@@ -110,4 +110,8 @@ impl yaml::K8sResource for ReplicationController {
         }
         false
     }
+
+    fn get_process_fields(&self, process: &mut policy::KataProcess) {
+        yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+    }
 }
diff --git a/src/tools/genpolicy/src/stateful_set.rs b/src/tools/genpolicy/src/stateful_set.rs
index e0c0325f42fd..04e1ee06ed5d 100644
--- a/src/tools/genpolicy/src/stateful_set.rs
+++ b/src/tools/genpolicy/src/stateful_set.rs
@@ -192,6 +192,10 @@ impl yaml::K8sResource for StatefulSet {
             .clone()
             .or_else(|| Some(String::new()))
     }
+
+    fn get_process_fields(&self, process: &mut policy::KataProcess) {
+        yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+    }
 }

 impl StatefulSet {
diff --git a/src/tools/genpolicy/src/yaml.rs b/src/tools/genpolicy/src/yaml.rs
index cb48a88959c6..e7127fb7d333 100644
--- a/src/tools/genpolicy/src/yaml.rs
+++ b/src/tools/genpolicy/src/yaml.rs
@@ -97,8 +97,8 @@ pub trait K8sResource {
     }

     fn get_process_fields(&self, _process: &mut policy::KataProcess) {
-        // Just Pods can have a PodSecurityContext field, so the other
-        // resources can use this default get_process_fields implementation.
+        // No need to implement support for securityContext or similar fields
+        // for some of the K8s resource types.
     }
 }
@@ -378,3 +378,14 @@ fn handle_unused_field(path: &str, silent_unsupported_fields: bool) {
         panic!("Unsupported field: {}", path);
     }
 }
+
+pub fn get_process_fields(
+    process: &mut policy::KataProcess,
+    security_context: &Option<pod::PodSecurityContext>,
+) {
+    if let Some(context) = security_context {
+        if let Some(uid) = context.runAsUser {
+            process.User.UID = uid.try_into().unwrap();
+        }
+    }
+}
diff --git a/tests/integration/kubernetes/k8s-policy-deployment.bats b/tests/integration/kubernetes/k8s-policy-deployment.bats
index 8919c7dae153..5fe7df4e9331 100644
--- a/tests/integration/kubernetes/k8s-policy-deployment.bats
+++ b/tests/integration/kubernetes/k8s-policy-deployment.bats
@@ -14,17 +14,24 @@ setup() {
     get_pod_config_dir

     deployment_name="policy-redis-deployment"
-    deployment_yaml="${pod_config_dir}/k8s-policy-deployment.yaml"
+    correct_deployment_yaml="${pod_config_dir}/k8s-policy-deployment.yaml"

-    # Add an appropriate policy to the correct YAML file.
-    policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
-    add_requests_to_policy_settings "${policy_settings_dir}" "ReadStreamRequest"
-    auto_generate_policy "${policy_settings_dir}" "${deployment_yaml}"
+    # Save some time by executing genpolicy a single time.
+    if [ "${BATS_TEST_NUMBER}" == "1" ]; then
+        # Add an appropriate policy to the correct YAML file.
+        policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
+        add_requests_to_policy_settings "${policy_settings_dir}" "ReadStreamRequest"
+        auto_generate_policy "${policy_settings_dir}" "${correct_deployment_yaml}"
+    fi
+
+    # Start each test case with a copy of the correct yaml file.
+    incorrect_deployment_yaml="${pod_config_dir}/k8s-policy-deployment-incorrect.yaml"
+    cp "${correct_deployment_yaml}" "${incorrect_deployment_yaml}"
 }

 @test "Successful deployment with auto-generated policy and container image volumes" {
     # Initiate deployment
-    kubectl apply -f "${deployment_yaml}"
+    kubectl apply -f "${correct_deployment_yaml}"

     # Wait for the deployment to be created
     cmd="kubectl rollout status --timeout=1s deployment/${deployment_name} | grep 'successfully rolled out'"
@@ -32,16 +39,41 @@ setup() {
     waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
 }

+test_deployment_policy_error() {
+    # Initiate deployment
+    kubectl apply -f "${incorrect_deployment_yaml}"
+
+    # Wait for the deployment pod to fail
+    wait_for_blocked_request "CreateContainerRequest" "${deployment_name}"
+}
+
+@test "Policy failure: unexpected UID = 0" {
+    # Change the pod UID to 0 after the policy has been generated using a different
+    # runAsUser value. The policy would use UID = 0 by default, if there weren't
+    # a different runAsUser value in the YAML file.
+    yq -i \
+        '.spec.template.spec.securityContext.runAsUser = 0' \
+        "${incorrect_deployment_yaml}"
+
+    test_deployment_policy_error
+}
+
 teardown() {
     auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."

-    # Debugging information
+    # Pod debugging information. Don't print the "Message:" line because it contains a truncated policy log.
+    info "Pod ${deployment_name}:"
+    kubectl describe pod "${deployment_name}" | grep -v "Message:"
+
+    # Deployment debugging information. The --watch=false argument makes "kubectl rollout status"
+    # return instead of waiting for a possibly failed deployment to complete.
     info "Deployment ${deployment_name}:"
     kubectl describe deployment "${deployment_name}"
-    kubectl rollout status deployment/${deployment_name}
+    kubectl rollout status deployment/${deployment_name} --watch=false

     # Clean-up
     kubectl delete deployment "${deployment_name}"

     delete_tmp_policy_settings_dir "${policy_settings_dir}"
+
+    rm -f "${incorrect_deployment_yaml}"
 }
diff --git a/tests/integration/kubernetes/k8s-policy-job.bats b/tests/integration/kubernetes/k8s-policy-job.bats
index ef1ea90445cd..e3b7070fac2d 100644
--- a/tests/integration/kubernetes/k8s-policy-job.bats
+++ b/tests/integration/kubernetes/k8s-policy-job.bats
@@ -131,6 +131,15 @@ test_job_policy_error() {
     test_job_policy_error
 }

+@test "Policy failure: unexpected UID = 222" {
+    # Changing the job spec after generating its policy will cause CreateContainer to be denied.
+    yq -i \
+        '.spec.template.spec.securityContext.runAsUser = 222' \
+        "${incorrect_yaml}"
+
+    test_job_policy_error
+}
+
 teardown() {
     auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."
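The new "Policy failure" tests in these bats files all exercise the same mechanism: genpolicy records the expected process UID in the generated policy, and the policy engine in the Kata agent rejects any CreateContainerRequest whose process fields disagree with it. The actual enforcement is a Rego rule evaluated inside the guest, not Rust code; the sketch below only illustrates the comparison these tests trip, and all names in it (Expected, Request, allow_create_container) are hypothetical.

// Illustration only: the real enforcement is a Rego rule evaluated by the
// Kata agent's policy engine. Types and names here are hypothetical.
struct Expected {
    uid: u32, // UID recorded by genpolicy from runAsUser in the YAML
}

struct Request {
    uid: u32, // UID carried by the incoming CreateContainerRequest
}

fn allow_create_container(expected: &Expected, request: &Request) -> bool {
    // After `yq` rewrites runAsUser, the request no longer matches the UID
    // that the policy was generated with, so the request is denied.
    expected.uid == request.uid
}

fn main() {
    let expected = Expected { uid: 1000 }; // from the original YAML
    let request = Request { uid: 222 }; // from the modified YAML
    assert!(!allow_create_container(&expected, &request));
    println!("CreateContainerRequest would be blocked: UID mismatch");
}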
diff --git a/tests/integration/kubernetes/k8s-policy-rc.bats b/tests/integration/kubernetes/k8s-policy-rc.bats
index a38c57127a4f..590d565367a3 100644
--- a/tests/integration/kubernetes/k8s-policy-rc.bats
+++ b/tests/integration/kubernetes/k8s-policy-rc.bats
@@ -61,17 +61,18 @@ test_rc_policy() {
         --output=jsonpath='{.spec.replicas}')
     [ "${number_of_replicas}" -gt 0 ]

-    # The replicas pods can be in running, waiting, succeeded or failed
-    # status. We need them all on running state before proceeding.
-    cmd="kubectl describe rc ${replication_name}"
-    cmd+=" | grep \"Pods Status\" | grep \"${number_of_replicas} Running\""
-    info "Waiting for: ${cmd}"
-    waitForProcess "$wait_time" "$sleep_time" "$cmd"
+    # Wait for all the expected pods to be created.
+    local count=0
+    local launched_pods=()
+    while [ $count -lt 6 ] && [ "${#launched_pods[@]}" -ne "${number_of_replicas}" ]; do
+        count=$((count + 1))
+        sleep 10
+        launched_pods=($(kubectl get pods "--selector=app=${app_name}" \
+            --output=jsonpath={.items..metadata.name}))
+    done

     # Check that the number of pods created for the replication controller
     # is equal to the number of replicas that we defined.
-    launched_pods=($(kubectl get pods "--selector=app=${app_name}" \
-        --output=jsonpath={.items..metadata.name}))
     [ "${#launched_pods[@]}" -eq "${number_of_replicas}" ]

     # Check pod creation
@@ -110,13 +111,13 @@ test_rc_policy() {

 @test "Policy failure: unexpected host device mapping" {
     # Changing the template spec after generating its policy will cause CreateContainer to be denied.
-	yq -i \
-		'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/dev/ttyS0", "name": "dev-ttys0"}]' \
-		"${incorrect_yaml}"
+    yq -i \
+        '.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/dev/ttyS0", "name": "dev-ttys0"}]' \
+        "${incorrect_yaml}"

-	yq -i \
-		'.spec.template.spec.volumes += [{"name": "dev-ttys0", "hostPath": {"path": "/dev/ttyS0"}}]' \
-		"${incorrect_yaml}"
+    yq -i \
+        '.spec.template.spec.volumes += [{"name": "dev-ttys0", "hostPath": {"path": "/dev/ttyS0"}}]' \
+        "${incorrect_yaml}"

     test_rc_policy true
 }
@@ -139,6 +140,15 @@ test_rc_policy() {
     test_rc_policy true
 }

+@test "Policy failure: unexpected UID = 1000" {
+    # Changing the template spec after generating its policy will cause CreateContainer to be denied.
+    yq -i \
+        '.spec.template.spec.securityContext.runAsUser = 1000' \
+        "${incorrect_yaml}"
+
+    test_rc_policy true
+}
+
 teardown() {
     auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-deployment.yaml b/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-deployment.yaml
index 407b99729061..6d3cb9468226 100644
--- a/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-deployment.yaml
+++ b/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-deployment.yaml
@@ -25,6 +25,8 @@ spec:
     spec:
       terminationGracePeriodSeconds: 0
       runtimeClassName: kata
+      securityContext:
+        runAsUser: 1000
       containers:
         - name: master
           image: quay.io/opstree/redis
diff --git a/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-rc.yaml b/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-rc.yaml
index 52c39ae31cb0..62bcdebcd520 100644
--- a/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-rc.yaml
+++ b/tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-rc.yaml
@@ -17,6 +17,8 @@ spec:
       labels:
         app: policy-nginx-rc
     spec:
+      securityContext:
+        runAsUser: 123
      terminationGracePeriodSeconds: 0
      runtimeClassName: kata
      containers:
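For experimenting with the new helper outside the genpolicy tree, here is a minimal, self-contained sketch of the logic that yaml::get_process_fields implements. The stub types below are simplified stand-ins for policy::KataProcess and pod::PodSecurityContext (only the fields touched by this patch are modeled, and the nested User struct is collapsed into a single uid field), so this is not the real genpolicy API:

// Minimal sketch of yaml::get_process_fields, with stub types standing in
// for policy::KataProcess and pod::PodSecurityContext.
#[derive(Default, Debug)]
struct KataProcess {
    uid: u32, // stands in for process.User.UID
}

struct PodSecurityContext {
    run_as_user: Option<i64>, // stands in for runAsUser
}

fn get_process_fields(process: &mut KataProcess, security_context: &Option<PodSecurityContext>) {
    if let Some(context) = security_context {
        if let Some(uid) = context.run_as_user {
            // As in the patch: try_into() converts the i64 from the YAML
            // into a u32, and unwrap() panics on negative values, which
            // are not valid UIDs anyway.
            process.uid = uid.try_into().unwrap();
        }
    }
}

fn main() {
    let mut process = KataProcess::default();

    // No securityContext: the UID keeps its prior value (0 here).
    get_process_fields(&mut process, &None);
    assert_eq!(process.uid, 0);

    // A pod-level runAsUser overrides the UID, as in k8s-policy-rc.yaml.
    let context = Some(PodSecurityContext { run_as_user: Some(123) });
    get_process_fields(&mut process, &context);
    assert_eq!(process.uid, 123);
}

Centralizing this logic in yaml.rs is what lets Pod replace its previous inline implementation with a one-line delegation, and lets every template-based resource in the patch (DaemonSet, Deployment, Job, ReplicaSet, ReplicationController, StatefulSet) reuse the same code by passing &self.spec.template.spec.securityContext.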