diff --git a/.github/workflows/policy-test.yml b/.github/workflows/policy-test.yml index e55ec5be..57676ddd 100644 --- a/.github/workflows/policy-test.yml +++ b/.github/workflows/policy-test.yml @@ -41,7 +41,7 @@ jobs: - name: Run Policy tests - v2 policies run: opa test ${{ env.GKE_POLICY_DIRECTORY_V2 }} -v - name: Setup Regal - uses: StyraInc/setup-regal@v0.2.0 + uses: StyraInc/setup-regal@v1 with: - version: v0.10.1 + version: v0.20.1 - run: regal lint --format github ${{ env.GKE_POLICY_DIRECTORY_V2 }} diff --git a/.regal/config.yaml b/.regal/config.yaml index f2fbfb99..d3c2076d 100644 --- a/.regal/config.yaml +++ b/.regal/config.yaml @@ -18,15 +18,10 @@ rules: # not applicable to this project level: ignore style: - detached-metadata: - # style preference only - level: ignore line-length: level: ignore opa-fmt: level: ignore - prefer-some-in-iteration: - level: ignore - testing: - test-outside-test-package: + imports: + use-rego-v1: level: ignore diff --git a/gke-policies-v2/policy/autopilot_cluster.rego b/gke-policies-v2/policy/autopilot_cluster.rego index 197d6bbe..e730cd67 100644 --- a/gke-policies-v2/policy/autopilot_cluster.rego +++ b/gke-policies-v2/policy/autopilot_cluster.rego @@ -24,16 +24,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/choose-cluster-mode # sccCategory: AUTOPILOT_DISABLED # dataSource: gke - package gke.policy.autopilot +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.autopilot.enabled msg := "Cluster is not using Autopilot mode" } diff --git a/gke-policies-v2/policy/autopilot_cluster_test.rego b/gke-policies-v2/policy/autopilot_cluster_test.rego index 84b27faa..dda5f2b4 100644 --- a/gke-policies-v2/policy/autopilot_cluster_test.rego +++ b/gke-policies-v2/policy/autopilot_cluster_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing 
permissions and # limitations under the License. -package gke.policy.autopilot +package gke.policy.autopilot_test -test_autopilot_mode_enabled { - valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}, "autopilot": {"enabled": true}}}} +import future.keywords.if +import data.gke.policy.autopilot + +test_autopilot_mode_enabled if { + autopilot.valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}, "autopilot": {"enabled": true}}}} } -test_autopilot_mode_disabled { - not valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}, "autopilot": {}}}} +test_autopilot_mode_disabled if { + not autopilot.valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}, "autopilot": {}}}} } diff --git a/gke-policies-v2/policy/cluster_binary_authorization.rego b/gke-policies-v2/policy/cluster_binary_authorization.rego index aa2595ce..a317ccbf 100644 --- a/gke-policies-v2/policy/cluster_binary_authorization.rego +++ b/gke-policies-v2/policy/cluster_binary_authorization.rego @@ -28,16 +28,18 @@ # version: "1.4" # id: "5.10.5" # dataSource: gke - package gke.policy.cluster_binary_authorization +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.binary_authorization.enabled msg := "Cluster is not configured with binary authorization" } diff --git a/gke-policies-v2/policy/cluster_binary_authorization_test.rego b/gke-policies-v2/policy/cluster_binary_authorization_test.rego index 67d2b400..f6771d1f 100644 --- a/gke-policies-v2/policy/cluster_binary_authorization_test.rego +++ b/gke-policies-v2/policy/cluster_binary_authorization_test.rego @@ -12,14 +12,17 @@ # See the License for the specific 
language governing permissions and # limitations under the License. -package gke.policy.cluster_binary_authorization +package gke.policy.cluster_binary_authorization_test -test_cluster_not_configured_binary_authorization { - not valid with input as {"Data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +import future.keywords.if +import data.gke.policy.cluster_binary_authorization + +test_cluster_not_configured_binary_authorization if { + not cluster_binary_authorization.valid with input as {"Data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_cluster_configured_binary_authorization { - valid with input as {"data": {"gke": { +test_cluster_configured_binary_authorization if { + cluster_binary_authorization.valid with input as {"data": {"gke": { "name": "cluster-not-repairing", "binary_authorization": { "enabled": true diff --git a/gke-policies-v2/policy/cluster_enable_security_posture.rego b/gke-policies-v2/policy/cluster_enable_security_posture.rego index 5656feed..48eeb385 100644 --- a/gke-policies-v2/policy/cluster_enable_security_posture.rego +++ b/gke-policies-v2/policy/cluster_enable_security_posture.rego @@ -27,16 +27,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/about-security-posture-dashboard # sccCategory: SECURITY_POSTURE_DISABLED # dataSource: gke - package gke.policy.cluster_security_posture +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.security_posture_config.mode == 2 msg := "Cluster is not configure with Security Posture" } diff --git a/gke-policies-v2/policy/cluster_enable_security_posture_test.rego 
b/gke-policies-v2/policy/cluster_enable_security_posture_test.rego index 916ca517..682edeef 100644 --- a/gke-policies-v2/policy/cluster_enable_security_posture_test.rego +++ b/gke-policies-v2/policy/cluster_enable_security_posture_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.cluster_security_posture +package gke.policy.cluster_security_posture_test -test_cluster_enabled_security_posture { - valid with input as {"data": {"gke": { +import future.keywords.if +import data.gke.policy.cluster_security_posture + +test_cluster_enabled_security_posture if { + cluster_security_posture.valid with input as {"data": {"gke": { "name": "cluster-test", "security_posture_config": { "mode": 2, @@ -24,8 +27,8 @@ test_cluster_enabled_security_posture { }}} } -test_cluster_unknown_security_posture { - not valid with input as {"data": {"gke": { +test_cluster_unknown_security_posture if { + not cluster_security_posture.valid with input as {"data": {"gke": { "name": "cluster-test", "security_posture_config": { "mode": 0, @@ -34,8 +37,8 @@ test_cluster_unknown_security_posture { }}} } -test_cluster_disabled_security_posture { - not valid with input as {"data": {"gke": { +test_cluster_disabled_security_posture if { + not cluster_security_posture.valid with input as {"data": {"gke": { "name": "cluster-test", "security_posture_config": { "mode": 1, @@ -44,8 +47,8 @@ test_cluster_disabled_security_posture { }}} } -test_cluster_missing_security_posture { - not valid with input as {"data": {"gke": { +test_cluster_missing_security_posture if { + not cluster_security_posture.valid with input as {"data": {"gke": { "name": "cluster-test" }}} } diff --git a/gke-policies-v2/policy/cluster_enable_workload_scanning.rego b/gke-policies-v2/policy/cluster_enable_workload_scanning.rego index b9ace876..1619ef8a 100644 --- a/gke-policies-v2/policy/cluster_enable_workload_scanning.rego +++ 
b/gke-policies-v2/policy/cluster_enable_workload_scanning.rego @@ -29,16 +29,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/about-workload-vulnerability-scanning # sccCategory: WORKLOAD_SCANNING_DISABLED # dataSource: gke - package gke.policy.cluster_workload_scanning +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.security_posture_config.vulnerability_mode == 2 msg := "Cluster is not configured with workload vulnerability scanning" } diff --git a/gke-policies-v2/policy/cluster_enable_workload_scanning_test.rego b/gke-policies-v2/policy/cluster_enable_workload_scanning_test.rego index d71292d1..a06d80e9 100644 --- a/gke-policies-v2/policy/cluster_enable_workload_scanning_test.rego +++ b/gke-policies-v2/policy/cluster_enable_workload_scanning_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_workload_scanning +package gke.policy.cluster_workload_scanning_test -test_cluster_enabled_workload_scanning { - valid with input as {"data": {"gke": { +import future.keywords.if +import data.gke.policy.cluster_workload_scanning + +test_cluster_enabled_workload_scanning if { + cluster_workload_scanning.valid with input as {"data": {"gke": { "name": "cluster-test", "security_posture_config": { "mode": 2, @@ -24,8 +27,8 @@ test_cluster_enabled_workload_scanning { }}} } -test_cluster_disabled_workload_scanning { - not valid with input as {"data": {"gke": { +test_cluster_disabled_workload_scanning if { + not cluster_workload_scanning.valid with input as {"data": {"gke": { "name": "cluster-test", "security_posture_config": { "mode": 1, @@ -34,8 +37,8 @@ test_cluster_disabled_workload_scanning { }}} } -test_cluster_unknown_workload_scanning { - not valid with input as {"data": {"gke": { +test_cluster_unknown_workload_scanning if { + not cluster_workload_scanning.valid with input as {"data": {"gke": { "name": "cluster-test", "security_posture_config": { "mode": 1, @@ -44,8 +47,8 @@ test_cluster_unknown_workload_scanning { }}} } -test_cluster_missing_security_posture { - not valid with input as {"data": {"gke": { +test_cluster_missing_security_posture if { + not cluster_workload_scanning.valid with input as {"data": {"gke": { "name": "cluster-test" }}} } diff --git a/gke-policies-v2/policy/cluster_gce_csi_driver.rego b/gke-policies-v2/policy/cluster_gce_csi_driver.rego index 6b505ea3..8bc03a8c 100644 --- a/gke-policies-v2/policy/cluster_gce_csi_driver.rego +++ b/gke-policies-v2/policy/cluster_gce_csi_driver.rego @@ -25,16 +25,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver # sccCategory: GCE_CSI_DRIVER_DISABLED # dataSource: gke - package gke.policy.cluster_gce_csi_driver +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { 
count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.addons_config.gce_persistent_disk_csi_driver_config.enabled msg := "Cluster is not configured with GCE persistent disk CSI driver" } diff --git a/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego b/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego index 89fc78ce..2204b294 100644 --- a/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego +++ b/gke-policies-v2/policy/cluster_gce_csi_driver_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.cluster_gce_csi_driver +package gke.policy.cluster_gce_csi_driver_test -test_gce_csi_driver_addon_empty { - not valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{}}}}} +import future.keywords.if +import data.gke.policy.cluster_gce_csi_driver + +test_gce_csi_driver_addon_empty if { + not cluster_gce_csi_driver.valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{}}}}} } -test_gce_csi_driver_addon_disabled { - not valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":false}}}}} +test_gce_csi_driver_addon_disabled if { + not cluster_gce_csi_driver.valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":false}}}}} } -test_gce_csi_driver_addon_enabled { - valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":true}}}}} +test_gce_csi_driver_addon_enabled if { + cluster_gce_csi_driver.valid with input as {"data": {"gke": {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":true}}}}} } \ No newline at end of file diff --git 
a/gke-policies-v2/policy/cluster_maintenance_window.rego b/gke-policies-v2/policy/cluster_maintenance_window.rego index a40e5285..ac8c6b17 100644 --- a/gke-policies-v2/policy/cluster_maintenance_window.rego +++ b/gke-policies-v2/policy/cluster_maintenance_window.rego @@ -27,16 +27,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions # sccCategory: MAINTENANCE_WINDOWS_DISABLED # dataSource: gke - package gke.policy.cluster_maintenance_window +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.maintenance_policy.window.Policy msg := "GKE cluster is not configured with maintenance window" } diff --git a/gke-policies-v2/policy/cluster_maintenance_window_test.rego b/gke-policies-v2/policy/cluster_maintenance_window_test.rego index fa9e76a2..dec0864d 100644 --- a/gke-policies-v2/policy/cluster_maintenance_window_test.rego +++ b/gke-policies-v2/policy/cluster_maintenance_window_test.rego @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_maintenance_window +package gke.policy.cluster_maintenance_window_test -test_cluster_not_configured_maintenance_window { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +import future.keywords.if +import data.gke.policy.cluster_maintenance_window + +test_cluster_not_configured_maintenance_window if { + not cluster_maintenance_window.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_cluster_configured_to_maintanace_window { - valid with input as {"data": {"gke": { +test_cluster_configured_to_maintanace_window if { + cluster_maintenance_window.valid with input as {"data": {"gke": { "name": "cluster-not-repairing", "maintenance_policy": { "window": { diff --git a/gke-policies-v2/policy/cluster_receive_updates.rego b/gke-policies-v2/policy/cluster_receive_updates.rego index 3ac1e9d0..b4b55bd1 100644 --- a/gke-policies-v2/policy/cluster_receive_updates.rego +++ b/gke-policies-v2/policy/cluster_receive_updates.rego @@ -27,21 +27,23 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-notifications # sccCategory: UPDATE_NOTIFICATIONS_DISABLED # dataSource: gke - package gke.policy.cluster_receive_updates +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.notification_config.pubsub.enabled msg := "Cluster is not configured with upgrade notifications" } -violation[msg] { +violation contains msg if { not input.data.gke.notification_config.pubsub.topic - msg := "Cluster is not configured with upgrade notofications topic" + msg := "Cluster is not configured with upgrade 
notifications topic" } diff --git a/gke-policies-v2/policy/cluster_receive_updates_test.rego b/gke-policies-v2/policy/cluster_receive_updates_test.rego index 06263aa7..d9767f71 100644 --- a/gke-policies-v2/policy/cluster_receive_updates_test.rego +++ b/gke-policies-v2/policy/cluster_receive_updates_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.cluster_receive_updates +package gke.policy.cluster_receive_updates_test -test_cluster_with_topic_configured { - valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "notification_config": { "pubsub": { "enabled": true, "topic": "projects/project-id/topics/cluster-updates-topic"}}}}} +import future.keywords.if +import data.gke.policy.cluster_receive_updates + +test_cluster_with_topic_configured if { + cluster_receive_updates.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "notification_config": { "pubsub": { "enabled": true, "topic": "projects/project-id/topics/cluster-updates-topic"}}}}} } -test_cluster_without_notification_config { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +test_cluster_without_notification_config if { + not cluster_receive_updates.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_cluster_without_topic_specified { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": true }}}}} +test_cluster_without_topic_specified if { + not cluster_receive_updates.valid with input as {"data": 
{"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": true }}}}} } -test_cluster_without_pubsub_enabled { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": false, "topic": "projects/project-id/topics/cluster-updates-topic"}}}}} +test_cluster_without_pubsub_enabled if { + not cluster_receive_updates.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": false, "topic": "projects/project-id/topics/cluster-updates-topic"}}}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/cluster_release_channels.rego b/gke-policies-v2/policy/cluster_release_channels.rego index ea366599..15446fa6 100644 --- a/gke-policies-v2/policy/cluster_release_channels.rego +++ b/gke-policies-v2/policy/cluster_release_channels.rego @@ -29,16 +29,18 @@ # version: "1.4" # id: "5.5.4" # dataSource: gke - package gke.policy.cluster_release_channels +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.release_channel.channel msg := "Cluster is not enrolled in any release channel" } diff --git a/gke-policies-v2/policy/cluster_release_channels_test.rego b/gke-policies-v2/policy/cluster_release_channels_test.rego index 6e99debb..c49528e3 100644 --- a/gke-policies-v2/policy/cluster_release_channels_test.rego +++ b/gke-policies-v2/policy/cluster_release_channels_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_release_channels +package gke.policy.cluster_release_channels_test -test_cluster_not_enrolled_to_release_channels { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +import future.keywords.if +import data.gke.policy.cluster_release_channels + +test_cluster_not_enrolled_to_release_channels if { + not cluster_release_channels.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_cluster_enrolled_to_release_channels { - valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +test_cluster_enrolled_to_release_channels if { + cluster_release_channels.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/control_plane_access.rego b/gke-policies-v2/policy/control_plane_access.rego index 3d3f5fef..7ef2b09c 100644 --- a/gke-policies-v2/policy/control_plane_access.rego +++ b/gke-policies-v2/policy/control_plane_access.rego @@ -30,26 +30,28 @@ # version: "1.4" # id: "5.6.3" # dataSource: gke - package gke.policy.control_plane_access +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.master_authorized_networks_config.enabled msg := "Cluster is not configured with master authorized networks" } -violation[msg] { +violation contains msg if { not 
input.data.gke.master_authorized_networks_config.cidr_blocks msg := "Cluster is not configured with master authorized networks CIDRs" } -violation[msg] { +violation contains msg if { count(input.data.gke.master_authorized_networks_config.cidr_blocks) < 1 msg := "Cluster is not configured with master authorized networks CIDRs" } diff --git a/gke-policies-v2/policy/control_plane_access_test.rego b/gke-policies-v2/policy/control_plane_access_test.rego index 5f1e3ce0..b41c90bf 100644 --- a/gke-policies-v2/policy/control_plane_access_test.rego +++ b/gke-policies-v2/policy/control_plane_access_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.control_plane_access +package gke.policy.control_plane_access_test -test_authorized_networks_enabled { - valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": { +import future.keywords.if +import data.gke.policy.control_plane_access + +test_authorized_networks_enabled if { + control_plane_access.valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": { "enabled":true, "cidr_blocks":[ {"display_name":"Test Block","cidr_block":"192.168.0.0./16"} @@ -23,20 +26,20 @@ test_authorized_networks_enabled { }}}} } -test_authoized_networks_missing{ - not valid with input as {"data": {"gke": {"name":"test-cluster"}}} +test_authoized_networks_missing if { + not control_plane_access.valid with input as {"data": {"gke": {"name":"test-cluster"}}} } -test_authorized_networks_disabled{ - not valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": {"enabled":false}}}} +test_authorized_networks_disabled if { + not control_plane_access.valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": {"enabled":false}}}} } -test_authorized_networks_no_cidrs_block{ - not valid with input as {"data": 
{"gke": {"name":"test-cluster","master_authorized_networks_config": {"enabled":true}}}} +test_authorized_networks_no_cidrs_block if { + not control_plane_access.valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": {"enabled":true}}}} } -test_authorized_networks_empty_cidrs_block{ - not valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": { +test_authorized_networks_empty_cidrs_block if { + not control_plane_access.valid with input as {"data": {"gke": {"name":"test-cluster","master_authorized_networks_config": { "enabled":true, "cidr_blocks":[] }}}} diff --git a/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego b/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego index d11affaa..b22cc7c3 100644 --- a/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego +++ b/gke-policies-v2/policy/control_plane_disable_cert_authentication.rego @@ -29,21 +29,23 @@ # version: "1.4" # id: "5.8.2" # dataSource: gke - package gke.policy.control_plane_certificate_auth +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.master_auth.client_certificate msg := "Cluster authentication is configured with a client certificate" } -violation[msg] { +violation contains msg if { input.data.gke.master_auth.client_key msg := "Cluster authentication is configured with a client key" } diff --git a/gke-policies-v2/policy/control_plane_disable_cert_authentication_test.rego b/gke-policies-v2/policy/control_plane_disable_cert_authentication_test.rego index f5c419b1..57bdbc2f 100644 --- a/gke-policies-v2/policy/control_plane_disable_cert_authentication_test.rego +++ b/gke-policies-v2/policy/control_plane_disable_cert_authentication_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and 
# limitations under the License. -package gke.policy.control_plane_certificate_auth +package gke.policy.control_plane_certificate_auth_test -test_cluster_without_client_certificate { - valid with input as {"data": {"gke": { +import future.keywords.if +import data.gke.policy.control_plane_certificate_auth + +test_cluster_without_client_certificate if { + control_plane_certificate_auth.valid with input as {"data": {"gke": { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V..." @@ -23,8 +26,8 @@ test_cluster_without_client_certificate { }}} } -test_cluster_client_certificate { - not valid with input as {"data": {"gke": { +test_cluster_client_certificate if { + not control_plane_certificate_auth.valid with input as {"data": {"gke": { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V...", diff --git a/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego b/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego index 852633ba..01f4d790 100644 --- a/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego +++ b/gke-policies-v2/policy/control_plane_disable_legacy_authorization.rego @@ -28,16 +28,18 @@ # version: "1.4" # id: "5.8.4" # dataSource: gke - package gke.policy.disable_legacy_authorization +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.legacy_abac.enabled msg := "Cluster authorization is configured with legacy ABAC" } diff --git a/gke-policies-v2/policy/control_plane_disable_legacy_authorization_test.rego b/gke-policies-v2/policy/control_plane_disable_legacy_authorization_test.rego index 81df42a0..4296daa7 100644 --- a/gke-policies-v2/policy/control_plane_disable_legacy_authorization_test.rego 
+++ b/gke-policies-v2/policy/control_plane_disable_legacy_authorization_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.disable_legacy_authorization +package gke.policy.disable_legacy_authorization_test -test_enabled_legacy_authorization { - not valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": true}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]}}} +import future.keywords.if +import data.gke.policy.disable_legacy_authorization + +test_enabled_legacy_authorization if { + not disable_legacy_authorization.valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": true}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]}}} } -test_disabled_legacy_authorization { - valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]}}} +test_disabled_legacy_authorization if { + disable_legacy_authorization.valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]}}} } diff --git a/gke-policies-v2/policy/control_plane_disable_password_authentication.rego b/gke-policies-v2/policy/control_plane_disable_password_authentication.rego index 9334da29..1f48a8ed 100644 --- a/gke-policies-v2/policy/control_plane_disable_password_authentication.rego +++ b/gke-policies-v2/policy/control_plane_disable_password_authentication.rego @@ -30,21 +30,23 @@ # version: "1.4" # id: "5.8.1" # dataSource: gke - package gke.policy.control_plane_basic_auth +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg 
if { input.data.gke.master_auth.password msg := "Cluster authentication is configured with a client password" } -violation[msg] { +violation contains msg if { input.data.gke.master_auth.username msg := "Cluster authentication is configured with a client username" } diff --git a/gke-policies-v2/policy/control_plane_disable_password_authentication_test.rego b/gke-policies-v2/policy/control_plane_disable_password_authentication_test.rego index 0d01232b..029d007d 100644 --- a/gke-policies-v2/policy/control_plane_disable_password_authentication_test.rego +++ b/gke-policies-v2/policy/control_plane_disable_password_authentication_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.control_plane_basic_auth +package gke.policy.control_plane_basic_auth_test -test_cluster_without_basic_auth { - valid with input as {"data": {"gke": { +import future.keywords.if +import data.gke.policy.control_plane_basic_auth + +test_cluster_without_basic_auth if { + control_plane_basic_auth.valid with input as {"data": {"gke": { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V..." 
@@ -23,8 +26,8 @@ test_cluster_without_basic_auth { }}} } -test_cluster_with_basic_auth { - not valid with input as {"data": {"gke": { +test_cluster_with_basic_auth if { + not control_plane_basic_auth.valid with input as {"data": {"gke": { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V...", diff --git a/gke-policies-v2/policy/control_plane_endpoint.rego b/gke-policies-v2/policy/control_plane_endpoint.rego index 38931df4..73a3b963 100644 --- a/gke-policies-v2/policy/control_plane_endpoint.rego +++ b/gke-policies-v2/policy/control_plane_endpoint.rego @@ -28,16 +28,18 @@ # version: "1.4" # id: "5.6.4" # dataSource: gke - package gke.policy.control_plane_endpoint +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.private_cluster_config.enable_private_endpoint msg := "Cluster is not configured with private endpoint" } diff --git a/gke-policies-v2/policy/control_plane_endpoint_test.rego b/gke-policies-v2/policy/control_plane_endpoint_test.rego index f1c6f51b..f240f975 100644 --- a/gke-policies-v2/policy/control_plane_endpoint_test.rego +++ b/gke-policies-v2/policy/control_plane_endpoint_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.control_plane_endpoint +package gke.policy.control_plane_endpoint_test -test_private_endpoint_enabled { - valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": true}}}} +import future.keywords.if +import data.gke.policy.control_plane_endpoint + +test_private_endpoint_enabled if { + control_plane_endpoint.valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": true}}}} } -test_private_endpoint_disabled { - not valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": false}}}} +test_private_endpoint_disabled if { + not control_plane_endpoint.valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": false}}}} } -test_private_cluster_config_missing { - not valid with input as {"data": {"gke": {"name": "test-cluster"}}} +test_private_cluster_config_missing if { + not control_plane_endpoint.valid with input as {"data": {"gke": {"name": "test-cluster"}}} } diff --git a/gke-policies-v2/policy/control_plane_redundancy.rego b/gke-policies-v2/policy/control_plane_redundancy.rego index 3a14239b..64b4ceb9 100644 --- a/gke-policies-v2/policy/control_plane_redundancy.rego +++ b/gke-policies-v2/policy/control_plane_redundancy.rego @@ -24,23 +24,25 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/regional-clusters # sccCategory: CONTROL_PLANE_ZONAL # dataSource: gke - package gke.policy.control_plane_redundancy +import future.keywords.if +import future.keywords.contains + import data.gke.rule.cluster.location default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.location msg := "Cluster location infromation is missing" } -violation[msg] { +violation contains msg if { not location.regional(input.data.gke.location) msg := 
sprintf("Cluster location %q is not regional", [input.data.gke.location]) } diff --git a/gke-policies-v2/policy/control_plane_redundancy_test.rego b/gke-policies-v2/policy/control_plane_redundancy_test.rego index 8739992d..1c4343f5 100644 --- a/gke-policies-v2/policy/control_plane_redundancy_test.rego +++ b/gke-policies-v2/policy/control_plane_redundancy_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.control_plane_redundancy +package gke.policy.control_plane_redundancy_test -test_control_plane_regional_location { - valid with input as {"data": {"gke": {"name": "test-cluster", "location": "europe-central2"}}} +import future.keywords.if +import data.gke.policy.control_plane_redundancy + +test_control_plane_regional_location if { + control_plane_redundancy.valid with input as {"data": {"gke": {"name": "test-cluster", "location": "europe-central2"}}} } -test_control_plane_zonal_location { - not valid with input as {"data": {"gke": {"name": "test-cluster", "location": "europe-central2-a"}}} +test_control_plane_zonal_location if { + not control_plane_redundancy.valid with input as {"data": {"gke": {"name": "test-cluster", "location": "europe-central2-a"}}} } -test_control_plane_missing_location { - not valid with input as {"data": {"gke": {"name": "test-cluster"}}} +test_control_plane_missing_location if { + not control_plane_redundancy.valid with input as {"data": {"gke": {"name": "test-cluster"}}} } diff --git a/gke-policies-v2/policy/ilb_subsetting.rego b/gke-policies-v2/policy/ilb_subsetting.rego index 7e469252..f9d49b38 100644 --- a/gke-policies-v2/policy/ilb_subsetting.rego +++ b/gke-policies-v2/policy/ilb_subsetting.rego @@ -25,16 +25,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#subsetting # sccCategory: ILB_SUBSETTING_DISABLED # dataSource: gke - package gke.policy.enable_ilb_subsetting +import 
future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.current_node_count > 250 not input.data.gke.network_config.enable_l4ilb_subsetting = true msg := sprintf("Cluster has %v nodes and is not configured with L4 ILB Subsetting", [input.data.gke.current_node_count]) diff --git a/gke-policies-v2/policy/ilb_subsetting_test.rego b/gke-policies-v2/policy/ilb_subsetting_test.rego index 09edd111..3d16b7a3 100644 --- a/gke-policies-v2/policy/ilb_subsetting_test.rego +++ b/gke-policies-v2/policy/ilb_subsetting_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.enable_ilb_subsetting +package gke.policy.enable_ilb_subsetting_test -test_enabled_ilb_subsetting_high_nodes { - valid with input as {"data": {"gke": {"name": "test-cluster", "current_node_count": 251, "network_config": { "enable_l4ilb_subsetting": true }}}} +import future.keywords.if +import data.gke.policy.enable_ilb_subsetting + +test_enabled_ilb_subsetting_high_nodes if { + enable_ilb_subsetting.valid with input as {"data": {"gke": {"name": "test-cluster", "current_node_count": 251, "network_config": { "enable_l4ilb_subsetting": true }}}} } -test_disabled_ilb_subsetting_low_nodes { - valid with input as {"data": {"gke": {"name": "test-cluster", "current_node_count": 3, "network_config": {}}}} +test_disabled_ilb_subsetting_low_nodes if { + enable_ilb_subsetting.valid with input as {"data": {"gke": {"name": "test-cluster", "current_node_count": 3, "network_config": {}}}} } -test_disabled_ilb_subsetting_high_nodes { - not valid with input as {"data": {"gke": {"name": "test-cluster", "current_node_count": 251, "network_config": {}}}} +test_disabled_ilb_subsetting_high_nodes if { + not enable_ilb_subsetting.valid with input as {"data": {"gke": {"name": "test-cluster", "current_node_count": 
251, "network_config": {}}}} } diff --git a/gke-policies-v2/policy/intranode_visibility.rego b/gke-policies-v2/policy/intranode_visibility.rego index 59b87449..12120bcb 100644 --- a/gke-policies-v2/policy/intranode_visibility.rego +++ b/gke-policies-v2/policy/intranode_visibility.rego @@ -28,16 +28,18 @@ # version: "1.4" # id: "5.6.1" # dataSource: gke - package gke.policy.networkConfig +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.networkConfig.enableIntraNodeVisibility = true msg := "Cluster is not configured with Intranode Visibility" } diff --git a/gke-policies-v2/policy/intranode_visibility_test.rego b/gke-policies-v2/policy/intranode_visibility_test.rego index 29b7fd7b..44d1b250 100644 --- a/gke-policies-v2/policy/intranode_visibility_test.rego +++ b/gke-policies-v2/policy/intranode_visibility_test.rego @@ -12,13 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.networkConfig +package gke.policy.networkConfig_test -test_enabled_intranode_visibility { - valid with input as {"data": {"gke": {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": true }}}} +import future.keywords.if +import data.gke.policy.networkConfig + +test_enabled_intranode_visibility if { + networkConfig.valid with input as {"data": {"gke": {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": true }}}} } -test_disabled_intranode_visibility { - not valid with input as {"data": {"gke": {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": false }}}} +test_disabled_intranode_visibility if { + not networkConfig.valid with input as {"data": {"gke": {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": false }}}} } diff --git a/gke-policies-v2/policy/monitoring_and_logging.rego b/gke-policies-v2/policy/monitoring_and_logging.rego index a49bcaed..16be9dfe 100644 --- a/gke-policies-v2/policy/monitoring_and_logging.rego +++ b/gke-policies-v2/policy/monitoring_and_logging.rego @@ -32,21 +32,23 @@ # version: "1.4" # id: "5.7.1" # dataSource: gke - package gke.policy.logging_and_monitoring +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.logging_config.component_config.enable_components msg := "Cluster is not configured with Cloud Logging" } -violation[msg] { +violation contains msg if { not input.data.gke.monitoring_config.component_config.enable_components msg := "Cluster is not configured with Cloud Monitoring" } diff --git a/gke-policies-v2/policy/monitoring_and_logging_test.rego b/gke-policies-v2/policy/monitoring_and_logging_test.rego index 53550e9c..985572d5 100644 --- a/gke-policies-v2/policy/monitoring_and_logging_test.rego +++ b/gke-policies-v2/policy/monitoring_and_logging_test.rego @@ -12,10 +12,13 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -package gke.policy.logging_and_monitoring +package gke.policy.logging_and_monitoring_test -test_enabled_logging_and_monitoring { - valid with input as {"data": {"gke": { +import future.keywords.if +import data.gke.policy.logging_and_monitoring + +test_enabled_logging_and_monitoring if { + logging_and_monitoring.valid with input as {"data": {"gke": { "name": "test-cluster", "logging_config": { "component_config": { @@ -31,8 +34,8 @@ test_enabled_logging_and_monitoring { }}} } -test_disabled_logging { - not valid with input as {"data": {"gke": { +test_disabled_logging if { + not logging_and_monitoring.valid with input as {"data": {"gke": { "name": "test-cluster", "logging_config": {"component_config": {}}, "monitoring_config": { @@ -43,8 +46,8 @@ test_disabled_logging { }}} } -test_disabled_monitoring { - not valid with input as {"data": {"gke": { +test_disabled_monitoring if { + not logging_and_monitoring.valid with input as {"data": {"gke": { "name": "test-cluster", "logging_config": { "component_config": { diff --git a/gke-policies-v2/policy/nap_forbid_default_sa.rego b/gke-policies-v2/policy/nap_forbid_default_sa.rego index 620811c7..1d48e4ee 100644 --- a/gke-policies-v2/policy/nap_forbid_default_sa.rego +++ b/gke-policies-v2/policy/nap_forbid_default_sa.rego @@ -29,16 +29,18 @@ # version: "1.4" # id: "5.2.1" # dataSource: gke - package gke.policy.nap_forbid_default_sa +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.autopilot.enabled input.data.gke.autoscaling.enable_node_autoprovisioning == true input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.service_account == "default" diff --git a/gke-policies-v2/policy/nap_forbid_default_sa_test.rego b/gke-policies-v2/policy/nap_forbid_default_sa_test.rego index 
0951a8be..d85c6845 100644 --- a/gke-policies-v2/policy/nap_forbid_default_sa_test.rego +++ b/gke-policies-v2/policy/nap_forbid_default_sa_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.nap_forbid_default_sa +package gke.policy.nap_forbid_default_sa_test -test_cluster_not_enabled_nap { - valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} +import future.keywords.if +import data.gke.policy.nap_forbid_default_sa + +test_cluster_not_enabled_nap if { + nap_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} } -test_cluster_enabled_nap_with_default_sa { - not valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }}}} +test_cluster_enabled_nap_with_default_sa if { + not nap_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }}}} } -test_cluster_enabled_nap_without_default_sa { - valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "dedicated-sa@project.iam.gserviceaccount.com"} }}}} +test_cluster_enabled_nap_without_default_sa if { + nap_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "dedicated-sa@project.iam.gserviceaccount.com"} }}}} } -test_cluster_autopilot_with_default { - valid with input as {"data": {"gke": {"name": 
"cluster-autopilot", "autopilot": {"enabled": true}, "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }}}} +test_cluster_autopilot_with_default if { + nap_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-autopilot", "autopilot": {"enabled": true}, "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }}}} } diff --git a/gke-policies-v2/policy/nap_forbid_single_zone.rego b/gke-policies-v2/policy/nap_forbid_single_zone.rego index 1651b608..2cc1f673 100644 --- a/gke-policies-v2/policy/nap_forbid_single_zone.rego +++ b/gke-policies-v2/policy/nap_forbid_single_zone.rego @@ -25,16 +25,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning#auto-provisioning_locations # sccCategory: NAP_ZONAL # dataSource: gke - package gke.policy.nap_forbid_single_zone +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.autoscaling.enable_node_autoprovisioning == true count(input.data.gke.autoscaling.autoprovisioning_locations) == 1 msg := "Cluster is not configured with multiple zones for NAP node pools" diff --git a/gke-policies-v2/policy/nap_forbid_single_zone_test.rego b/gke-policies-v2/policy/nap_forbid_single_zone_test.rego index afe033d6..56fc4ecf 100644 --- a/gke-policies-v2/policy/nap_forbid_single_zone_test.rego +++ b/gke-policies-v2/policy/nap_forbid_single_zone_test.rego @@ -12,18 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.nap_forbid_single_zone +package gke.policy.nap_forbid_single_zone_test -test_cluster_not_enabled_nap { - valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} +import future.keywords.if +import data.gke.policy.nap_forbid_single_zone + +test_cluster_not_enabled_nap if { + nap_forbid_single_zone.valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} } -test_cluster_enabled_nap_without_enabled_autoprovisioning_locations_not_enabled { - valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true}}}} +test_cluster_enabled_nap_without_enabled_autoprovisioning_locations_not_enabled if { + nap_forbid_single_zone.valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true}}}} } -test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_multiple { - valid with input as {"data": {"gke": { +test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_multiple if { + nap_forbid_single_zone.valid with input as {"data": {"gke": { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, @@ -35,8 +38,8 @@ test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_multiple { }}} } -test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_single { - not valid with input as {"data": {"gke": { +test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_single if { + not nap_forbid_single_zone.valid with input as {"data": {"gke": { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, diff --git a/gke-policies-v2/policy/nap_integrity_monitoring.rego b/gke-policies-v2/policy/nap_integrity_monitoring.rego index 94ec5a24..5f9f5573 100644 --- a/gke-policies-v2/policy/nap_integrity_monitoring.rego +++ 
b/gke-policies-v2/policy/nap_integrity_monitoring.rego @@ -33,16 +33,18 @@ # version: "1.4" # id: "5.5.6" # dataSource: gke - package gke.policy.nap_integrity_monitoring +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.autoscaling.enable_node_autoprovisioning == true input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.shielded_instance_config.enable_integrity_monitoring == false msg := "Cluster is not configured with integrity monitoring for NAP node pools" diff --git a/gke-policies-v2/policy/nap_integrity_monitoring_test.rego b/gke-policies-v2/policy/nap_integrity_monitoring_test.rego index ba40bf2c..bbf53ec7 100644 --- a/gke-policies-v2/policy/nap_integrity_monitoring_test.rego +++ b/gke-policies-v2/policy/nap_integrity_monitoring_test.rego @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.nap_integrity_monitoring +package gke.policy.nap_integrity_monitoring_test -test_cluster_not_enabled_nap { - valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} +import future.keywords.if +import data.gke.policy.nap_integrity_monitoring + +test_cluster_not_enabled_nap if { + nap_integrity_monitoring.valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} } -test_cluster_enabled_nap_with_integrity_monitoring_enabled { - valid with input as {"data": {"gke": { +test_cluster_enabled_nap_with_integrity_monitoring_enabled if { + nap_integrity_monitoring.valid with input as {"data": {"gke": { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, @@ -30,8 +33,8 @@ test_cluster_enabled_nap_with_integrity_monitoring_enabled { }}} } -test_cluster_enabled_nap_without_integrity_monitoring_enabled { - not valid with input as {"data": {"gke": { +test_cluster_enabled_nap_without_integrity_monitoring_enabled if { + not nap_integrity_monitoring.valid with input as {"data": {"gke": { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, diff --git a/gke-policies-v2/policy/nap_use_cos.rego b/gke-policies-v2/policy/nap_use_cos.rego index d7d813b3..69255cce 100644 --- a/gke-policies-v2/policy/nap_use_cos.rego +++ b/gke-policies-v2/policy/nap_use_cos.rego @@ -28,18 +28,19 @@ # version: "1.4" # id: "5.5.1" # dataSource: gke - package gke.policy.nap_use_cos import future.keywords.in +import future.keywords.if +import future.keywords.contains default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.autoscaling.enable_node_autoprovisioning == true not lower(input.data.gke.autoscaling.autoprovisioning_node_pool_defaults.image_type) in { "cos", "cos_containerd"} msg := "Cluster is not configured with 
COS for NAP node pools" diff --git a/gke-policies-v2/policy/nap_use_cos_test.rego b/gke-policies-v2/policy/nap_use_cos_test.rego index d236173c..9c275dc3 100644 --- a/gke-policies-v2/policy/nap_use_cos_test.rego +++ b/gke-policies-v2/policy/nap_use_cos_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.nap_use_cos +package gke.policy.nap_use_cos_test -test_cluster_not_enabled_nap { - valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} +import future.keywords.if +import data.gke.policy.nap_use_cos + +test_cluster_not_enabled_nap if { + nap_use_cos.valid with input as {"data": {"gke": {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}}}} } -test_cluster_enabled_nap_without_cos { - not valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "ANOTHER"}}}}} +test_cluster_enabled_nap_without_cos if { + not nap_use_cos.valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "ANOTHER"}}}}} } -test_cluster_enabled_nap_with_cos_containerd { - valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS_CONTAINERD"}} }}} +test_cluster_enabled_nap_with_cos_containerd if { + nap_use_cos.valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS_CONTAINERD"}} }}} } -test_cluster_enabled_nap_with_cos { - valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": 
{"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS"}} }}} +test_cluster_enabled_nap_with_cos if { + nap_use_cos.valid with input as {"data": {"gke": {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS"}} }}} } diff --git a/gke-policies-v2/policy/network_policies.rego b/gke-policies-v2/policy/network_policies.rego index 98f65b5b..f70ef6d2 100644 --- a/gke-policies-v2/policy/network_policies.rego +++ b/gke-policies-v2/policy/network_policies.rego @@ -28,30 +28,32 @@ # version: "1.4" # id: "5.6.7" # dataSource: gke - package gke.policy.network_policies_engine +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.addons_config.network_policy_config.disabled not input.data.gke.network_policy not input.data.gke.network_config.datapath_provider == 2 msg := "Cluster is not configured with Kubneretes Network Policies" } -violation[msg] { +violation contains msg if { count(input.data.gke.addons_config.network_policy_config) == 0 not input.data.gke.network_policy.enabled not input.data.gke.network_config.datapath_provider == 2 msg := "Cluster is configured with Kubneretes Network Policies without configuration" } -violation[msg] { +violation contains msg if { input.data.gke.addons_config.network_policy_config.disabled count(input.data.gke.network_policy) == 0 not input.data.gke.network_config.datapath_provider == 2 diff --git a/gke-policies-v2/policy/network_policies_test.rego b/gke-policies-v2/policy/network_policies_test.rego index d0d1f65d..9e04e887 100644 --- a/gke-policies-v2/policy/network_policies_test.rego +++ b/gke-policies-v2/policy/network_policies_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.network_policies_engine +package gke.policy.network_policies_engine_test -test_dataplane_v1_without_netpol { - not valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "private_cluster_config": {"enable_private_nodes": true}, "network_config": {"datapath_provider": 1}}}} +import future.keywords.if +import data.gke.policy.network_policies_engine + +test_dataplane_v1_without_netpol if { + not network_policies_engine.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "private_cluster_config": {"enable_private_nodes": true}, "network_config": {"datapath_provider": 1}}}} } -test_dataplane_v1_with_netpol_disabled { - not valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": false}, "network_config": {"datapath_provider": 1}}}} +test_dataplane_v1_with_netpol_disabled if { + not network_policies_engine.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": false}, "network_config": {"datapath_provider": 1}}}} } -test_dataplane_v1_without_netpol_conf { - not valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {}}}} +test_dataplane_v1_without_netpol_conf if { + not network_policies_engine.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {}}}} } -test_dataplane_v1_with_netpol { - valid with input as {"data": {"gke": {"name": 
"test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": true}}}} +test_dataplane_v1_with_netpol if { + network_policies_engine.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": true}}}} } -test_dataplane_v2_with_netpol { - valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "network_config": {"datapath_provider": 2}}}} +test_dataplane_v2_with_netpol if { + network_policies_engine.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "network_config": {"datapath_provider": 2}}}} } diff --git a/gke-policies-v2/policy/node_local_dns_cache.rego b/gke-policies-v2/policy/node_local_dns_cache.rego index 9bc533a5..64f353f2 100644 --- a/gke-policies-v2/policy/node_local_dns_cache.rego +++ b/gke-policies-v2/policy/node_local_dns_cache.rego @@ -25,16 +25,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/nodelocal-dns-cache # sccCategory: DNS_CACHE_DISABLED # dataSource: gke - package gke.policy.node_local_dns_cache +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.addons_config.dns_cache_config.enabled = true msg := "Cluster is not configured with node local DNS cache" } diff --git a/gke-policies-v2/policy/node_local_dns_cache_test.rego b/gke-policies-v2/policy/node_local_dns_cache_test.rego index 88883a6a..ab4533d1 100644 --- a/gke-policies-v2/policy/node_local_dns_cache_test.rego +++ b/gke-policies-v2/policy/node_local_dns_cache_test.rego @@ -12,17 +12,19 @@ # See the License for the 
specific language governing permissions and # limitations under the License. -package gke.policy.node_local_dns_cache +package gke.policy.node_local_dns_cache_test +import future.keywords.if +import data.gke.policy.node_local_dns_cache -test_enabled_node_local_dns_cache { - valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": true }}}}} +test_enabled_node_local_dns_cache if { + node_local_dns_cache.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": true }}}}} } -test_absent_dns_cache_config { - not valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {}}}} +test_absent_dns_cache_config if { + not node_local_dns_cache.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": {}}}} } -test_disabled_node_local_dns_cache { - not valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": false }}}}} +test_disabled_node_local_dns_cache if { + not node_local_dns_cache.valid with input as {"data": {"gke": {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": false }}}}} } diff --git a/gke-policies-v2/policy/node_pool_autorepair.rego b/gke-policies-v2/policy/node_pool_autorepair.rego index f39e8037..0496f2d5 100644 --- a/gke-policies-v2/policy/node_pool_autorepair.rego +++ b/gke-policies-v2/policy/node_pool_autorepair.rego @@ -29,17 +29,20 @@ # version: "1.4" # id: "5.5.2" # dataSource: gke - package gke.policy.node_pool_autorepair +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].management.auto_repair - msg := sprintf("Node pool %q is not configured with auto-repair", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in 
input.data.gke.node_pools + not pool.management.auto_repair + msg := sprintf("Node pool %q is not configured with auto-repair", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_autorepair_test.rego b/gke-policies-v2/policy/node_pool_autorepair_test.rego index e6e27aec..94875166 100644 --- a/gke-policies-v2/policy/node_pool_autorepair_test.rego +++ b/gke-policies-v2/policy/node_pool_autorepair_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_autorepair +package gke.policy.node_pool_autorepair_test -test_autorepair_for_node_pool_enabled { - valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +import future.keywords.if +import data.gke.policy.node_pool_autorepair + +test_autorepair_for_node_pool_enabled if { + node_pool_autorepair.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_autorepair_for_node_pool_disabled{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": false, "auto_upgrade": true }}]}}} +test_autorepair_for_node_pool_disabled if { + not node_pool_autorepair.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": false, "auto_upgrade": true }}]}}} } -test_autorepair_for_multiple_node_pools_but_only_one_disabled{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": true }}]}}} +test_autorepair_for_multiple_node_pools_but_only_one_disabled 
if { + not node_pool_autorepair.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": true }}]}}} } -test_autorepair_for_node_pool_empty_managment{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]}}} +test_autorepair_for_node_pool_empty_managment if { + not node_pool_autorepair.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]}}} } -test_autorepair_for_managment_without_auto_repair_field{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_upgrade": true }}]}}} +test_autorepair_for_managment_without_auto_repair_field if { + not node_pool_autorepair.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_upgrade": true }}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_autoscaling.rego b/gke-policies-v2/policy/node_pool_autoscaling.rego index e9423c92..3cbb9f1d 100644 --- a/gke-policies-v2/policy/node_pool_autoscaling.rego +++ b/gke-policies-v2/policy/node_pool_autoscaling.rego @@ -27,17 +27,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler # sccCategory: NODEPOOL_AUTOSCALING_DISABLED # dataSource: gke - package gke.policy.node_pool_autoscaling +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].autoscaling.enabled - msg := sprintf("Node pool %q is not configured with autoscaling", [input.data.gke.node_pools[pool].name]) +violation 
contains msg if { + some pool in input.data.gke.node_pools + not pool.autoscaling.enabled + msg := sprintf("Node pool %q is not configured with autoscaling", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_autoscaling_test.rego b/gke-policies-v2/policy/node_pool_autoscaling_test.rego index 072c66b5..bfa73324 100644 --- a/gke-policies-v2/policy/node_pool_autoscaling_test.rego +++ b/gke-policies-v2/policy/node_pool_autoscaling_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_autoscaling +package gke.policy.node_pool_autoscaling_test -test_node_pool_autoscaling_enabled { - valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}]}}} +import future.keywords.if +import data.gke.policy.node_pool_autoscaling + +test_node_pool_autoscaling_enabled if { + node_pool_autoscaling.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}]}}} } -test_node_pool_autoscaling_disabled { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": false}}]}}} +test_node_pool_autoscaling_disabled if { + not node_pool_autoscaling.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": false}}]}}} } -test_multiple_node_pool_autoscaling_but_only_one_enabled { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": false}}]}}} +test_multiple_node_pool_autoscaling_but_only_one_enabled if { + not node_pool_autoscaling.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": 
false}}]}}} } -test_multiple_node_pool_autoscaling_enabled { - valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": true}}]}}} +test_multiple_node_pool_autoscaling_enabled if { + node_pool_autoscaling.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": true}}]}}} } -test_node_pool_without_autoscaling_field { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default"}]}}} +test_node_pool_without_autoscaling_field if { + not node_pool_autoscaling.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default"}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_autoupgrade.rego b/gke-policies-v2/policy/node_pool_autoupgrade.rego index 5d2aaed8..c9a5e090 100644 --- a/gke-policies-v2/policy/node_pool_autoupgrade.rego +++ b/gke-policies-v2/policy/node_pool_autoupgrade.rego @@ -29,17 +29,20 @@ # version: "1.4" # id: "5.5.3" # dataSource: gke - package gke.policy.node_pool_autoupgrade +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].management.auto_upgrade - msg := sprintf("Node pool %q is not configured with auto-upgrade", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + not pool.management.auto_upgrade + msg := sprintf("Node pool %q is not configured with auto-upgrade", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_autoupgrade_test.rego b/gke-policies-v2/policy/node_pool_autoupgrade_test.rego index 1b9e15b0..7847ba6c 100644 --- a/gke-policies-v2/policy/node_pool_autoupgrade_test.rego +++ 
b/gke-policies-v2/policy/node_pool_autoupgrade_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_autoupgrade +package gke.policy.node_pool_autoupgrade_test -test_autoupgrade_for_node_pool_enabled { - valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +import future.keywords.if +import data.gke.policy.node_pool_autoupgrade + +test_autoupgrade_for_node_pool_enabled if { + node_pool_autoupgrade.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_autoupgrade_for_node_pool_disabled{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": false }}]}}} +test_autoupgrade_for_node_pool_disabled if { + not node_pool_autoupgrade.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": false }}]}}} } -test_autoupgrade_for_multiple_node_pools_but_only_one_disabled{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": false }}]}}} +test_autoupgrade_for_multiple_node_pools_but_only_one_disabled if { + not node_pool_autoupgrade.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": false }}]}}} } -test_autoupgrade_for_node_pool_empty_managment{ - not valid 
with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]}}} +test_autoupgrade_for_node_pool_empty_managment if { + not node_pool_autoupgrade.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]}}} } -test_autoupgrade_for_managment_without_auto_upgrade_field{ - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true }}]}}} +test_autoupgrade_for_managment_without_auto_upgrade_field if { + not node_pool_autoupgrade.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true }}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_disk_encryption.rego b/gke-policies-v2/policy/node_pool_disk_encryption.rego index 984f9fd8..82f17374 100644 --- a/gke-policies-v2/policy/node_pool_disk_encryption.rego +++ b/gke-policies-v2/policy/node_pool_disk_encryption.rego @@ -30,17 +30,20 @@ # version: "1.4" # id: "5.9.1" # dataSource: gke - package gke.policy.node_pool_cmek +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].config.boot_disk_kms_key - msg := sprintf("Node pool %q is not configured with CMEK for the boot disk", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + not pool.config.boot_disk_kms_key + msg := sprintf("Node pool %q is not configured with CMEK for the boot disk", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_disk_encryption_test.rego b/gke-policies-v2/policy/node_pool_disk_encryption_test.rego index 0e922ba8..d8b67502 100644 --- a/gke-policies-v2/policy/node_pool_disk_encryption_test.rego +++ 
b/gke-policies-v2/policy/node_pool_disk_encryption_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_cmek +package gke.policy.node_pool_cmek_test -test_cluster_node_pool_with_cmek { - valid with input as {"data": {"gke": { +import future.keywords.if +import data.gke.policy.node_pool_cmek + +test_cluster_node_pool_with_cmek if { + node_pool_cmek.valid with input as {"data": {"gke": { "name": "cluster-test", "node_pools": [ { @@ -28,8 +31,8 @@ test_cluster_node_pool_with_cmek { }}} } -test_cluster_node_pool_without_cmek { - not valid with input as {"data": {"gke": { +test_cluster_node_pool_without_cmek if { + not node_pool_cmek.valid with input as {"data": {"gke": { "name": "cluster-test", "node_pools": [ { diff --git a/gke-policies-v2/policy/node_pool_forbid_default_sa.rego b/gke-policies-v2/policy/node_pool_forbid_default_sa.rego index 211c9587..20980d13 100644 --- a/gke-policies-v2/policy/node_pool_forbid_default_sa.rego +++ b/gke-policies-v2/policy/node_pool_forbid_default_sa.rego @@ -29,18 +29,21 @@ # version: "1.4" # id: "5.2.1" # dataSource: gke - package gke.policy.node_pool_forbid_default_sa +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.autopilot.enabled - some pool - input.data.gke.node_pools[pool].config.service_account == "default" - msg := sprintf("Node pool %q is configured with default SA", [input.data.gke.node_pools[pool].name]) + some pool in input.data.gke.node_pools + pool.config.service_account == "default" + msg := sprintf("Node pool %q is configured with default SA", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_forbid_default_sa_test.rego b/gke-policies-v2/policy/node_pool_forbid_default_sa_test.rego index d3de4a8e..17abac15 100644 --- 
a/gke-policies-v2/policy/node_pool_forbid_default_sa_test.rego +++ b/gke-policies-v2/policy/node_pool_forbid_default_sa_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_forbid_default_sa +package gke.policy.node_pool_forbid_default_sa_test -test_cluster_with_2_np_and_mixed_sas { - not valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]}}} +import future.keywords.if +import data.gke.policy.node_pool_forbid_default_sa + +test_cluster_with_2_np_and_mixed_sas if { + not node_pool_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": 
["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]}}} } -test_cluster_with_2_np_and_dedicated_sas { - valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]}}} +test_cluster_with_2_np_and_dedicated_sas if { + node_pool_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": 
["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]}}} } -test_cluster_with_1_np_and_default_sa { - not valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]}}} +test_cluster_with_1_np_and_default_sa if { + not node_pool_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]}}} } -test_cluster_with_1_np_and_dedicated_sa { - valid with input as {"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]}}} +test_cluster_with_1_np_and_dedicated_sa if { + node_pool_forbid_default_sa.valid with input as 
{"data": {"gke": {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]}}} } -test_autopilot_with_default { - valid with input as {"data": {"gke": {"name": "cluster-1", "autopilot": {"enabled": true}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD"}}]}}} +test_autopilot_with_default if { + node_pool_forbid_default_sa.valid with input as {"data": {"gke": {"name": "cluster-1", "autopilot": {"enabled": true}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD"}}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_integrity_monitoring.rego b/gke-policies-v2/policy/node_pool_integrity_monitoring.rego index bfe8101b..68be5c5f 100644 --- a/gke-policies-v2/policy/node_pool_integrity_monitoring.rego +++ b/gke-policies-v2/policy/node_pool_integrity_monitoring.rego @@ -28,17 +28,20 @@ # version: "1.4" # id: "5.5.6" # dataSource: gke - package gke.policy.node_pool_integrity_monitoring +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].config.shielded_instance_config.enable_integrity_monitoring - msg := sprintf("Node pool %q is not configured with integrity monitoring", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + not 
pool.config.shielded_instance_config.enable_integrity_monitoring + msg := sprintf("Node pool %q is not configured with integrity monitoring", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_integrity_monitoring_test.rego b/gke-policies-v2/policy/node_pool_integrity_monitoring_test.rego index bf5c09b0..5d12bacc 100644 --- a/gke-policies-v2/policy/node_pool_integrity_monitoring_test.rego +++ b/gke-policies-v2/policy/node_pool_integrity_monitoring_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_integrity_monitoring +package gke.policy.node_pool_integrity_monitoring_test -test_empty_shielded_instance_config { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]}}} +import future.keywords.if +import data.gke.policy.node_pool_integrity_monitoring + +test_empty_shielded_instance_config if { + not node_pool_integrity_monitoring.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]}}} } -test_disabled_integrity_monitoring { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_integrity_monitoring": false}}}]}}} +test_disabled_integrity_monitoring if { + not node_pool_integrity_monitoring.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_integrity_monitoring": false}}}]}}} } -test_enabled_integrity_monitoring { - valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", 
"shielded_instance_config":{"enable_integrity_monitoring": true}}}]}}} +test_enabled_integrity_monitoring if { + node_pool_integrity_monitoring.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_integrity_monitoring": true}}}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_multi_zone.rego b/gke-policies-v2/policy/node_pool_multi_zone.rego index 752a59fe..1961c8b5 100644 --- a/gke-policies-v2/policy/node_pool_multi_zone.rego +++ b/gke-policies-v2/policy/node_pool_multi_zone.rego @@ -26,17 +26,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools#multiple-zones # sccCategory: NODEPOOL_ZONAL # dataSource: gke - package gke.policy.node_pool_multi_zone +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - count(input.data.gke.node_pools[pool].locations) < 2 - msg := sprintf("Node pool %q is not configured with multiple zones", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + count(pool.locations) < 2 + msg := sprintf("Node pool %q is not configured with multiple zones", [pool.name]) } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_multi_zone_test.rego b/gke-policies-v2/policy/node_pool_multi_zone_test.rego index 92ef0c5a..ef0a2af9 100644 --- a/gke-policies-v2/policy/node_pool_multi_zone_test.rego +++ b/gke-policies-v2/policy/node_pool_multi_zone_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_multi_zone +package gke.policy.node_pool_multi_zone_test -test_node_pool_one_zone { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a"]}]}}} +import future.keywords.if +import data.gke.policy.node_pool_multi_zone + +test_node_pool_one_zone if { + not node_pool_multi_zone.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a"]}]}}} } -test_node_pool_two_zones { - valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b"]}]}}} +test_node_pool_two_zones if { + node_pool_multi_zone.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b"]}]}}} } -test_node_pool_three_zones { - valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b", "us-central1-c"]}]}}} +test_node_pool_three_zones if { + node_pool_multi_zone.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b", "us-central1-c"]}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_secure_boot.rego b/gke-policies-v2/policy/node_pool_secure_boot.rego index 1969c7b4..b5b6f780 100644 --- a/gke-policies-v2/policy/node_pool_secure_boot.rego +++ b/gke-policies-v2/policy/node_pool_secure_boot.rego @@ -28,17 +28,20 @@ # version: "1.4" # id: "5.5.7" # dataSource: gke - package gke.policy.node_pool_secure_boot +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].config.shielded_instance_config.enable_secure_boot - msg := sprintf("Node 
pool %q is not configured with secure boot", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + not pool.config.shielded_instance_config.enable_secure_boot + msg := sprintf("Node pool %q is not configured with secure boot", [pool.name]) } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_secure_boot_test.rego b/gke-policies-v2/policy/node_pool_secure_boot_test.rego index d0cd3930..c91d4a7f 100644 --- a/gke-policies-v2/policy/node_pool_secure_boot_test.rego +++ b/gke-policies-v2/policy/node_pool_secure_boot_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_secure_boot +package gke.policy.node_pool_secure_boot_test -test_empty_shielded_instance_config { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]}}} +import future.keywords.if +import data.gke.policy.node_pool_secure_boot + +test_empty_shielded_instance_config if { + not node_pool_secure_boot.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]}}} } -test_disabled_secure_boot { - not valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": false}}}]}}} +test_disabled_secure_boot if { + not node_pool_secure_boot.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": false}}}]}}} } -test_enabled_secure_boot { - valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": 
{"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": true}}}]}}} +test_enabled_secure_boot if { + node_pool_secure_boot.valid with input as {"data": {"gke": {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": true}}}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_use_cos.rego b/gke-policies-v2/policy/node_pool_use_cos.rego index a9674f30..12fe9337 100644 --- a/gke-policies-v2/policy/node_pool_use_cos.rego +++ b/gke-policies-v2/policy/node_pool_use_cos.rego @@ -30,18 +30,20 @@ # version: "1.4" # id: "5.5.1" # dataSource: gke - package gke.policy.node_pool_use_cos +import future.keywords.if +import future.keywords.contains + import future.keywords.in default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { some pool not lower(input.data.gke.node_pools[pool].config.image_type) in {"cos", "cos_containerd"} not startswith(lower(input.data.gke.node_pools[pool].config.image_type), "windows") diff --git a/gke-policies-v2/policy/node_pool_use_cos_test.rego b/gke-policies-v2/policy/node_pool_use_cos_test.rego index 3d863c2b..483321a3 100644 --- a/gke-policies-v2/policy/node_pool_use_cos_test.rego +++ b/gke-policies-v2/policy/node_pool_use_cos_test.rego @@ -12,36 +12,39 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_use_cos +package gke.policy.node_pool_use_cos_test -test_node_pool_using_cos { - valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}}]}}} +import future.keywords.if +import data.gke.policy.node_pool_use_cos + +test_node_pool_using_cos if { + node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}}]}}} } -test_node_pool_using_cos_containerd { - valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos_containerd"}}]}}} +test_node_pool_using_cos_containerd if { + node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos_containerd"}}]}}} } -test_node_pool_using_cos_uppercase { - valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS"}}]}}} +test_node_pool_using_cos_uppercase if { + node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS"}}]}}} } -test_node_pool_using_cos_containerd_uppercase { - valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS_CONTAINERD"}}]}}} +test_node_pool_using_cos_containerd_uppercase if { + node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS_CONTAINERD"}}]}}} } -test_node_pool_not_using_cos { - not valid with input as {"data": {"gke": {"name": "cluster-not-cos", "node_pools": [{"name": "default", "config": {"image_type": "another_image"}}]}}} +test_node_pool_not_using_cos if { + not node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-not-cos", "node_pools": 
[{"name": "default", "config": {"image_type": "another_image"}}]}}} } -test_multiple_node_pool_using_cos_but_only_one { - not valid with input as {"data": {"gke": {"name": "cluster-not-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "other"}}]}}} +test_multiple_node_pool_using_cos_but_only_one if { + not node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-not-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "other"}}]}}} } -test_multiple_node_pool_using_cos { - valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "cos_containerd"}}]}}} +test_multiple_node_pool_using_cos if { + node_pool_use_cos.valid with input as {"data": {"gke": {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "cos_containerd"}}]}}} } -test_windows_node_pool { - valid with input as {"data": {"gke": {"name": "windows-server", "node_pools": [{"name": "default", "config": {"image_type": "windows-server"}}]}}} +test_windows_node_pool if { + node_pool_use_cos.valid with input as {"data": {"gke": {"name": "windows-server", "node_pools": [{"name": "default", "config": {"image_type": "windows-server"}}]}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/node_pool_version_skew.rego b/gke-policies-v2/policy/node_pool_version_skew.rego index 4fee1705..f89663fa 100644 --- a/gke-policies-v2/policy/node_pool_version_skew.rego +++ b/gke-policies-v2/policy/node_pool_version_skew.rego @@ -28,54 +28,57 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/upgrading-a-cluster#upgrading-nodes # sccCategory: NODEPOOL_VERSION_SKEW_UNSUPPORTED # dataSource: gke - package gke.policy.node_pool_version_skew +import 
future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false expr := `^([0-9]+)\.([0-9]+)\.([0-9]+)(-.+)*$` -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.current_master_version msg := "Control plane version is undefined" } -violation[msg] { - some node_pool - not input.data.gke.node_pools[node_pool].version - msg := sprintf("Node pool %q version is undefined", [input.data.gke.node_pools[node_pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + not pool.version + msg := sprintf("Node pool %q version is undefined", [pool.name]) } -violation[msg] { +violation contains msg if { master_ver := regex.find_all_string_submatch_n(expr, input.data.gke.current_master_version, 1) count(master_ver) == 0 msg := sprintf("Control plane version %q does not match version regex", [input.data.gke.current_master_version]) } -violation[msg] { - some node_pool - node_pool_ver := regex.find_all_string_submatch_n(expr, input.data.gke.node_pools[node_pool].version, 1) +violation contains msg if { + some pool in input.data.gke.node_pools + node_pool_ver := regex.find_all_string_submatch_n(expr, pool.version, 1) count(node_pool_ver) == 0 - msg := sprintf("Node pool %q version %q does not match version regex", [input.data.gke.node_pools[node_pool].name, input.data.gke.node_pools[node_pool].version]) + msg := sprintf("Node pool %q version %q does not match version regex", [pool.name, pool.version]) } -violation[msg] { +violation contains msg if { master_ver := regex.find_all_string_submatch_n(expr, input.data.gke.current_master_version, 1) - some node_pool - node_pool_ver := regex.find_all_string_submatch_n(expr, input.data.gke.node_pools[node_pool].version, 1) + some pool in input.data.gke.node_pools + node_pool_ver := regex.find_all_string_submatch_n(expr, pool.version, 1) master_ver[0][1] != node_pool_ver[0][1] - msg := sprintf("Node pool %q and control 
plane major versions differ", [input.data.gke.node_pools[node_pool].name]) + msg := sprintf("Node pool %q and control plane major versions differ", [pool.name]) } -violation[msg] { +violation contains msg if { master_ver := regex.find_all_string_submatch_n(expr, input.data.gke.current_master_version, 1) - some node_pool - node_pool_ver := regex.find_all_string_submatch_n(expr, input.data.gke.node_pools[node_pool].version, 1) + some pool in input.data.gke.node_pools + node_pool_ver := regex.find_all_string_submatch_n(expr, pool.version, 1) minor_diff := to_number(master_ver[0][2]) - to_number(node_pool_ver[0][2]) abs(minor_diff) > 2 - msg := sprintf("Node pool %q and control plane minor versions difference is greater than 2", [input.data.gke.node_pools[node_pool].name]) + msg := sprintf("Node pool %q and control plane minor versions difference is greater than 2", [pool.name]) } diff --git a/gke-policies-v2/policy/node_pool_version_skew_test.rego b/gke-policies-v2/policy/node_pool_version_skew_test.rego index 8684c2f8..85649e63 100644 --- a/gke-policies-v2/policy/node_pool_version_skew_test.rego +++ b/gke-policies-v2/policy/node_pool_version_skew_test.rego @@ -12,32 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_version_skew +package gke.policy.node_pool_version_skew_test -test_empty_master_version { - not valid with input as {"data": {"gke": {"name":"cluster","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]}}} +import future.keywords.if +import data.gke.policy.node_pool_version_skew + +test_empty_master_version if { + not node_pool_version_skew.valid with input as {"data": {"gke": {"name":"cluster","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]}}} } -test_empty_nodepool_version { - not valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool"}]}}} +test_empty_nodepool_version if { + not node_pool_version_skew.valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool"}]}}} } -test_invalid_master_version { - not valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.A","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]}}} +test_invalid_master_version if { + not node_pool_version_skew.valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.A","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]}}} } -test_invalid_nodepool_version { - not valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"1.22"}]}}} +test_invalid_nodepool_version if { + not node_pool_version_skew.valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"1.22"}]}}} } -test_different_major { - not valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"2.22.1-gke.200"}]}}} +test_different_major if { + not node_pool_version_skew.valid with input 
as {"data": {"gke": {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"2.22.1-gke.200"}]}}} } -test_greater_minor { - not valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.21.5-gke.200"}]}}} +test_greater_minor if { + not node_pool_version_skew.valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.21.5-gke.200"}]}}} } -test_good_minor { - valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.22.5-gke.200"}]}}} +test_good_minor if { + node_pool_version_skew.valid with input as {"data": {"gke": {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.22.5-gke.200"}]}}} } diff --git a/gke-policies-v2/policy/node_rbac_security_group.rego b/gke-policies-v2/policy/node_rbac_security_group.rego index e33f63e4..39ff37c7 100644 --- a/gke-policies-v2/policy/node_rbac_security_group.rego +++ b/gke-policies-v2/policy/node_rbac_security_group.rego @@ -31,16 +31,18 @@ # version: "1.4" # id: "5.8.3" # dataSource: gke - package gke.policy.rbac_security_group_enabled +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.authenticator_groups_config.enabled msg := "Cluster is not configured with Google Groups for RBAC" } diff --git a/gke-policies-v2/policy/node_rbac_security_group_test.rego b/gke-policies-v2/policy/node_rbac_security_group_test.rego index bd4ab898..bef68a67 100644 --- a/gke-policies-v2/policy/node_rbac_security_group_test.rego +++ b/gke-policies-v2/policy/node_rbac_security_group_test.rego @@ -12,16 +12,19 @@ # See the License for the 
specific language governing permissions and # limitations under the License. -package gke.policy.rbac_security_group_enabled +package gke.policy.rbac_security_group_enabled_test -test_rbac_group_enabled { - valid with input as {"data": {"gke": {"name": "cluster1", "authenticator_groups_config": {"enabled": true}}}} +import future.keywords.if +import data.gke.policy.rbac_security_group_enabled + +test_rbac_group_enabled if { + rbac_security_group_enabled.valid with input as {"data": {"gke": {"name": "cluster1", "authenticator_groups_config": {"enabled": true}}}} } -test_rbac_group_disabled { - not valid with input as {"data": {"gke": {"name": "cluster1", "authenticator_groups_config": {"enabled": false}}}} +test_rbac_group_disabled if { + not rbac_security_group_enabled.valid with input as {"data": {"gke": {"name": "cluster1", "authenticator_groups_config": {"enabled": false}}}} } -test_rbac_group_without_authenticator_group { - not valid with input as {"data": {"gke": {"name": "cluster1"}}} +test_rbac_group_without_authenticator_group if { + not rbac_security_group_enabled.valid with input as {"data": {"gke": {"name": "cluster1"}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/private_cluster.rego b/gke-policies-v2/policy/private_cluster.rego index fd92d0a0..6596238e 100644 --- a/gke-policies-v2/policy/private_cluster.rego +++ b/gke-policies-v2/policy/private_cluster.rego @@ -27,16 +27,18 @@ # version: "1.4" # id: "5.6.5" # dataSource: gke - package gke.policy.private_cluster +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.private_cluster_config.enable_private_nodes msg := "Cluster is not configured with private nodes" } diff --git a/gke-policies-v2/policy/private_cluster_test.rego b/gke-policies-v2/policy/private_cluster_test.rego index 3bf7c9ee..cc76eb8b 100644 --- 
a/gke-policies-v2/policy/private_cluster_test.rego +++ b/gke-policies-v2/policy/private_cluster_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.private_cluster +package gke.policy.private_cluster_test -test_private_nodes_enabled { - valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}}}} +import future.keywords.if +import data.gke.policy.private_cluster + +test_private_nodes_enabled if { + private_cluster.valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}}}} } -test_private_nodes_disabled { - not valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}}}} +test_private_nodes_disabled if { + not private_cluster.valid with input as {"data": {"gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}}}} } -test_private_cluster_config_missing { - not valid with input as {"data": {"gke": {"name": "test-cluster"}}} +test_private_cluster_config_missing if { + not private_cluster.valid with input as {"data": {"gke": {"name": "test-cluster"}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/secret_encryption.rego b/gke-policies-v2/policy/secret_encryption.rego index c0f6bab0..fee89d17 100644 --- a/gke-policies-v2/policy/secret_encryption.rego +++ b/gke-policies-v2/policy/secret_encryption.rego @@ -30,16 +30,18 @@ # version: "1.4" # id: "5.3.1" # dataSource: gke - package gke.policy.secret_encryption +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.data.gke.database_encryption.state != 1 msg := "Cluster is not configured with kubernetes secrets encryption" } diff --git 
a/gke-policies-v2/policy/secret_encryption_test.rego b/gke-policies-v2/policy/secret_encryption_test.rego index 82815d83..ae8121f5 100644 --- a/gke-policies-v2/policy/secret_encryption_test.rego +++ b/gke-policies-v2/policy/secret_encryption_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.secret_encryption +package gke.policy.secret_encryption_test -test_enabled_encryption { - valid with input as {"data": {"gke": {"name": "cluster-1", "database_encryption": {"state": 1}}}} +import future.keywords.if +import data.gke.policy.secret_encryption + +test_enabled_encryption if { + secret_encryption.valid with input as {"data": {"gke": {"name": "cluster-1", "database_encryption": {"state": 1}}}} } -test_disabled_encryption { - not valid with input as {"data": {"gke": {"name": "cluster-1", "database_encryption": {"state": 2}}}} +test_disabled_encryption if { + not secret_encryption.valid with input as {"data": {"gke": {"name": "cluster-1", "database_encryption": {"state": 2}}}} } diff --git a/gke-policies-v2/policy/shielded_nodes.rego b/gke-policies-v2/policy/shielded_nodes.rego index 3a0bfece..ed26dbd7 100644 --- a/gke-policies-v2/policy/shielded_nodes.rego +++ b/gke-policies-v2/policy/shielded_nodes.rego @@ -28,16 +28,18 @@ # version: "1.4" # id: "5.5.5" # dataSource: gke - package gke.policy.shielded_nodes +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.shielded_nodes.enabled = true msg := "Cluster is not configured with shielded nodes" } diff --git a/gke-policies-v2/policy/shielded_nodes_test.rego b/gke-policies-v2/policy/shielded_nodes_test.rego index ffc8040d..7e852469 100644 --- a/gke-policies-v2/policy/shielded_nodes_test.rego +++ b/gke-policies-v2/policy/shielded_nodes_test.rego @@ -12,12 +12,15 @@ # See the License 
for the specific language governing permissions and # limitations under the License. -package gke.policy.shielded_nodes +package gke.policy.shielded_nodes_test -test_enabled_shielded_nodes { - valid with input as {"data": {"gke": {"name": "test-cluster", "shielded_nodes": { "enabled": true }}}} +import future.keywords.if +import data.gke.policy.shielded_nodes + +test_enabled_shielded_nodes if { + shielded_nodes.valid with input as {"data": {"gke": {"name": "test-cluster", "shielded_nodes": { "enabled": true }}}} } -test_disabled_shielded_nodes { - not valid with input as {"data": {"gke": {"name": "test-cluster", "shielded_nodes": {}}}} +test_disabled_shielded_nodes if { + not shielded_nodes.valid with input as {"data": {"gke": {"name": "test-cluster", "shielded_nodes": {}}}} } diff --git a/gke-policies-v2/policy/vpc_native_cluster.rego b/gke-policies-v2/policy/vpc_native_cluster.rego index f9a6a91d..f119641d 100644 --- a/gke-policies-v2/policy/vpc_native_cluster.rego +++ b/gke-policies-v2/policy/vpc_native_cluster.rego @@ -28,22 +28,25 @@ # version: "1.4" # id: "5.6.2" # dataSource: gke - package gke.policy.vpc_native_cluster +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.data.gke.node_pools[pool].network_config.pod_ipv4_cidr_block - msg := sprintf("Nodepool %q is not configured with use VPC-native routing", [input.data.gke.node_pools[pool].name]) +violation contains msg if { + some pool in input.data.gke.node_pools + not pool.network_config.pod_ipv4_cidr_block + msg := sprintf("Nodepool %q is not configured with use VPC-native routing", [pool.name]) } -violation[msg] { +violation contains msg if { not input.data.gke.ip_allocation_policy.use_ip_aliases msg := "Cluster is not configured with VPC-native routing" } diff --git a/gke-policies-v2/policy/vpc_native_cluster_test.rego 
b/gke-policies-v2/policy/vpc_native_cluster_test.rego index 19949cbe..af3ca847 100644 --- a/gke-policies-v2/policy/vpc_native_cluster_test.rego +++ b/gke-policies-v2/policy/vpc_native_cluster_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.vpc_native_cluster +package gke.policy.vpc_native_cluster_test -test_vpc_native_cluster_with_pods_range { - valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}, "node_pools": [{"name": "default", "network_config": { "pod_range": "gke-cluster-1-vpc-pods-273c12cd", "pod_ipv4_cidr_block": "10.48.0.0/14" }, "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +import future.keywords.if +import data.gke.policy.vpc_native_cluster + +test_vpc_native_cluster_with_pods_range if { + vpc_native_cluster.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}, "node_pools": [{"name": "default", "network_config": { "pod_range": "gke-cluster-1-vpc-pods-273c12cd", "pod_ipv4_cidr_block": "10.48.0.0/14" }, "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_vpc_native_cluster_without_pods_range { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} +test_vpc_native_cluster_without_pods_range if { + not vpc_native_cluster.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]}}} } -test_vpc_native_cluster_using_ip_aliases { - valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}}}} +test_vpc_native_cluster_using_ip_aliases if { + vpc_native_cluster.valid with input as {"data": {"gke": 
{"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}}}} } -test_vpc_native_cluster_not_using_ip_aliases { - not valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": false}}}} +test_vpc_native_cluster_not_using_ip_aliases if { + not vpc_native_cluster.valid with input as {"data": {"gke": {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": false}}}} } \ No newline at end of file diff --git a/gke-policies-v2/policy/workload_identity.rego b/gke-policies-v2/policy/workload_identity.rego index 34114b37..28f61f7c 100644 --- a/gke-policies-v2/policy/workload_identity.rego +++ b/gke-policies-v2/policy/workload_identity.rego @@ -30,16 +30,18 @@ # version: "1.4" # id: "5.2.2" # dataSource: gke - package gke.policy.workload_identity +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.data.gke.workload_identity_config.workload_pool msg := "Cluster is not configured with Workload Identity" } diff --git a/gke-policies-v2/policy/workload_identity_test.rego b/gke-policies-v2/policy/workload_identity_test.rego index 109a6dd1..49de58f8 100644 --- a/gke-policies-v2/policy/workload_identity_test.rego +++ b/gke-policies-v2/policy/workload_identity_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.workload_identity +package gke.policy.workload_identity_test -test_enabled_workload_identity { - valid with input as {"data": {"gke": {"name": "test-cluster", "workload_identity_config": { "workload_pool": "foo_pool.svc.id.goog" }}}} +import future.keywords.if +import data.gke.policy.workload_identity + +test_enabled_workload_identity if { + workload_identity.valid with input as {"data": {"gke": {"name": "test-cluster", "workload_identity_config": { "workload_pool": "foo_pool.svc.id.goog" }}}} } -test_disabled_workload_identity { - not valid with input as {"data": {"gke": {"name": "test-cluster"}}} +test_disabled_workload_identity if { + not workload_identity.valid with input as {"data": {"gke": {"name": "test-cluster"}}} } diff --git a/gke-policies-v2/rule/cluster/location.rego b/gke-policies-v2/rule/cluster/location.rego index f3d80dad..97d938b3 100644 --- a/gke-policies-v2/rule/cluster/location.rego +++ b/gke-policies-v2/rule/cluster/location.rego @@ -14,10 +14,12 @@ package gke.rule.cluster.location -regional(location) { +import future.keywords.if + +regional(location) if { regex.match(`^[^-]+-[^-]+$`, location) } -zonal(location) { +zonal(location) if { regex.match(`^[^-]+-[^-]+-[^-]+$`, location) } diff --git a/gke-policies-v2/rule/cluster/location_test.rego b/gke-policies-v2/rule/cluster/location_test.rego index 68c13b08..1a743c78 100644 --- a/gke-policies-v2/rule/cluster/location_test.rego +++ b/gke-policies-v2/rule/cluster/location_test.rego @@ -12,22 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.rule.cluster.location +package gke.rule.cluster.location_test -test_regional { - location := "europe-central2" - regional(location) - not zonal(location) +import future.keywords.if +import data.gke.rule.cluster.location + +test_regional if { + loc := "europe-central2" + location.regional(loc) + not location.zonal(loc) } -test_zonal { - location := "europe-central2-a" - zonal(location) - not regional(location) +test_zonal if { + loc := "europe-central2-a" + location.zonal(loc) + not location.regional(loc) } -test_not_regional_nor_zonal { - location := "test" - not regional(location) - not zonal(location) +test_not_regional_nor_zonal if { + loc := "test" + not location.regional(loc) + not location.zonal(loc) } diff --git a/gke-policies-v2/rule/nodepool/location.rego b/gke-policies-v2/rule/nodepool/location.rego index 491d99d8..deef525f 100644 --- a/gke-policies-v2/rule/nodepool/location.rego +++ b/gke-policies-v2/rule/nodepool/location.rego @@ -14,12 +14,15 @@ package gke.rule.nodepool.location -regional[nodepool] { +import future.keywords.if +import future.keywords.contains + +regional contains nodepool if { nodepool := input.data.gke.node_pools[_] count(nodepool.locations) > 1 } -zonal[nodepool] { +zonal contains nodepool if { nodepool := input.data.gke.node_pools[_] count(nodepool.locations) < 2 } \ No newline at end of file diff --git a/gke-policies-v2/scalability/limit_namespaces.rego b/gke-policies-v2/scalability/limit_namespaces.rego index 962b00f2..862cfcaa 100644 --- a/gke-policies-v2/scalability/limit_namespaces.rego +++ b/gke-policies-v2/scalability/limit_namespaces.rego @@ -24,18 +24,20 @@ # externalURI: https://github.com/kubernetes/community/blob/master/sig-scalability/configs-and-limits/thresholds.md # sccCategory: NAMESPACES_LIMIT # dataSource: monitoring - package gke.scalability.namespaces +import future.keywords.if +import future.keywords.contains + default valid := false default limit := 10000 default threshold := 80 -valid { +valid 
if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit * threshold * 0.01) input.data.monitoring.namespaces.scalar > warn_limit msg := sprintf("Total number of namespaces %d has reached warning level %d (limit is %d)", [input.data.monitoring.namespaces.scalar, warn_limit, limit]) diff --git a/gke-policies-v2/scalability/limit_secrets_encryption.rego b/gke-policies-v2/scalability/limit_secrets_encryption.rego index dda17c96..2fec4014 100644 --- a/gke-policies-v2/scalability/limit_secrets_encryption.rego +++ b/gke-policies-v2/scalability/limit_secrets_encryption.rego @@ -24,18 +24,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/planning-large-clusters#limits-best-practices-large-scale-clusters # sccCategory: SECRETS_WITH_ENCRYPTION_LIMIT # dataSource: monitoring, gke - package gke.scalability.secrets_with_enc +import future.keywords.if +import future.keywords.contains + default valid := false default limit := 30000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit * threshold * 0.01) secrets_cnt := input.data.monitoring.secrets.scalar input.data.gke.database_encryption.state == 1 diff --git a/gke-policies-v2/scalability/limit_secrets_encryption_test.rego b/gke-policies-v2/scalability/limit_secrets_encryption_test.rego index 65880bd7..1ca5cc45 100644 --- a/gke-policies-v2/scalability/limit_secrets_encryption_test.rego +++ b/gke-policies-v2/scalability/limit_secrets_encryption_test.rego @@ -12,18 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.secrets_with_enc +package gke.scalability.secrets_with_enc_test -test_secrets_with_enc_above_warn_limit { - not valid with input as {"data": {"monitoring": {"secrets": { "name": "secrets", "scalar": 28000}}, "gke": {"name": "cluster-1", "database_encryption": {"state": 1}}}} +import future.keywords.if +import data.gke.scalability.secrets_with_enc + +test_secrets_with_enc_above_warn_limit if { + not secrets_with_enc.valid with input as {"data": {"monitoring": {"secrets": { "name": "secrets", "scalar": 28000}}, "gke": {"name": "cluster-1", "database_encryption": {"state": 1}}}} } -test_secrets_with_enc_below_warn_limit { - valid with input as {"data": {"monitoring": {"secrets": { "name": "secrets", "scalar": 307}}, "gke": {"name": "cluster-1", "database_encryption": {"state": 1}}}} +test_secrets_with_enc_below_warn_limit if { + secrets_with_enc.valid with input as {"data": {"monitoring": {"secrets": { "name": "secrets", "scalar": 307}}, "gke": {"name": "cluster-1", "database_encryption": {"state": 1}}}} } -test_secrets_no_enc_above_warn_limit { - valid with input as {"data": {"monitoring": {"secrets": { "name": "secrets", "scalar": 28000}}, "gke": {"name": "cluster-1", "database_encryption": {"state": 2}}}} +test_secrets_no_enc_above_warn_limit if { + secrets_with_enc.valid with input as {"data": {"monitoring": {"secrets": { "name": "secrets", "scalar": 28000}}, "gke": {"name": "cluster-1", "database_encryption": {"state": 2}}}} } diff --git a/gke-policies-v2/scalability/limit_services.rego b/gke-policies-v2/scalability/limit_services.rego index 08d59c2b..dcac8c83 100644 --- a/gke-policies-v2/scalability/limit_services.rego +++ b/gke-policies-v2/scalability/limit_services.rego @@ -25,18 +25,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/planning-large-clusters#limits-best-practices-large-scale-clusters # sccCategory: SERVICES_LIMIT # dataSource: monitoring - package gke.scalability.services +import 
future.keywords.if +import future.keywords.contains + default valid := false default limit := 10000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit * threshold * 0.01) input.data.monitoring.services.scalar > warn_limit msg := sprintf("Total number of services %d has reached warning level %d (limit is %d)", [input.data.monitoring.services.scalar, warn_limit, limit]) diff --git a/gke-policies-v2/scalability/limit_services_per_ns.rego b/gke-policies-v2/scalability/limit_services_per_ns.rego index fac6e461..1bae9ada 100644 --- a/gke-policies-v2/scalability/limit_services_per_ns.rego +++ b/gke-policies-v2/scalability/limit_services_per_ns.rego @@ -25,18 +25,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/planning-large-clusters#limits-best-practices-large-scale-clusters # sccCategory: SERVICES_PER_NS_LIMIT # dataSource: monitoring - package gke.scalability.services_per_ns +import future.keywords.if +import future.keywords.contains + default valid := false default limit := 5000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit * threshold * 0.01) some namespace srv_cnt := input.data.monitoring.services_per_ns.vector[namespace] diff --git a/gke-policies-v2/scalability/limit_services_per_ns_test.rego b/gke-policies-v2/scalability/limit_services_per_ns_test.rego index 4bacc1a5..78ac5665 100644 --- a/gke-policies-v2/scalability/limit_services_per_ns_test.rego +++ b/gke-policies-v2/scalability/limit_services_per_ns_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.services_per_ns +package gke.scalability.services_per_ns_test -test_services_per_ns_above_warn_limit { - not valid with input as {"data": {"monitoring": {"services_per_ns": { "name": "services_per_ns", "vector": {"default": 1, "kube-system": 4, "demo-test":4523}}}}} +import future.keywords.if +import data.gke.scalability.services_per_ns + +test_services_per_ns_above_warn_limit if { + not services_per_ns.valid with input as {"data": {"monitoring": {"services_per_ns": { "name": "services_per_ns", "vector": {"default": 1, "kube-system": 4, "demo-test":4523}}}}} } -test_services_per_ns_below_warn_limit { - valid with input as {"data": {"monitoring": {"services_per_ns": { "name": "services_per_ns", "vector": {"default": 1, "kube-system": 4, "demo-test":453}}}}} +test_services_per_ns_below_warn_limit if { + services_per_ns.valid with input as {"data": {"monitoring": {"services_per_ns": { "name": "services_per_ns", "vector": {"default": 1, "kube-system": 4, "demo-test":453}}}}} } diff --git a/gke-policies-v2/scalability/limit_services_test.rego b/gke-policies-v2/scalability/limit_services_test.rego index 92a015ff..8f76fa7d 100644 --- a/gke-policies-v2/scalability/limit_services_test.rego +++ b/gke-policies-v2/scalability/limit_services_test.rego @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.services +package gke.scalability.services_test -test_services_above_warn_limit { - not valid with input as {"data": {"monitoring": {"services": { "name": "services", "scalar": 8840}}}} +import future.keywords.if +import data.gke.scalability.services + +test_services_above_warn_limit if { + not services.valid with input as {"data": {"monitoring": {"services": { "name": "services", "scalar": 8840}}}} } -test_services_below_warn_limit { - valid with input as {"data": {"monitoring": {"services": { "name": "services", "scalar": 6400}}}} +test_services_below_warn_limit if { + services.valid with input as {"data": {"monitoring": {"services": { "name": "services", "scalar": 6400}}}} } diff --git a/gke-policies-v2/scalability/limits_containers.rego b/gke-policies-v2/scalability/limits_containers.rego index 35173312..6fa62c2f 100644 --- a/gke-policies-v2/scalability/limits_containers.rego +++ b/gke-policies-v2/scalability/limits_containers.rego @@ -24,26 +24,28 @@ # externalURI: https://cloud.google.com/kubernetes-engine/quotas # sccCategory: CONTAINERS_LIMIT # dataSource: monitoring, gke - package gke.scalability.containers +import future.keywords.if +import future.keywords.contains + default valid := false default limit_standard := 400000 default limit_autopilot := 24000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit_standard * threshold * 0.01) not input.data.gke.autopilot.enabled input.data.monitoring.containers.scalar > warn_limit msg := sprintf("Total number of containers %d has reached warning level %d (limit is %d for standard clusters)", [input.data.monitoring.containers.scalar, warn_limit, limit_standard]) } -violation[msg] { +violation contains msg if { warn_limit := round(limit_autopilot * threshold * 0.01) input.data.gke.autopilot.enabled input.data.monitoring.containers.scalar > warn_limit diff --git 
a/gke-policies-v2/scalability/limits_containers_test.rego b/gke-policies-v2/scalability/limits_containers_test.rego index 2a20f1c7..7191a108 100644 --- a/gke-policies-v2/scalability/limits_containers_test.rego +++ b/gke-policies-v2/scalability/limits_containers_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.scalability.containers +package gke.scalability.containers_test -test_containers_above_warn_limit_std { - not valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 352000}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} +import future.keywords.if +import data.gke.scalability.containers + +test_containers_above_warn_limit_std if { + not containers.valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 352000}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} } -test_containers_below_warn_limit_std { - valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 121303}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} +test_containers_below_warn_limit_std if { + containers.valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 121303}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} } -test_containers_above_warn_limit_auto { - not valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 121303}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_containers_above_warn_limit_auto if { + not containers.valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 121303}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } -test_containers_below_warn_limit_auto { - valid with input as {"data": {"monitoring": {"containers": { "name": 
"containers", "scalar": 18403}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_containers_below_warn_limit_auto if { + containers.valid with input as {"data": {"monitoring": {"containers": { "name": "containers", "scalar": 18403}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } diff --git a/gke-policies-v2/scalability/limits_hpas.rego b/gke-policies-v2/scalability/limits_hpas.rego index c29b00ad..53b382da 100644 --- a/gke-policies-v2/scalability/limits_hpas.rego +++ b/gke-policies-v2/scalability/limits_hpas.rego @@ -24,18 +24,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/horizontalpodautoscaler#scalability # sccCategory: HPAS_OPTIMAL_LIMIT # dataSource: monitoring - package gke.scalability.hpas +import future.keywords.if +import future.keywords.contains + default valid := false default limit := 300 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit * threshold * 0.01) hpas := input.data.monitoring.hpas.scalar hpas > warn_limit diff --git a/gke-policies-v2/scalability/limits_hpas_test.rego b/gke-policies-v2/scalability/limits_hpas_test.rego index f56bc237..63dbde0e 100644 --- a/gke-policies-v2/scalability/limits_hpas_test.rego +++ b/gke-policies-v2/scalability/limits_hpas_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.hpas +package gke.scalability.hpas_test -test_hpas_above_warn_limit { - not valid with input as {"data": {"monitoring": {"hpas": { "name": "hpas", "scalar": 254}}}} +import future.keywords.if +import data.gke.scalability.hpas + +test_hpas_above_warn_limit if { + not hpas.valid with input as {"data": {"monitoring": {"hpas": { "name": "hpas", "scalar": 254}}}} } -test_hpas_below_warn_limit { - valid with input as {"data": {"monitoring": {"hpas": { "name": "hpas", "scalar": 180}}}} +test_hpas_below_warn_limit if { + hpas.valid with input as {"data": {"monitoring": {"hpas": { "name": "hpas", "scalar": 180}}}} } diff --git a/gke-policies-v2/scalability/limits_nodes.rego b/gke-policies-v2/scalability/limits_nodes.rego index 35124e75..cb5953ab 100644 --- a/gke-policies-v2/scalability/limits_nodes.rego +++ b/gke-policies-v2/scalability/limits_nodes.rego @@ -26,9 +26,11 @@ # externalURI: https://cloud.google.com/kubernetes-engine/quotas # sccCategory: NODES_LIMIT # dataSource: monitoring, gke - package gke.scalability.nodes +import future.keywords.if +import future.keywords.contains + default valid := false default private_nodes_limit := 15000 @@ -36,11 +38,11 @@ default public_nodes_limit := 5000 default autopilot_nodes_limit := 1000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(private_nodes_limit * threshold * 0.01) nodes := input.data.monitoring.nodes.scalar is_private := input.data.gke.private_cluster_config.enable_private_nodes @@ -49,7 +51,7 @@ violation[msg] { msg := sprintf("nodes found: %d higher than the limit for private clusters: %d", [nodes, warn_limit]) } -violation[msg] { +violation contains msg if { warn_limit := round(public_nodes_limit * threshold * 0.01) nodes := input.data.monitoring.nodes.scalar is_private := input.data.gke.private_cluster_config.enable_private_nodes @@ -58,7 +60,7 @@ violation[msg] { msg := sprintf("nodes found: %d higher 
than the limit for non private clusters: %d", [nodes, warn_limit]) } -violation[msg] { +violation contains msg if { warn_limit := round(autopilot_nodes_limit * threshold * 0.01) nodes := input.data.monitoring.nodes.scalar input.data.gke.autopilot.enabled diff --git a/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego b/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego index 452c8bb8..1c4e23ba 100644 --- a/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego +++ b/gke-policies-v2/scalability/limits_nodes_per_pool_zone.rego @@ -25,18 +25,20 @@ # externalURI: https://cloud.google.com/kubernetes-engine/quotas # sccCategory: NODES_PER_POOL_ZONE_LIMIT # dataSource: monitoring, gke - package gke.scalability.nodes_per_pool_zone +import future.keywords.if +import future.keywords.contains + default valid := false default limit := 1000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit * threshold * 0.01) some nodepool, zone not input.data.gke.autopilot.enabled diff --git a/gke-policies-v2/scalability/limits_nodes_per_pool_zone_test.rego b/gke-policies-v2/scalability/limits_nodes_per_pool_zone_test.rego index 371e14ee..ba6af2a1 100644 --- a/gke-policies-v2/scalability/limits_nodes_per_pool_zone_test.rego +++ b/gke-policies-v2/scalability/limits_nodes_per_pool_zone_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.nodes_per_pool_zone +package gke.scalability.nodes_per_pool_zone_test -test_nodes_per_pool_zone_above_warn_limit { - not valid with input as {"data": {"monitoring": {"nodes_per_pool_zone": { "name": "nodes_per_pool_zone", "vector": {"default-pool": {"europe-central2-a": 642, "europe-central2-b": 734,"europe-central2-a": 821}}}}}} +import future.keywords.if +import data.gke.scalability.nodes_per_pool_zone + +test_nodes_per_pool_zone_above_warn_limit if { + not nodes_per_pool_zone.valid with input as {"data": {"monitoring": {"nodes_per_pool_zone": { "name": "nodes_per_pool_zone", "vector": {"default-pool": {"europe-central2-a": 642, "europe-central2-b": 734,"europe-central2-a": 821}}}}}} } -test_nodes_per_pool_zone_below_warn_limit { - valid with input as {"data": {"monitoring": {"nodes_per_pool_zone": { "name": "nodes_per_pool_zone", "vector": {"default-pool": {"europe-central2-a": 642, "europe-central2-b": 734,"europe-central2-a": 690}}}}}} +test_nodes_per_pool_zone_below_warn_limit if { + nodes_per_pool_zone.valid with input as {"data": {"monitoring": {"nodes_per_pool_zone": { "name": "nodes_per_pool_zone", "vector": {"default-pool": {"europe-central2-a": 642, "europe-central2-b": 734,"europe-central2-a": 690}}}}}} } -test_nodes_per_pool_zone_autopilot { - valid with input as {"data": {"monitoring": {"nodes_per_pool_zone": { "name": "nodes_per_pool_zone", "vector": {"default-pool": {"europe-central2-a": 642, "europe-central2-b": 734,"europe-central2-a": 821}}}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_nodes_per_pool_zone_autopilot if { + nodes_per_pool_zone.valid with input as {"data": {"monitoring": {"nodes_per_pool_zone": { "name": "nodes_per_pool_zone", "vector": {"default-pool": {"europe-central2-a": 642, "europe-central2-b": 734,"europe-central2-a": 821}}}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } diff --git a/gke-policies-v2/scalability/limits_nodes_test.rego 
b/gke-policies-v2/scalability/limits_nodes_test.rego index 33e92f79..5161c3d0 100644 --- a/gke-policies-v2/scalability/limits_nodes_test.rego +++ b/gke-policies-v2/scalability/limits_nodes_test.rego @@ -12,28 +12,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.scalability.nodes +package gke.scalability.nodes_test -test_nodes_nbr_not_exceeded_for_private { - valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 1}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}}}} +import future.keywords.if +import data.gke.scalability.nodes + +test_nodes_nbr_not_exceeded_for_private if { + nodes.valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 1}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}}}} } -test_nodes_nbr_exceeded_for_private { - not valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 16000}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}}}} +test_nodes_nbr_exceeded_for_private if { + not nodes.valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 16000}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}}}} } -test_nodes_nbr_not_exceeded_for_public { - valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 1}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}}}} +test_nodes_nbr_not_exceeded_for_public if { + nodes.valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 1}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}}}} } -test_nodes_nbr_exceeded_for_public { - not valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 6000}}, "gke": {"name": 
"test-cluster", "private_cluster_config": {"enable_private_nodes": false}}}} +test_nodes_nbr_exceeded_for_public if { + not nodes.valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 6000}}, "gke": {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}}}} } -test_nodes_nbr_not_exceeded_for_autopilot { - valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 30}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_nodes_nbr_not_exceeded_for_autopilot if { + nodes.valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 30}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } -test_nodes_nbr_exceeded_for_autopilot { - not valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 900}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_nodes_nbr_exceeded_for_autopilot if { + not nodes.valid with input as {"data": {"monitoring": {"nodes": { "name": "nodes", "scalar": 900}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } \ No newline at end of file diff --git a/gke-policies-v2/scalability/limits_pods.rego b/gke-policies-v2/scalability/limits_pods.rego index b1abbaec..aa9e76f6 100644 --- a/gke-policies-v2/scalability/limits_pods.rego +++ b/gke-policies-v2/scalability/limits_pods.rego @@ -24,26 +24,28 @@ # externalURI: https://cloud.google.com/kubernetes-engine/quotas # sccCategory: PODS_LIMIT # dataSource: monitoring - package gke.scalability.pods +import future.keywords.if +import future.keywords.contains + default valid := false default limit_standard := 200000 default limit_autopilot := 12000 default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { warn_limit := round(limit_standard * threshold * 0.01) not input.data.gke.autopilot.enabled input.data.monitoring.pods.scalar > warn_limit msg := 
sprintf("Total number of pods %d has reached warning level %d (limit is %d for standard clusters)", [input.data.monitoring.pods.scalar, warn_limit, limit_standard]) } -violation[msg] { +violation contains msg if { warn_limit := round(limit_autopilot * threshold * 0.01) input.data.gke.autopilot input.data.gke.autopilot.enabled diff --git a/gke-policies-v2/scalability/limits_pods_per_node.rego b/gke-policies-v2/scalability/limits_pods_per_node.rego index 9c0cb4ef..859968a0 100644 --- a/gke-policies-v2/scalability/limits_pods_per_node.rego +++ b/gke-policies-v2/scalability/limits_pods_per_node.rego @@ -25,17 +25,19 @@ # externalURI: https://cloud.google.com/kubernetes-engine/quotas # sccCategory: PODS_PER_NODE_LIMIT # dataSource: monitoring, gke - package gke.scalability.pods_per_node +import future.keywords.if +import future.keywords.contains + default valid := false default threshold := 80 -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { some nodepool, node pods_cnt := input.data.monitoring.pods_per_node.vector[nodepool][node] pooldata := [object | object := input.data.gke.node_pools[_]; object.name == nodepool] diff --git a/gke-policies-v2/scalability/limits_pods_per_node_test.rego b/gke-policies-v2/scalability/limits_pods_per_node_test.rego index 55bdc533..dc5bfb8a 100644 --- a/gke-policies-v2/scalability/limits_pods_per_node_test.rego +++ b/gke-policies-v2/scalability/limits_pods_per_node_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.pods_per_node +package gke.scalability.pods_per_node_test -test_pods_per_node_above_warn_limit { - not valid with input as {"data": {"monitoring": {"pods_per_node": { "name": "pods_per_node", "vector": {"default-pool": {"gke-cluster-demo-default-pool-0767d05a-lkkp": 46, "gke-cluster-demo-default-pool-0f74dd4f-3zsv": 97}}}}, "gke":{"node_pools":[{"name": "default-pool", "max_pods_constraint":{"max_pods_per_node":110}}]}}} +import future.keywords.if +import data.gke.scalability.pods_per_node + +test_pods_per_node_above_warn_limit if { + not pods_per_node.valid with input as {"data": {"monitoring": {"pods_per_node": { "name": "pods_per_node", "vector": {"default-pool": {"gke-cluster-demo-default-pool-0767d05a-lkkp": 46, "gke-cluster-demo-default-pool-0f74dd4f-3zsv": 97}}}}, "gke":{"node_pools":[{"name": "default-pool", "max_pods_constraint":{"max_pods_per_node":110}}]}}} } -test_pods_per_node_below_warn_limit { - valid with input as {"data": {"monitoring": {"pods_per_node": { "name": "pods_per_node", "vector": {"default-pool": {"gke-cluster-demo-default-pool-0767d05a-lkkp": 46, "gke-cluster-demo-default-pool-0f74dd4f-3zsv": 32}}}}, "gke":{"node_pools":[{"name": "default-pool", "max_pods_constraint":{"max_pods_per_node":64}}]}}} +test_pods_per_node_below_warn_limit if { + pods_per_node.valid with input as {"data": {"monitoring": {"pods_per_node": { "name": "pods_per_node", "vector": {"default-pool": {"gke-cluster-demo-default-pool-0767d05a-lkkp": 46, "gke-cluster-demo-default-pool-0f74dd4f-3zsv": 32}}}}, "gke":{"node_pools":[{"name": "default-pool", "max_pods_constraint":{"max_pods_per_node":64}}]}}} } diff --git a/gke-policies-v2/scalability/limits_pods_test.rego b/gke-policies-v2/scalability/limits_pods_test.rego index fbd826a9..fe6d6c7a 100644 --- a/gke-policies-v2/scalability/limits_pods_test.rego +++ b/gke-policies-v2/scalability/limits_pods_test.rego @@ -11,21 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 
implied. # See the License for the specific language governing permissions and # limitations under the License. +package gke.scalability.pods_test -package gke.scalability.pods +import future.keywords.if +import data.gke.scalability.pods -test_pods_above_warn_limit_std { - not valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 160424}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} +test_pods_above_warn_limit_std if { + not pods.valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 160424}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} } -test_pods_below_warn_limit_std { - valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 98504}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} +test_pods_below_warn_limit_std if { + pods.valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 98504}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": false}}}} } -test_pods_above_warn_limit_auto { - not valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 98504}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_pods_above_warn_limit_auto if { + not pods.valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 98504}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } -test_pods_below_warn_limit_auto { - valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 5050}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} +test_pods_below_warn_limit_auto if { + pods.valid with input as {"data": {"monitoring": {"pods": { "name": "pods", "scalar": 5050}}, "gke": {"name": "test-cluster", "autopilot": {"enabled": true}}}} } diff --git a/gke-policies/policy/autopilot_cluster.rego b/gke-policies/policy/autopilot_cluster.rego index 929552d3..b3475394 100644 --- 
a/gke-policies/policy/autopilot_cluster.rego +++ b/gke-policies/policy/autopilot_cluster.rego @@ -13,26 +13,29 @@ # limitations under the License. # METADATA -# title: GKE Autopilot mode +# title: Use GKE Autopilot mode # description: GKE Autopilot mode is the recommended way to operate a GKE cluster # custom: # group: Management # severity: Medium # recommendation: > -# Autopilot mode (recommended): GKE manages the underlying infrastructure such as node configuration, +# Autopilot mode (recommended): GKE manages the underlying infrastructure such as node configuration, # autoscaling, auto-upgrades, baseline security configurations, and baseline networking configuration. # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/choose-cluster-mode # sccCategory: AUTOPILOT_DISABLED - +# dataSource: gke package gke.policy.autopilot -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.autopilot.enabled - msg := "GKE Autopilot mode is the recommended way to operate a GKE cluster" + msg := "Cluster is not using Autopilot mode" } diff --git a/gke-policies/policy/autopilot_cluster_test.rego b/gke-policies/policy/autopilot_cluster_test.rego index cb7fd555..1ee37f75 100644 --- a/gke-policies/policy/autopilot_cluster_test.rego +++ b/gke-policies/policy/autopilot_cluster_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.autopilot +package gke.policy.autopilot_test -test_autopilot_mode_enabled { - valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}, "autopilot": {"enabled": true}} +import future.keywords.if +import data.gke.policy.autopilot + +test_autopilot_mode_enabled if { + autopilot.valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}, "autopilot": {"enabled": true}} } -test_autopilot_mode_disabled { - not valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}, "autopilot": {}} +test_autopilot_mode_disabled if { + not autopilot.valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}, "autopilot": {}} } diff --git a/gke-policies/policy/cluster_binary_authorization.rego b/gke-policies/policy/cluster_binary_authorization.rego index 07fb9578..2106a505 100644 --- a/gke-policies/policy/cluster_binary_authorization.rego +++ b/gke-policies/policy/cluster_binary_authorization.rego @@ -27,16 +27,19 @@ # cis: # version: "1.4" # id: "5.10.5" - +# dataSource: gke package gke.policy.cluster_binary_authorization -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.binary_authorization.enabled - msg := "GKE cluster has not configured binary authorization policies" + msg := "Cluster is not configured with binary authorization" } diff --git a/gke-policies/policy/cluster_binary_authorization_test.rego b/gke-policies/policy/cluster_binary_authorization_test.rego index f960b4f2..a49e6d50 100644 --- a/gke-policies/policy/cluster_binary_authorization_test.rego +++ b/gke-policies/policy/cluster_binary_authorization_test.rego @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under 
the License. -package gke.policy.cluster_binary_authorization +package gke.policy.cluster_binary_authorization_test -test_cluster_not_configured_binary_authorization { - not valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +import future.keywords.if +import data.gke.policy.cluster_binary_authorization + +test_cluster_not_configured_binary_authorization if { + not cluster_binary_authorization.valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_cluster_configured_binary_authorization { - valid with input as { +test_cluster_configured_binary_authorization if { + cluster_binary_authorization.valid with input as { "name": "cluster-not-repairing", "binary_authorization": { "enabled": true diff --git a/gke-policies/policy/cluster_enable_security_posture.rego b/gke-policies/policy/cluster_enable_security_posture.rego index d368c644..dacb91ef 100644 --- a/gke-policies/policy/cluster_enable_security_posture.rego +++ b/gke-policies/policy/cluster_enable_security_posture.rego @@ -27,16 +27,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/about-security-posture-dashboard # sccCategory: SECURITY_POSTURE_DISABLED # dataSource: gke - package gke.policy.cluster_security_posture +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.security_posture_config.mode == 2 - msg := "GKE cluster has not enabled Security Posture" + msg := "Cluster is not configured with Security Posture" } diff --git a/gke-policies/policy/cluster_enable_security_posture_test.rego b/gke-policies/policy/cluster_enable_security_posture_test.rego index 6d187322..087c8ad2 100644 ---
a/gke-policies/policy/cluster_enable_security_posture_test.rego +++ b/gke-policies/policy/cluster_enable_security_posture_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.cluster_security_posture +package gke.policy.cluster_security_posture_test -test_cluster_enabled_security_posture { - valid with input as { +import future.keywords.if +import data.gke.policy.cluster_security_posture + +test_cluster_enabled_security_posture if { + cluster_security_posture.valid with input as { "name": "cluster-test", "security_posture_config": { "mode": 2, @@ -24,8 +27,8 @@ test_cluster_enabled_security_posture { } } -test_cluster_unknown_security_posture { - not valid with input as { +test_cluster_unknown_security_posture if { + not cluster_security_posture.valid with input as { "name": "cluster-test", "security_posture_config": { "mode": 0, @@ -34,8 +37,8 @@ test_cluster_unknown_security_posture { } } -test_cluster_disabled_security_posture { - not valid with input as { +test_cluster_disabled_security_posture if { + not cluster_security_posture.valid with input as { "name": "cluster-test", "security_posture_config": { "mode": 1, @@ -44,8 +47,8 @@ test_cluster_disabled_security_posture { } } -test_cluster_missing_security_posture { - not valid with input as { +test_cluster_missing_security_posture if { + not cluster_security_posture.valid with input as { "name": "cluster-test" } } diff --git a/gke-policies/policy/cluster_enable_workload_scanning.rego b/gke-policies/policy/cluster_enable_workload_scanning.rego index 2405ff4d..dde9fb41 100644 --- a/gke-policies/policy/cluster_enable_workload_scanning.rego +++ b/gke-policies/policy/cluster_enable_workload_scanning.rego @@ -29,16 +29,18 @@ # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/about-workload-vulnerability-scanning # sccCategory: WORKLOAD_SCANNING_DISABLED # dataSource: gke - package 
gke.policy.cluster_workload_scanning +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.security_posture_config.vulnerability_mode == 2 - msg := "GKE cluster has not configured workload vulnerability scanning" + msg := "Cluster is not configured with workload vulnerability scanning" } diff --git a/gke-policies/policy/cluster_enable_workload_scanning_test.rego b/gke-policies/policy/cluster_enable_workload_scanning_test.rego index 745d1042..2471edfc 100644 --- a/gke-policies/policy/cluster_enable_workload_scanning_test.rego +++ b/gke-policies/policy/cluster_enable_workload_scanning_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.cluster_workload_scanning +package gke.policy.cluster_workload_scanning_test -test_cluster_enabled_workload_scanning { - valid with input as { +import future.keywords.if +import data.gke.policy.cluster_workload_scanning + +test_cluster_enabled_workload_scanning if { + cluster_workload_scanning.valid with input as { "name": "cluster-test", "security_posture_config": { "mode": 2, @@ -24,8 +27,8 @@ test_cluster_enabled_workload_scanning { } } -test_cluster_disabled_workload_scanning { - not valid with input as { +test_cluster_disabled_workload_scanning if { + not cluster_workload_scanning.valid with input as { "name": "cluster-test", "security_posture_config": { "mode": 1, @@ -34,8 +37,8 @@ test_cluster_disabled_workload_scanning { } } -test_cluster_unknown_workload_scanning { - not valid with input as { +test_cluster_unknown_workload_scanning if { + not cluster_workload_scanning.valid with input as { "name": "cluster-test", "security_posture_config": { "mode": 1, @@ -44,8 +47,8 @@ test_cluster_unknown_workload_scanning { } } -test_cluster_missing_security_posture { - not valid with input as { 
+test_cluster_missing_security_posture if { + not cluster_workload_scanning.valid with input as { "name": "cluster-test" } } diff --git a/gke-policies/policy/cluster_gce_csi_driver.rego b/gke-policies/policy/cluster_gce_csi_driver.rego index c50d703d..afbe1892 100644 --- a/gke-policies/policy/cluster_gce_csi_driver.rego +++ b/gke-policies/policy/cluster_gce_csi_driver.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use Compute Engine persistent disk CSI driver +# title: Enable Compute Engine persistent disk CSI driver # description: Automatic deployment and management of the Compute Engine persistent disk CSI driver. The driver provides support for features like customer managed encryption keys or volume snapshots. # custom: # group: Management @@ -24,16 +24,19 @@ # Select the "Enable Compute Engine Persistent Disk CSI Driver " checkbox and click "Save changes". # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/persistent-volumes/gce-pd-csi-driver # sccCategory: GCE_CSI_DRIVER_DISABLED - +# dataSource: gke package gke.policy.cluster_gce_csi_driver -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.addons_config.gce_persistent_disk_csi_driver_config.enabled - msg := "GKE cluster has not configured GCE persistent disk CSI driver" + msg := "Cluster is not configured with GCE persistent disk CSI driver" } diff --git a/gke-policies/policy/cluster_gce_csi_driver_test.rego b/gke-policies/policy/cluster_gce_csi_driver_test.rego index 4944f9c0..0f062767 100644 --- a/gke-policies/policy/cluster_gce_csi_driver_test.rego +++ b/gke-policies/policy/cluster_gce_csi_driver_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_gce_csi_driver +package gke.policy.cluster_gce_csi_driver_test -test_gce_csi_driver_addon_empty { - not valid with input as {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{}}} +import future.keywords.if +import data.gke.policy.cluster_gce_csi_driver + +test_gce_csi_driver_addon_empty if { + not cluster_gce_csi_driver.valid with input as {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{}}} } -test_gce_csi_driver_addon_empty { - not valid with input as {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":false}}} +test_gce_csi_driver_addon_disabled if { + not cluster_gce_csi_driver.valid with input as {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":false}}} } -test_gce_csi_driver_addon_enabled { - valid with input as {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":true}}} -} \ No newline at end of file +test_gce_csi_driver_addon_enabled if { + cluster_gce_csi_driver.valid with input as {"name":"cluster-demo","addons_config":{"gce_persistent_disk_csi_driver_config":{"enabled":true}}} +} diff --git a/gke-policies/policy/cluster_maintenance_window.rego b/gke-policies/policy/cluster_maintenance_window.rego index bead4d2b..28a77ee0 100644 --- a/gke-policies/policy/cluster_maintenance_window.rego +++ b/gke-policies/policy/cluster_maintenance_window.rego @@ -13,8 +13,8 @@ # limitations under the License. # METADATA -# title: Schedule maintenance windows and exclusions -# description: GKE cluster should schedule maintenance windows and exclusions to upgrade predictability and to align updates with off-peak business hours. +# title: Enable maintenance windows +# description: GKE cluster should use maintenance windows and exclusions to upgrade predictability and to align updates with off-peak business hours. 
# custom: # group: Management # severity: Medium @@ -26,16 +26,19 @@ # Click "Save changes" once done. # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/maintenance-windows-and-exclusions # sccCategory: MAINTENANCE_WINDOWS_DISABLED - +# dataSource: gke package gke.policy.cluster_maintenance_window -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.maintenance_policy.window.Policy - msg := "GKE cluster has not configured maintenance window" + msg := "GKE cluster is not configured with maintenance window" } diff --git a/gke-policies/policy/cluster_maintenance_window_test.rego b/gke-policies/policy/cluster_maintenance_window_test.rego index cea79f4a..453576e6 100644 --- a/gke-policies/policy/cluster_maintenance_window_test.rego +++ b/gke-policies/policy/cluster_maintenance_window_test.rego @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_maintenance_window +package gke.policy.cluster_maintenance_window_test -test_cluster_not_configured_maintenance_window { - not valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +import future.keywords.if +import data.gke.policy.cluster_maintenance_window + +test_cluster_not_configured_maintenance_window if { + not cluster_maintenance_window.valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_cluster_configured_to_maintanace_window { - valid with input as { +test_cluster_configured_to_maintenance_window if { + cluster_maintenance_window.valid with input as { "name": "cluster-not-repairing", "maintenance_policy": { "window": { diff --git a/gke-policies/policy/cluster_receive_updates.rego b/gke-policies/policy/cluster_receive_updates.rego index 137e8f80..c3951f37 100644 --- a/gke-policies/policy/cluster_receive_updates.rego +++ b/gke-policies/policy/cluster_receive_updates.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Receive updates about new GKE versions +# title: Enable GKE upgrade notifications # description: GKE cluster should be proactively receive updates about GKE upgrades and GKE versions # custom: # group: Management @@ -26,19 +26,24 @@ # Click "Save changes" once done.
# externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-notifications # sccCategory: UPDATE_NOTIFICATIONS_DISABLED - +# dataSource: gke package gke.policy.cluster_receive_updates -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.notification_config.pubsub.enabled - msg := "Pub/Sub notifications are not enabled" -} { + msg := "Cluster is not configured with upgrade notifications" +} + +violation contains msg if { not input.notification_config.pubsub.topic - msg := "Pub/Sub topic is not configured" + msg := "Cluster is not configured with upgrade notifications topic" } diff --git a/gke-policies/policy/cluster_receive_updates_test.rego b/gke-policies/policy/cluster_receive_updates_test.rego index 5aca8ce4..66a42751 100644 --- a/gke-policies/policy/cluster_receive_updates_test.rego +++ b/gke-policies/policy/cluster_receive_updates_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_receive_updates +package gke.policy.cluster_receive_updates_test -test_cluster_with_topic_configured { - valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "notification_config": { "pubsub": { "enabled": true, "topic": "projects/project-id/topics/cluster-updates-topic"}}} +import future.keywords.if +import data.gke.policy.cluster_receive_updates + +test_cluster_with_topic_configured if { + cluster_receive_updates.valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "notification_config": { "pubsub": { "enabled": true, "topic": "projects/project-id/topics/cluster-updates-topic"}}} } -test_cluster_without_notification_config { - not valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +test_cluster_without_notification_config if { + not cluster_receive_updates.valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_cluster_without_topic_specified { - not valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": true }}} +test_cluster_without_topic_specified if { + not cluster_receive_updates.valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": true }}} } -test_cluster_without_pubsub_enabled { - not valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "notification_config": { "pubsub": { "enabled": false, "topic": "projects/project-id/topics/cluster-updates-topic"}}} +test_cluster_without_pubsub_enabled if { + not cluster_receive_updates.valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, 
"notification_config": { "pubsub": { "enabled": false, "topic": "projects/project-id/topics/cluster-updates-topic"}}} } \ No newline at end of file diff --git a/gke-policies/policy/cluster_release_channels.rego b/gke-policies/policy/cluster_release_channels.rego index 927f024f..83b73528 100644 --- a/gke-policies/policy/cluster_release_channels.rego +++ b/gke-policies/policy/cluster_release_channels.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Enrollment in Release Channels +# title: Enroll cluster in Release Channels # description: GKE cluster should be enrolled in release channels # custom: # group: Security @@ -28,16 +28,19 @@ # cis: # version: "1.4" # id: "5.5.4" - +# dataSource: gke package gke.policy.cluster_release_channels -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.release_channel.channel - msg := "GKE cluster is not enrolled in release channel" + msg := "Cluster is not enrolled in any release channel" } diff --git a/gke-policies/policy/cluster_release_channels_test.rego b/gke-policies/policy/cluster_release_channels_test.rego index 3175daf9..2dd42989 100644 --- a/gke-policies/policy/cluster_release_channels_test.rego +++ b/gke-policies/policy/cluster_release_channels_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.cluster_release_channels +package gke.policy.cluster_release_channels_test -test_cluster_not_enrolled_to_release_channels { - not valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +import future.keywords.if +import data.gke.policy.cluster_release_channels + +test_cluster_not_enrolled_to_release_channels if { + not cluster_release_channels.valid with input as {"name": "cluster-not-repairing", "release_channel": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_cluster_enrolled_to_release_channels { - valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +test_cluster_enrolled_to_release_channels if { + cluster_release_channels.valid with input as {"name": "cluster-not-repairing", "release_channel": {"channel": 2 }, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } \ No newline at end of file diff --git a/gke-policies/policy/control_plane_access.rego b/gke-policies/policy/control_plane_access.rego index 402e5682..009ebd84 100644 --- a/gke-policies/policy/control_plane_access.rego +++ b/gke-policies/policy/control_plane_access.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Control Plane endpoint access +# title: Limit Control Plane endpoint access # description: Control Plane endpoint access should be limited to authorized networks only # custom: # group: Security @@ -29,26 +29,29 @@ # cis: # version: "1.4" # id: "5.6.3" - +# dataSource: gke package gke.policy.control_plane_access -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.master_authorized_networks_config.enabled - msg := "GKE cluster has not enabled master authorized networks configuration" + msg := "Cluster is not configured with master authorized networks" } -violation[msg] { +violation contains msg if { not input.master_authorized_networks_config.cidr_blocks - msg := "GKE cluster's master authorized networks has no CIDR blocks element" + msg := "Cluster is not configured with master authorized networks CIDRs" } -violation[msg] { +violation contains msg if { count(input.master_authorized_networks_config.cidr_blocks) < 1 - msg := "GKE cluster's master authorized networks has no CIDR blocks defined" + msg := "Cluster is not configured with master authorized networks CIDRs" } diff --git a/gke-policies/policy/control_plane_access_test.rego b/gke-policies/policy/control_plane_access_test.rego index 6dd8570a..666df379 100644 --- a/gke-policies/policy/control_plane_access_test.rego +++ b/gke-policies/policy/control_plane_access_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.control_plane_access +package gke.policy.control_plane_access_test -test_authorized_networks_enabled { - valid with input as {"name":"test-cluster","master_authorized_networks_config": { +import future.keywords.if +import data.gke.policy.control_plane_access + +test_authorized_networks_enabled if { + control_plane_access.valid with input as {"name":"test-cluster","master_authorized_networks_config": { "enabled":true, "cidr_blocks":[ {"display_name":"Test Block","cidr_block":"192.168.0.0./16"} @@ -23,20 +26,20 @@ test_authorized_networks_enabled { }} } -test_authoized_networks_missing{ - not valid with input as {"name":"test-cluster"} +test_authoized_networks_missing if { + not control_plane_access.valid with input as {"name":"test-cluster"} } -test_authorized_networks_disabled{ - not valid with input as {"name":"test-cluster","master_authorized_networks_config": {"enabled":false}} +test_authorized_networks_disabled if { + not control_plane_access.valid with input as {"name":"test-cluster","master_authorized_networks_config": {"enabled":false}} } -test_authorized_networks_no_cidrs_block{ - not valid with input as {"name":"test-cluster","master_authorized_networks_config": {"enabled":true}} +test_authorized_networks_no_cidrs_block if { + not control_plane_access.valid with input as {"name":"test-cluster","master_authorized_networks_config": {"enabled":true}} } -test_authorized_networks_empty_cidrs_block{ - not valid with input as {"name":"test-cluster","master_authorized_networks_config": { +test_authorized_networks_empty_cidrs_block if { + not control_plane_access.valid with input as {"name":"test-cluster","master_authorized_networks_config": { "enabled":true, "cidr_blocks":[] }} diff --git a/gke-policies/policy/control_plane_disable_cert_authentication.rego b/gke-policies/policy/control_plane_disable_cert_authentication.rego index c6089214..8c0d8b67 100644 --- a/gke-policies/policy/control_plane_disable_cert_authentication.rego +++ 
b/gke-policies/policy/control_plane_disable_cert_authentication.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control plane user certificate authentication +# title: Disable control plane certificate authentication # description: >- # Disable Client Certificates, which require certificate rotation, for authentication. Instead, # use another authentication method like OpenID Connect. @@ -29,21 +29,23 @@ # version: "1.4" # id: "5.8.2" # dataSource: gke - package gke.policy.control_plane_certificate_auth +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.master_auth.client_certificate - msg := "The GKE cluster authentication should not be configured with a client certificate" + msg := "Cluster authentication is configured with a client certificate" } -violation[msg] { +violation contains msg if { input.master_auth.client_key - msg := "The GKE cluster authentication should not be configured with a client key" + msg := "Cluster authentication is configured with a client key" } diff --git a/gke-policies/policy/control_plane_disable_cert_authentication_test.rego b/gke-policies/policy/control_plane_disable_cert_authentication_test.rego index 018c45c2..8b72a84c 100644 --- a/gke-policies/policy/control_plane_disable_cert_authentication_test.rego +++ b/gke-policies/policy/control_plane_disable_cert_authentication_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.control_plane_certificate_auth +package gke.policy.control_plane_certificate_auth_test -test_cluster_without_client_certificate { - valid with input as { +import future.keywords.if +import data.gke.policy.control_plane_certificate_auth + +test_cluster_without_client_certificate if { + control_plane_certificate_auth.valid with input as { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V..." @@ -23,8 +26,8 @@ test_cluster_without_client_certificate { } } -test_cluster_client_certificate { - not valid with input as { +test_cluster_client_certificate if { + not control_plane_certificate_auth.valid with input as { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V...", diff --git a/gke-policies/policy/control_plane_disable_legacy_authorization.rego b/gke-policies/policy/control_plane_disable_legacy_authorization.rego index 71786dff..a56d640d 100644 --- a/gke-policies/policy/control_plane_disable_legacy_authorization.rego +++ b/gke-policies/policy/control_plane_disable_legacy_authorization.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: GKE RBAC authorization +# title: Disable legacy ABAC authorization # description: GKE cluster should use RBAC instead of legacy ABAC authorization # custom: # group: Security @@ -27,16 +27,19 @@ # cis: # version: "1.4" # id: "5.8.4" - +# dataSource: gke package gke.policy.disable_legacy_authorization -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.legacy_abac.enabled - msg := "The GKE cluster is configured to use legacy ABAC authorization mechanism" + msg := "Cluster authorization is configured with legacy ABAC" } diff --git a/gke-policies/policy/control_plane_disable_legacy_authorization_test.rego b/gke-policies/policy/control_plane_disable_legacy_authorization_test.rego index 5fdbc035..462b3c85 100644 --- a/gke-policies/policy/control_plane_disable_legacy_authorization_test.rego +++ b/gke-policies/policy/control_plane_disable_legacy_authorization_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.disable_legacy_authorization +package gke.policy.disable_legacy_authorization_test -test_enabled_legacy_authorization { - not valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": true}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]} +import future.keywords.if +import data.gke.policy.disable_legacy_authorization + +test_enabled_legacy_authorization if { + not disable_legacy_authorization.valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": true}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]} } -test_disabled_legacy_authorization { - valid with input as {"name": "cluster-1", "legacy_abac": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]} +test_disabled_legacy_authorization if { + disable_legacy_authorization.valid with input as {"name": "cluster-1", "legacy_abac": {}, "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true}}]} } diff --git a/gke-policies/policy/control_plane_disable_password_authentication.rego b/gke-policies/policy/control_plane_disable_password_authentication.rego index f54d18f8..464ad848 100644 --- a/gke-policies/policy/control_plane_disable_password_authentication.rego +++ b/gke-policies/policy/control_plane_disable_password_authentication.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control plane user basic authentication +# title: Disable control plane basic authentication # description: >- # Disable Basic Authentication (basic auth) for API server authentication as it uses static # passwords which need to be rotated. 
@@ -30,21 +30,23 @@ # version: "1.4" # id: "5.8.1" # dataSource: gke - package gke.policy.control_plane_basic_auth +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.master_auth.password - msg := "The GKE cluster authentication should not be configured with a client password" + msg := "Cluster authentication is configured with a client password" } -violation[msg] { +violation contains msg if { input.master_auth.username - msg := "The GKE cluster authentication should not be configured with a client username" + msg := "Cluster authentication is configured with a client username" } diff --git a/gke-policies/policy/control_plane_disable_password_authentication_test.rego b/gke-policies/policy/control_plane_disable_password_authentication_test.rego index 8dafcfd3..af23dc82 100644 --- a/gke-policies/policy/control_plane_disable_password_authentication_test.rego +++ b/gke-policies/policy/control_plane_disable_password_authentication_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.control_plane_basic_auth +package gke.policy.control_plane_basic_auth_test -test_cluster_without_basic_auth { - valid with input as { +import future.keywords.if +import data.gke.policy.control_plane_basic_auth + +test_cluster_without_basic_auth if { + control_plane_basic_auth.valid with input as { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V..." 
@@ -23,8 +26,8 @@ test_cluster_without_basic_auth { } } -test_cluster_with_basic_auth { - not valid with input as { +test_cluster_with_basic_auth if { + not control_plane_basic_auth.valid with input as { "name": "cluster-test", "master_auth": { "cluster_ca_certificate": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVMVENDQXBXZ0F3SUJBZ0lSQUpIeTI1V...", diff --git a/gke-policies/policy/control_plane_endpoint.rego b/gke-policies/policy/control_plane_endpoint.rego index 40e0eaa7..b9055475 100644 --- a/gke-policies/policy/control_plane_endpoint.rego +++ b/gke-policies/policy/control_plane_endpoint.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control Plane endpoint visibility +# title: Enable control plane private endpoint # description: Control Plane endpoint should be locked from external access # custom: # group: Security @@ -27,16 +27,19 @@ # cis: # version: "1.4" # id: "5.6.4" - +# dataSource: gke package gke.policy.control_plane_endpoint -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.private_cluster_config.enable_private_endpoint - msg := "GKE cluster has not enabled private endpoint" + msg := "Cluster is not configured with private endpoint" } diff --git a/gke-policies/policy/control_plane_endpoint_test.rego b/gke-policies/policy/control_plane_endpoint_test.rego index c5e7b26a..6ae91bd6 100644 --- a/gke-policies/policy/control_plane_endpoint_test.rego +++ b/gke-policies/policy/control_plane_endpoint_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.control_plane_endpoint +package gke.policy.control_plane_endpoint_test -test_private_endpoint_enabled { - valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": true}} +import future.keywords.if +import data.gke.policy.control_plane_endpoint + +test_private_endpoint_enabled if { + control_plane_endpoint.valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": true}} } -test_private_endpoint_disabled { - not valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": false}} +test_private_endpoint_disabled if { + not control_plane_endpoint.valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_endpoint": false}} } -test_private_cluster_config_missing { - not valid with input as {"name": "test-cluster"} +test_private_cluster_config_missing if { + not control_plane_endpoint.valid with input as {"name": "test-cluster"} } diff --git a/gke-policies/policy/control_plane_redundancy.rego b/gke-policies/policy/control_plane_redundancy.rego index 26561cc0..4553d830 100644 --- a/gke-policies/policy/control_plane_redundancy.rego +++ b/gke-policies/policy/control_plane_redundancy.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Control Plane redundancy +# title: Ensure redundancy of the Control Plane # description: GKE cluster should be regional for maximum availability of control plane during upgrades and zonal outages # custom: # group: Availability @@ -23,23 +23,25 @@ # The cluster must be recreated, ensuring that regional location type is choosen. 
# externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/regional-clusters # sccCategory: CONTROL_PLANE_ZONAL - +# dataSource: gke package gke.policy.control_plane_redundancy -import data.gke.rule.cluster.location.regional +import future.keywords.if +import future.keywords.contains +import data.gke.rule.cluster.location -default valid = false +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.location - msg := "Missing GKE cluster location object" + msg := "Cluster location information is missing" } -violation[msg] { - not regional(input.location) - msg := sprintf("Invalid GKE Control plane location %q (not regional)", [input.location]) +violation contains msg if { + not location.regional(input.location) + msg := sprintf("Cluster location %q is not regional", [input.location]) } diff --git a/gke-policies/policy/control_plane_redundancy_test.rego b/gke-policies/policy/control_plane_redundancy_test.rego index dceb43d7..bbec8581 100644 --- a/gke-policies/policy/control_plane_redundancy_test.rego +++ b/gke-policies/policy/control_plane_redundancy_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.control_plane_redundancy +package gke.policy.control_plane_redundancy_test -test_control_plane_regional_location { - valid with input as {"name": "test-cluster", "location": "europe-central2"} +import future.keywords.if +import data.gke.policy.control_plane_redundancy + +test_control_plane_regional_location if { + control_plane_redundancy.valid with input as {"name": "test-cluster", "location": "europe-central2"} } -test_control_plane_zonal_location { - not valid with input as {"name": "test-cluster", "location": "europe-central2-a"} +test_control_plane_zonal_location if { + not control_plane_redundancy.valid with input as {"name": "test-cluster", "location": "europe-central2-a"} } -test_control_plane_missing_location { - not valid with input as {"name": "test-cluster"} +test_control_plane_missing_location if { + not control_plane_redundancy.valid with input as {"name": "test-cluster"} } diff --git a/gke-policies/policy/ilb_subsetting.rego b/gke-policies/policy/ilb_subsetting.rego index 81e6b39c..1f3ae4e9 100644 --- a/gke-policies/policy/ilb_subsetting.rego +++ b/gke-policies/policy/ilb_subsetting.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE L4 ILB Subsetting +# title: Enable GKE L4 ILB Subsetting # description: GKE cluster should use GKE L4 ILB Subsetting if nodes > 250 # custom: # group: Scalability @@ -24,19 +24,20 @@ # Select the "Enable subsetting for L4 internal load balancers" checkbox and click "Save changes". 
# externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/internal-load-balancing#subsetting # sccCategory: ILB_SUBSETTING_DISABLED - +# dataSource: gke package gke.policy.enable_ilb_subsetting -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.current_node_count > 250 not input.network_config.enable_l4ilb_subsetting = true - - msg := sprintf("The GKE cluster has %v nodes but is not configured to use L4 ILB Subsetting", [input.current_node_count]) - + msg := sprintf("Cluster has %v nodes and is not configured with L4 ILB Subsetting", [input.current_node_count]) } diff --git a/gke-policies/policy/ilb_subsetting_test.rego b/gke-policies/policy/ilb_subsetting_test.rego index a1a8c169..5e4ee599 100644 --- a/gke-policies/policy/ilb_subsetting_test.rego +++ b/gke-policies/policy/ilb_subsetting_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.enable_ilb_subsetting +package gke.policy.enable_ilb_subsetting_test -test_enabled_ilb_subsetting_high_nodes { - valid with input as {"name": "test-cluster", "current_node_count": 251, "network_config": { "enable_l4ilb_subsetting": true }} +import future.keywords.if +import data.gke.policy.enable_ilb_subsetting + +test_enabled_ilb_subsetting_high_nodes if { + enable_ilb_subsetting.valid with input as {"name": "test-cluster", "current_node_count": 251, "network_config": { "enable_l4ilb_subsetting": true }} } -test_disabled_ilb_subsetting_low_nodes { - valid with input as {"name": "test-cluster", "current_node_count": 3, "network_config": {}} +test_disabled_ilb_subsetting_low_nodes if { + enable_ilb_subsetting.valid with input as {"name": "test-cluster", "current_node_count": 3, "network_config": {}} } -test_disabled_ilb_subsetting_high_nodes { - not valid with input as {"name": "test-cluster", "current_node_count": 251, "network_config": {}} +test_disabled_ilb_subsetting_high_nodes if { + not enable_ilb_subsetting.valid with input as {"name": "test-cluster", "current_node_count": 251, "network_config": {}} } diff --git a/gke-policies/policy/intranode_visibility.rego b/gke-policies/policy/intranode_visibility.rego index 5859b8cd..739d1b76 100644 --- a/gke-policies/policy/intranode_visibility.rego +++ b/gke-policies/policy/intranode_visibility.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: GKE intranode visibility +# title: Enable GKE intranode visibility # description: GKE cluster should have intranode visibility enabled # custom: # group: Security @@ -28,16 +28,18 @@ # version: "1.4" # id: "5.6.1" # dataSource: gke - package gke.policy.networkConfig +import future.keywords.if +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.networkConfig.enableIntraNodeVisibility = true - msg := "The GKE cluster does not have Intranode Visibility enabled" + msg := "Cluster is not configured with Intranode Visibility" } diff --git a/gke-policies/policy/intranode_visibility_test.rego b/gke-policies/policy/intranode_visibility_test.rego index ac974454..1f0670b2 100644 --- a/gke-policies/policy/intranode_visibility_test.rego +++ b/gke-policies/policy/intranode_visibility_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.networkConfig +package gke.policy.networkConfig_test -test_enabled_intranode_visibility { - valid with input as {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": true }} +import future.keywords.if +import data.gke.policy.networkConfig + +test_enabled_intranode_visibility if { + networkConfig.valid with input as {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": true }} } -test_disabled_intranode_visibility { - not valid with input as {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": false }} +test_disabled_intranode_visibility if { + not networkConfig.valid with input as {"name": "test-cluster", "networkConfig": { "enableIntraNodeVisibility": false }} } diff --git a/gke-policies/policy/monitoring_and_logging.rego b/gke-policies/policy/monitoring_and_logging.rego index 44e03abf..0e9bbc7e 100644 --- a/gke-policies/policy/monitoring_and_logging.rego +++ b/gke-policies/policy/monitoring_and_logging.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Cloud Monitoring and Logging +# title: Enable Cloud Monitoring and Logging # description: GKE cluster should use Cloud Logging and Monitoring # custom: # group: Maintenance @@ -31,23 +31,24 @@ # cis: # version: "1.4" # id: "5.7.1" - +# dataSource: gke package gke.policy.logging_and_monitoring -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.logging_config.component_config.enable_components - - msg := "The GKE cluster does not have Cloud Logging enabled" + msg := "Cluster is not configured with Cloud Logging" } -violation[msg] { +violation contains msg if { not input.monitoring_config.component_config.enable_components - - msg := "The GKE cluster does not have Cloud Monitoring enabled" + msg := "Cluster is not configured with Cloud Monitoring" } \ No newline at end of file diff --git a/gke-policies/policy/monitoring_and_logging_test.rego b/gke-policies/policy/monitoring_and_logging_test.rego index a75c92b8..1e1f9014 100644 --- a/gke-policies/policy/monitoring_and_logging_test.rego +++ b/gke-policies/policy/monitoring_and_logging_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.logging_and_monitoring +package gke.policy.logging_and_monitoring_test -test_enabled_logging_and_monitoring { - valid with input as { +import future.keywords.if +import data.gke.policy.logging_and_monitoring + +test_enabled_logging_and_monitoring if { + logging_and_monitoring.valid with input as { "name": "test-cluster", "logging_config": { "component_config": { @@ -31,8 +34,8 @@ test_enabled_logging_and_monitoring { } } -test_disabled_logging { - not valid with input as { +test_disabled_logging if { + not logging_and_monitoring.valid with input as { "name": "test-cluster", "logging_config": {"component_config": {}}, "monitoring_config": { @@ -43,8 +46,8 @@ test_disabled_logging { } } -test_disabled_monitoring { - not valid with input as { +test_disabled_monitoring if { + not logging_and_monitoring.valid with input as { "name": "test-cluster", "logging_config": { "component_config": { diff --git a/gke-policies/policy/nap_forbid_default_sa.rego b/gke-policies/policy/nap_forbid_default_sa.rego index d12ecce5..0809f950 100644 --- a/gke-policies/policy/nap_forbid_default_sa.rego +++ b/gke-policies/policy/nap_forbid_default_sa.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Forbid default Service Accounts in Node Auto-Provisioning +# title: Change default Service Accounts in Node Auto-Provisioning # description: Node Auto-Provisioning configuration should not allow default Service Accounts # custom: # group: Security @@ -28,18 +28,21 @@ # cis: # version: "1.4" # id: "5.2.1" - +# dataSource: gke package gke.policy.nap_forbid_default_sa -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.autopilot.enabled input.autoscaling.enable_node_autoprovisioning == true input.autoscaling.autoprovisioning_node_pool_defaults.service_account == "default" - msg := "GKE cluster Node Auto-Provisioning should have a dedicated Service Account configured" + msg := "Cluster is configured with default service account for Node Auto-Provisioning" } diff --git a/gke-policies/policy/nap_forbid_default_sa_test.rego b/gke-policies/policy/nap_forbid_default_sa_test.rego index c79e7c5c..1a28273d 100644 --- a/gke-policies/policy/nap_forbid_default_sa_test.rego +++ b/gke-policies/policy/nap_forbid_default_sa_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.nap_forbid_default_sa +package gke.policy.nap_forbid_default_sa_test -test_cluster_not_enabled_nap { - valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} +import future.keywords.if +import data.gke.policy.nap_forbid_default_sa + +test_cluster_not_enabled_nap if { + nap_forbid_default_sa.valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} } -test_cluster_enabled_nap_with_default_sa { - not valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }} +test_cluster_enabled_nap_with_default_sa if { + not nap_forbid_default_sa.valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }} } -test_cluster_enabled_nap_without_default_sa { - valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "dedicated-sa@project.iam.gserviceaccount.com"} }} +test_cluster_enabled_nap_without_default_sa if { + nap_forbid_default_sa.valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "dedicated-sa@project.iam.gserviceaccount.com"} }} } -test_cluster_autopilot_with_default { - valid with input as {"name": "cluster-autopilot", "autopilot": {"enabled": true}, "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": { "service_account": "default"} }} +test_cluster_autopilot_with_default if { + nap_forbid_default_sa.valid with input as {"name": "cluster-autopilot", "autopilot": {"enabled": true}, "autoscaling": {"enable_node_autoprovisioning": true, 
"autoprovisioning_node_pool_defaults": { "service_account": "default"} }} } diff --git a/gke-policies/policy/nap_forbid_single_zone.rego b/gke-policies/policy/nap_forbid_single_zone.rego index 0ad658e3..f45b00ad 100644 --- a/gke-policies/policy/nap_forbid_single_zone.rego +++ b/gke-policies/policy/nap_forbid_single_zone.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Ensure that node pool locations within Node Auto-Provisioning are covering more than one zone (or not enforced at all) +# title: Ensure redundancy of Node Auto-provisioning node pools # description: Node Auto-Provisioning configuration should cover more than one zone # custom: # group: Security @@ -24,17 +24,20 @@ # Under the "Node pool location", select multiple zone checkboxes. Click "Save changes" once done. # externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/node-auto-provisioning#auto-provisioning_locations # sccCategory: NAP_ZONAL - +# dataSource: gke package gke.policy.nap_forbid_single_zone -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.autoscaling.enable_node_autoprovisioning == true count(input.autoscaling.autoprovisioning_locations) == 1 - msg := "GKE cluster Node Auto-Provisioning configuration should cover more than one zone" + msg := "Cluster is not configured with multiple zones for NAP node pools" } diff --git a/gke-policies/policy/nap_forbid_single_zone_test.rego b/gke-policies/policy/nap_forbid_single_zone_test.rego index 7757a877..89ee91fc 100644 --- a/gke-policies/policy/nap_forbid_single_zone_test.rego +++ b/gke-policies/policy/nap_forbid_single_zone_test.rego @@ -12,18 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.nap_forbid_single_zone +package gke.policy.nap_forbid_single_zone_test -test_cluster_not_enabled_nap { - valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} +import future.keywords.if +import data.gke.policy.nap_forbid_single_zone + +test_cluster_not_enabled_nap if { + nap_forbid_single_zone.valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} } -test_cluster_enabled_nap_without_enabled_autoprovisioning_locations_not_enabled { - valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true}} +test_cluster_enabled_nap_without_enabled_autoprovisioning_locations_not_enabled if { + nap_forbid_single_zone.valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true}} } -test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_multiple { - valid with input as { +test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_multiple if { + nap_forbid_single_zone.valid with input as { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, @@ -35,8 +38,8 @@ test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_multiple { } } -test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_single { - not valid with input as { +test_cluster_enabled_nap_with_enabled_autoprovisioning_locations_single if { + not nap_forbid_single_zone.valid with input as { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, diff --git a/gke-policies/policy/nap_integrity_monitoring.rego b/gke-policies/policy/nap_integrity_monitoring.rego index fb371034..7861a23c 100644 --- a/gke-policies/policy/nap_integrity_monitoring.rego +++ b/gke-policies/policy/nap_integrity_monitoring.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Ensure that nodes in Node Auto-Provisioning node pools will use integrity monitoring +# title: Enable integrity monitoring for Node Auto-Provisioning node pools # description: Nodes in Node Auto-Provisioning should use integrity monitoring # custom: # group: Security @@ -32,18 +32,20 @@ # cis: # version: "1.4" # id: "5.5.6" - +# dataSource: gke package gke.policy.nap_integrity_monitoring -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.autoscaling.enable_node_autoprovisioning == true input.autoscaling.autoprovisioning_node_pool_defaults.shielded_instance_config.enable_integrity_monitoring == false - - msg := "GKE cluster Node Auto-Provisioning configuration use integrity monitoring" + msg := "Cluster is not configured with integrity monitoring for NAP node pools" } diff --git a/gke-policies/policy/nap_integrity_monitoring_test.rego b/gke-policies/policy/nap_integrity_monitoring_test.rego index 267d8010..8eb337b9 100644 --- a/gke-policies/policy/nap_integrity_monitoring_test.rego +++ b/gke-policies/policy/nap_integrity_monitoring_test.rego @@ -12,14 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.nap_integrity_monitoring +package gke.policy.nap_integrity_monitoring_test -test_cluster_not_enabled_nap { - valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} +import future.keywords.if +import data.gke.policy.nap_integrity_monitoring + +test_cluster_not_enabled_nap if { + nap_integrity_monitoring.valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} } -test_cluster_enabled_nap_with_integrity_monitoring_enabled { - valid with input as { +test_cluster_enabled_nap_with_integrity_monitoring_enabled if { + nap_integrity_monitoring.valid with input as { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, @@ -30,8 +33,8 @@ test_cluster_enabled_nap_with_integrity_monitoring_enabled { } } -test_cluster_enabled_nap_without_integrity_monitoring_enabled { - not valid with input as { +test_cluster_enabled_nap_without_integrity_monitoring_enabled if { + not nap_integrity_monitoring.valid with input as { "name": "cluster-with-nap", "autoscaling": { "enable_node_autoprovisioning": true, diff --git a/gke-policies/policy/nap_use_cos.rego b/gke-policies/policy/nap_use_cos.rego index 1f9afb82..1cba57ad 100644 --- a/gke-policies/policy/nap_use_cos.rego +++ b/gke-policies/policy/nap_use_cos.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Ensure that nodes in Node Auto-Provisioning node pools will use Container-Optimized OS +# title: Configure Container-Optimized OS for Node Auto-Provisioning node pools # description: Nodes in Node Auto-Provisioning should use Container-Optimized OS # custom: # group: Security @@ -27,20 +27,21 @@ # cis: # version: "1.4" # id: "5.5.1" - +# dataSource: gke package gke.policy.nap_use_cos import future.keywords.in +import future.keywords.if +import future.keywords.contains -default valid = false +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.autoscaling.enable_node_autoprovisioning == true not lower(input.autoscaling.autoprovisioning_node_pool_defaults.image_type) in { "cos", "cos_containerd"} - - msg := "GKE cluster Node Auto-Provisioning configuration use Container-Optimized OS" + msg := "Cluster is not configured with COS for NAP node pools" } diff --git a/gke-policies/policy/nap_use_cos_test.rego b/gke-policies/policy/nap_use_cos_test.rego index 3b1f4a5e..df38d0c2 100644 --- a/gke-policies/policy/nap_use_cos_test.rego +++ b/gke-policies/policy/nap_use_cos_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.nap_use_cos +package gke.policy.nap_use_cos_test -test_cluster_not_enabled_nap { - valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} +import future.keywords.if +import data.gke.policy.nap_use_cos + +test_cluster_not_enabled_nap if { + nap_use_cos.valid with input as {"name": "cluster-without-nap", "autoscaling": {"enable_node_autoprovisioning": false}} } -test_cluster_enabled_nap_without_cos { - not valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "ANOTHER"}}} +test_cluster_enabled_nap_without_cos if { + not nap_use_cos.valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "ANOTHER"}}} } -test_cluster_enabled_nap_with_cos_containerd { - valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS_CONTAINERD"}} } +test_cluster_enabled_nap_with_cos_containerd if { + nap_use_cos.valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS_CONTAINERD"}} } } -test_cluster_enabled_nap_with_cos { - valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS"}} } +test_cluster_enabled_nap_with_cos if { + nap_use_cos.valid with input as {"name": "cluster-with-nap", "autoscaling": {"enable_node_autoprovisioning": true, "autoprovisioning_node_pool_defaults": {"image_type": "COS"}} } } diff --git a/gke-policies/policy/network_policies.rego b/gke-policies/policy/network_policies.rego index d402211d..9ba5ab47 100644 --- a/gke-policies/policy/network_policies.rego +++ 
b/gke-policies/policy/network_policies.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE Network Policies engine +# title: Enable Kubernetes Network Policies # description: GKE cluster should have Network Policies or Dataplane V2 enabled # custom: # group: Security @@ -27,34 +27,35 @@ # cis: # version: "1.4" # id: "5.6.7" - +# dataSource: gke package gke.policy.network_policies_engine -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.addons_config.network_policy_config.disabled not input.network_policy not input.network_config.datapath_provider == 2 - - msg := "No Network Policies Engines enabled" + msg := "Cluster is not configured with Kubneretes Network Policies" } -violation[msg] { +violation contains msg if { count(input.addons_config.network_policy_config) == 0 not input.network_policy.enabled not input.network_config.datapath_provider == 2 - msg := "Network Policies enabled but without configuration" + msg := "Cluster is configured with Kubneretes Network Policies without configuration" } -violation[msg] { +violation contains msg if { input.addons_config.network_policy_config.disabled count(input.network_policy) == 0 not input.network_config.datapath_provider == 2 - - msg := "Not DPv2 nor Network Policies are enabled onto the cluster" + msg := "Cluster is not DPv2 and has not configured Kubneretes Network Policies" } diff --git a/gke-policies/policy/network_policies_test.rego b/gke-policies/policy/network_policies_test.rego index 97abc15a..fa0cd5f8 100644 --- a/gke-policies/policy/network_policies_test.rego +++ b/gke-policies/policy/network_policies_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.network_policies_engine +package gke.policy.network_policies_engine_test -test_dataplane_v1_without_netpol { - not valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "private_cluster_config": {"enable_private_nodes": true}, "network_config": {"datapath_provider": 1}} +import future.keywords.if +import data.gke.policy.network_policies_engine + +test_dataplane_v1_without_netpol if { + not network_policies_engine.valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "private_cluster_config": {"enable_private_nodes": true}, "network_config": {"datapath_provider": 1}} } -test_dataplane_v1_with_netpol_disabled { - not valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": false}, "network_config": {"datapath_provider": 1}} +test_dataplane_v1_with_netpol_disabled if { + not network_policies_engine.valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": false}, "network_config": {"datapath_provider": 1}} } -test_dataplane_v1_without_netpol_conf { - not valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {}} +test_dataplane_v1_without_netpol_conf if { + not network_policies_engine.valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {}} } -test_dataplane_v1_with_netpol { - valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, 
"network_policy": {"provider": 1, "enabled": true}} +test_dataplane_v1_with_netpol if { + network_policies_engine.valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {}}, "private_cluster_config": {"enable_private_nodes": false}, "network_policy": {"provider": 1, "enabled": true}} } -test_dataplane_v2_with_netpol { - valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "network_config": {"datapath_provider": 2}} +test_dataplane_v2_with_netpol if { + network_policies_engine.valid with input as {"name": "test-cluster", "addons_config": {"network_policy_config": {"disabled": true}}, "network_config": {"datapath_provider": 2}} } diff --git a/gke-policies/policy/node_local_dns_cache.rego b/gke-policies/policy/node_local_dns_cache.rego index e9e55e72..7ac0c954 100644 --- a/gke-policies/policy/node_local_dns_cache.rego +++ b/gke-policies/policy/node_local_dns_cache.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE node local DNS cache +# title: Enable GKE node local DNS cache # description: GKE cluster should use node local DNS cache # custom: # group: Scalability @@ -24,16 +24,19 @@ # Select the "Enable NodeLocal DNSCache" checkbox and click "Save changes" button. 
# externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/nodelocal-dns-cache # sccCategory: DNS_CACHE_DISABLED - +# dataSource: gke package gke.policy.node_local_dns_cache -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.addons_config.dns_cache_config.enabled = true - msg := "The GKE cluster does not have node local DNS cache enabled" +violation contains msg if { + not input.addons_config.dns_cache_config.enabled + msg := "Cluster is not configured with node local DNS cache" } diff --git a/gke-policies/policy/node_local_dns_cache_test.rego b/gke-policies/policy/node_local_dns_cache_test.rego index 3abf09aa..06027c9d 100644 --- a/gke-policies/policy/node_local_dns_cache_test.rego +++ b/gke-policies/policy/node_local_dns_cache_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_local_dns_cache +package gke.policy.node_local_dns_cache_test -test_enabled_node_local_dns_cache { - valid with input as {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": true }}} +import future.keywords.if +import data.gke.policy.node_local_dns_cache + +test_enabled_node_local_dns_cache if { + node_local_dns_cache.valid with input as {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": true }}} } -test_absent_dns_cache_config { - not valid with input as {"name": "test-cluster", "addons_config": {}} +test_absent_dns_cache_config if { + not node_local_dns_cache.valid with input as {"name": "test-cluster", "addons_config": {}} } -test_disabled_node_local_dns_cache { - not valid with input as {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": false }}} +test_disabled_node_local_dns_cache if { + not node_local_dns_cache.valid with input as {"name": "test-cluster", "addons_config": { "dns_cache_config": { "enabled": false }}} } diff --git a/gke-policies/policy/node_pool_autorepair.rego b/gke-policies/policy/node_pool_autorepair.rego index 2f495d4e..a483a2af 100644 --- a/gke-policies/policy/node_pool_autorepair.rego +++ b/gke-policies/policy/node_pool_autorepair.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Use Node Auto-Repair +# title: Enable node auto-repair # description: GKE node pools should have Node Auto-Repair enabled to configure Kubernetes Engine # custom: # group: Availability @@ -28,18 +28,21 @@ # cis: # version: "1.4" # id: "5.5.2" - +# dataSource: gke package gke.policy.node_pool_autorepair -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.node_pools[pool].management.auto_repair - msg := sprintf("autorepair not set for GKE node pool %q", [input.node_pools[pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.management.auto_repair + msg := sprintf("Node pool %q is not configured with auto-repair", [pool.name]) } - - diff --git a/gke-policies/policy/node_pool_autorepair_test.rego b/gke-policies/policy/node_pool_autorepair_test.rego index f34b43ac..2aef7652 100644 --- a/gke-policies/policy/node_pool_autorepair_test.rego +++ b/gke-policies/policy/node_pool_autorepair_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_autorepair +package gke.policy.node_pool_autorepair_test -test_autorepair_for_node_pool_enabled { - valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +import future.keywords.if +import data.gke.policy.node_pool_autorepair + +test_autorepair_for_node_pool_enabled if { + node_pool_autorepair.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_autorepair_for_node_pool_disabled{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": false, "auto_upgrade": true }}]} +test_autorepair_for_node_pool_disabled if { + not node_pool_autorepair.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": false, "auto_upgrade": true }}]} } -test_autorepair_for_multiple_node_pools_but_only_one_disabled{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": true }}]} +test_autorepair_for_multiple_node_pools_but_only_one_disabled if { + not node_pool_autorepair.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": true }}]} } -test_autorepair_for_node_pool_empty_managment{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]} +test_autorepair_for_node_pool_empty_managment if { + not node_pool_autorepair.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]} } 
-test_autorepair_for_managment_without_auto_repair_field{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_upgrade": true }}]} -} \ No newline at end of file +test_autorepair_for_managment_without_auto_repair_field if { + not node_pool_autorepair.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_upgrade": true }}]} +} diff --git a/gke-policies/policy/node_pool_autoscaling.rego b/gke-policies/policy/node_pool_autoscaling.rego index 3d84ddd1..831b15d0 100644 --- a/gke-policies/policy/node_pool_autoscaling.rego +++ b/gke-policies/policy/node_pool_autoscaling.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use node pool autoscaling +# title: Enable node pool auto-scaling # description: GKE node pools should have autoscaling configured to proper resize nodes according to traffic # custom: # group: Scalability @@ -26,16 +26,21 @@ # fields. Slick "Save" button once done. 
# externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-autoscaler # sccCategory: NODEPOOL_AUTOSCALING_DISABLED - +# dataSource: gke package gke.policy.node_pool_autoscaling -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.node_pools[pool].autoscaling.enabled - msg := sprintf("Node pool %q does not have autoscaling configured.", [input.node_pools[pool].name]) -} \ No newline at end of file +violation contains msg if { + some pool in input.node_pools + not pool.autoscaling.enabled + msg := sprintf("Node pool %q is not configured with autoscaling", [pool.name]) +} diff --git a/gke-policies/policy/node_pool_autoscaling_test.rego b/gke-policies/policy/node_pool_autoscaling_test.rego index 9abdc082..2b4dfcab 100644 --- a/gke-policies/policy/node_pool_autoscaling_test.rego +++ b/gke-policies/policy/node_pool_autoscaling_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_autoscaling +package gke.policy.node_pool_autoscaling_test -test_node_pool_autoscaling_enabled { - valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}]} +import future.keywords.if +import data.gke.policy.node_pool_autoscaling + +test_node_pool_autoscaling_enabled if { + node_pool_autoscaling.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}]} } -test_node_pool_autoscaling_disabled { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": false}}]} +test_node_pool_autoscaling_disabled if { + not node_pool_autoscaling.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": false}}]} } -test_multiple_node_pool_autoscaling_but_only_one_enabled { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": false}}]} +test_multiple_node_pool_autoscaling_but_only_one_enabled if { + not node_pool_autoscaling.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": false}}]} } -test_multiple_node_pool_autoscaling_enabled { - valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": true}}]} +test_multiple_node_pool_autoscaling_enabled if { + node_pool_autoscaling.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "autoscaling": {"enabled": true}}, {"name": "custom", "autoscaling": {"enabled": true}}]} } -test_node_pool_without_autoscaling_field { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default"}]} +test_node_pool_without_autoscaling_field if { + not node_pool_autoscaling.valid with input 
as {"name": "cluster", "node_pools": [{"name": "default"}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_autoupgrade.rego b/gke-policies/policy/node_pool_autoupgrade.rego index ee7e359c..dd3a00d7 100644 --- a/gke-policies/policy/node_pool_autoupgrade.rego +++ b/gke-policies/policy/node_pool_autoupgrade.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Use Node Auto-Upgrade +# title: Enable node auto-upgrade # description: GKE node pools should have Node Auto-Upgrade enabled to configure Kubernetes Engine # custom: # group: Security @@ -28,17 +28,21 @@ # cis: # version: "1.4" # id: "5.5.3" - +# dataSource: gke package gke.policy.node_pool_autoupgrade -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.node_pools[pool].management.auto_upgrade - msg := sprintf("autoupgrade not set for GKE node pool %q", [input.node_pools[pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.management.auto_upgrade + msg := sprintf("Node pool %q is not configured with auto-upgrade", [pool.name]) } - diff --git a/gke-policies/policy/node_pool_autoupgrade_test.rego b/gke-policies/policy/node_pool_autoupgrade_test.rego index 602bd6b7..c48ed743 100644 --- a/gke-policies/policy/node_pool_autoupgrade_test.rego +++ b/gke-policies/policy/node_pool_autoupgrade_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_autoupgrade +package gke.policy.node_pool_autoupgrade_test -test_autoupgrade_for_node_pool_enabled { - valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +import future.keywords.if +import data.gke.policy.node_pool_autoupgrade + +test_autoupgrade_for_node_pool_enabled if { + node_pool_autoupgrade.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_autoupgrade_for_node_pool_disabled{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": false }}]} +test_autoupgrade_for_node_pool_disabled if { + not node_pool_autoupgrade.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": false }}]} } -test_autoupgrade_for_multiple_node_pools_but_only_one_disabled{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": false }}]} +test_autoupgrade_for_multiple_node_pools_but_only_one_disabled if { + not node_pool_autoupgrade.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }},{"name": "custom", "management": {"auto_repair": false, "auto_upgrade": false }}]} } -test_autoupgrade_for_node_pool_empty_managment{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]} +test_autoupgrade_for_node_pool_empty_managment if { + not node_pool_autoupgrade.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {}}]} } 
-test_autoupgrade_for_managment_without_auto_upgrade_field{ - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true }}]} +test_autoupgrade_for_managment_without_auto_upgrade_field if { + not node_pool_autoupgrade.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true }}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_disk_encryption.rego b/gke-policies/policy/node_pool_disk_encryption.rego index 29d55d1e..bc741022 100644 --- a/gke-policies/policy/node_pool_disk_encryption.rego +++ b/gke-policies/policy/node_pool_disk_encryption.rego @@ -30,17 +30,20 @@ # version: "1.4" # id: "5.9.1" # dataSource: gke - package gke.policy.node_pool_cmek +import future.keywords.if +import future.keywords.in +import future.keywords.contains + default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - some pool - not input.node_pools[pool].config.boot_disk_kms_key - msg := sprintf("GKE cluster node_pool %q has no CMEK configured for the boot disks", [input.node_pools[pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.config.boot_disk_kms_key + msg := sprintf("Node pool %q is not configured with CMEK for the boot disk", [pool.name]) } diff --git a/gke-policies/policy/node_pool_disk_encryption_test.rego b/gke-policies/policy/node_pool_disk_encryption_test.rego index 6bde44e7..5d42e124 100644 --- a/gke-policies/policy/node_pool_disk_encryption_test.rego +++ b/gke-policies/policy/node_pool_disk_encryption_test.rego @@ -12,10 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_cmek +package gke.policy.node_pool_cmek_test -test_cluster_node_pool_with_cmek { - valid with input as { +import future.keywords.if +import data.gke.policy.node_pool_cmek + +test_cluster_node_pool_with_cmek if { + node_pool_cmek.valid with input as { "name": "cluster-test", "node_pools": [ { @@ -28,8 +31,8 @@ test_cluster_node_pool_with_cmek { } } -test_cluster_node_pool_without_cmek { - not valid with input as { +test_cluster_node_pool_without_cmek if { + not node_pool_cmek.valid with input as { "name": "cluster-test", "node_pools": [ { diff --git a/gke-policies/policy/node_pool_forbid_default_sa.rego b/gke-policies/policy/node_pool_forbid_default_sa.rego index a8048992..154b080f 100644 --- a/gke-policies/policy/node_pool_forbid_default_sa.rego +++ b/gke-policies/policy/node_pool_forbid_default_sa.rego @@ -13,11 +13,12 @@ # limitations under the License. # METADATA -# title: Forbid default compute SA on node_pool +# title: Change default Service Accounts in node pools # description: GKE node pools should have a dedicated sa with a restricted set of permissions # custom: # group: Security # severity: Critical +# recommendation: > # Navigate to the GKE page in Google Cloud Console and select the name of the cluster. # Select "Nodes" tab and click on the name of the target node pool. Within the node pool # details pane, click EDIT. 
Under the "Management" heading, select the "Enable auto-upagde" @@ -27,17 +28,22 @@ # cis: # version: "1.4" # id: "5.2.1" - +# dataSource: gke package gke.policy.node_pool_forbid_default_sa -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.autopilot.enabled - input.node_pools[pool].config.service_account == "default" - msg := sprintf("GKE cluster node_pool %q should have a dedicated SA", [input.node_pools[pool].name]) + some pool in input.node_pools + pool.config.service_account == "default" + msg := sprintf("Node pool %q is configured with default SA", [pool.name]) } diff --git a/gke-policies/policy/node_pool_forbid_default_sa_test.rego b/gke-policies/policy/node_pool_forbid_default_sa_test.rego index 4d33eb16..8684c25d 100644 --- a/gke-policies/policy/node_pool_forbid_default_sa_test.rego +++ b/gke-policies/policy/node_pool_forbid_default_sa_test.rego @@ -12,24 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_forbid_default_sa +package gke.policy.node_pool_forbid_default_sa_test -test_cluster_with_2_np_and_mixed_sas { - not valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]} +import future.keywords.if +import data.gke.policy.node_pool_forbid_default_sa + +test_cluster_with_2_np_and_mixed_sas if { + not node_pool_forbid_default_sa.valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": 
{"enable_integrity_monitoring": true}}}]} } -test_cluster_with_2_np_and_dedicated_sas { - valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]} +test_cluster_with_2_np_and_dedicated_sas if { + node_pool_forbid_default_sa.valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}, {"name": "pool-1", "config": {"machine_type": "e2-standard-2", "disk_size_gb": 100, "oauth_scopes": ["https://www.googleapis.com/auth/cloud-platform"], "service_account": "gke-sa@prj.iam.gserviceaccount.com", "metadata": {"disable-legacy-endpoints": "true"}, "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}}]} } -test_cluster_with_1_np_and_default_sa { - 
not valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]} +test_cluster_with_1_np_and_default_sa if { + not node_pool_forbid_default_sa.valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "default", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]} } -test_cluster_with_1_np_and_dedicated_sa { - valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": true}}]} +test_cluster_with_1_np_and_dedicated_sa if { + node_pool_forbid_default_sa.valid with input as {"name": "cluster-1", "legacy_abac": {"enabled": false}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "gke-sa@prj.iam.gserviceaccount.com", "image_type": "COS_CONTAINERD", "disk_type": "pd-standard", "workload_metadata_config": {"mode": 2}, "shielded_instance_config": {"enable_integrity_monitoring": true}}, "management": {"auto_repair": true, "auto_upgrade": 
true}}]} } -test_autopilot_with_default { - valid with input as {"name": "cluster-1", "autopilot": {"enabled": true}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD"}}]} +test_autopilot_with_default if { + node_pool_forbid_default_sa.valid with input as {"name": "cluster-1", "autopilot": {"enabled": true}, "node_pools": [{"name": "pool-1", "config": {"machine_type": "e2-standard-4", "disk_size_gb": 100, "service_account": "default", "image_type": "COS_CONTAINERD"}}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_integrity_monitoring.rego b/gke-policies/policy/node_pool_integrity_monitoring.rego index 1654400e..4703740e 100644 --- a/gke-policies/policy/node_pool_integrity_monitoring.rego +++ b/gke-policies/policy/node_pool_integrity_monitoring.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Integrity monitoring on the nodes +# title: Enable integrity monitoring for node pools # description: GKE node pools should have integrity monitoring feature enabled to detect changes in a VM boot measurements # custom: # group: Security @@ -27,16 +27,21 @@ # cis: # version: "1.4" # id: "5.5.6" - +# dataSource: gke package gke.policy.node_pool_integrity_monitoring -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.node_pools[pool].config.shielded_instance_config.enable_integrity_monitoring - msg := sprintf("Node pool %q has disabled integrity monitoring feature.", [input.node_pools[pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.config.shielded_instance_config.enable_integrity_monitoring + msg := sprintf("Node pool %q is not configured with integrity monitoring", [pool.name]) } diff --git 
a/gke-policies/policy/node_pool_integrity_monitoring_test.rego b/gke-policies/policy/node_pool_integrity_monitoring_test.rego index 773843a9..20472ca8 100644 --- a/gke-policies/policy/node_pool_integrity_monitoring_test.rego +++ b/gke-policies/policy/node_pool_integrity_monitoring_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.node_pool_integrity_monitoring +package gke.policy.node_pool_integrity_monitoring_test -test_empty_shielded_instance_config { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]} +import future.keywords.if +import data.gke.policy.node_pool_integrity_monitoring + +test_empty_shielded_instance_config if { + not node_pool_integrity_monitoring.valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]} } -test_disabled_integrity_monitoring { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_integrity_monitoring": false}}}]} +test_disabled_integrity_monitoring if { + not node_pool_integrity_monitoring.valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_integrity_monitoring": false}}}]} } -test_enabled_integrity_monitoring { - valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_integrity_monitoring": true}}}]} +test_enabled_integrity_monitoring if { + node_pool_integrity_monitoring.valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", 
"shielded_instance_config":{"enable_integrity_monitoring": true}}}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_multi_zone.rego b/gke-policies/policy/node_pool_multi_zone.rego index f73206bc..96acb3e2 100644 --- a/gke-policies/policy/node_pool_multi_zone.rego +++ b/gke-policies/policy/node_pool_multi_zone.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Multi-zone node pools +# title: Ensure redudndancy of the node pools # description: GKE node pools should be regional (multiple zones) for maximum nodes availability during zonal outages # custom: # group: Availability @@ -25,16 +25,21 @@ # zones. Slick "Save" button once done. # externalURI: https://cloud.google.com/kubernetes-engine/docs/concepts/node-pools#multiple-zones # sccCategory: NODEPOOL_ZONAL - +# dataSource: gke package gke.policy.node_pool_multi_zone -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - count(input.node_pools[pool].locations) < 2 - msg := sprintf("Node pool %q is not on multiple zones.", [input.node_pools[pool].name]) -} \ No newline at end of file +violation contains msg if { + some pool in input.node_pools + count(pool.locations) < 2 + msg := sprintf("Node pool %q is not configured with multiple zones", [pool.name]) +} diff --git a/gke-policies/policy/node_pool_multi_zone_test.rego b/gke-policies/policy/node_pool_multi_zone_test.rego index 853fbdfb..1be9fb0b 100644 --- a/gke-policies/policy/node_pool_multi_zone_test.rego +++ b/gke-policies/policy/node_pool_multi_zone_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_multi_zone +package gke.policy.node_pool_multi_zone_test -test_node_pool_one_zone { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a"]}]} +import future.keywords.if +import data.gke.policy.node_pool_multi_zone + +test_node_pool_one_zone if { + not node_pool_multi_zone.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a"]}]} } -test_node_pool_two_zones { - valid with input as {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b"]}]} +test_node_pool_two_zones if { + node_pool_multi_zone.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b"]}]} } -test_node_pool_three_zones { - valid with input as {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b", "us-central1-c"]}]} +test_node_pool_three_zones if { + node_pool_multi_zone.valid with input as {"name": "cluster", "node_pools": [{"name": "default", "locations": ["us-central1-a", "us-central1-b", "us-central1-c"]}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_secure_boot.rego b/gke-policies/policy/node_pool_secure_boot.rego index 56a94e18..e8f841a4 100644 --- a/gke-policies/policy/node_pool_secure_boot.rego +++ b/gke-policies/policy/node_pool_secure_boot.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Secure boot on the nodes +# title: Enable Secure boot for node pools # description: Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails # custom: # group: Security @@ -27,16 +27,21 @@ # cis: # version: "1.4" # id: "5.5.7" - +# dataSource: gke package gke.policy.node_pool_secure_boot -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.node_pools[pool].config.shielded_instance_config.enable_secure_boot - msg := sprintf("Node pool %q has disabled secure boot.", [input.node_pools[pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.config.shielded_instance_config.enable_secure_boot + msg := sprintf("Node pool %q is not configured with secure boot", [pool.name]) } diff --git a/gke-policies/policy/node_pool_secure_boot_test.rego b/gke-policies/policy/node_pool_secure_boot_test.rego index fd36296a..6fe039fc 100644 --- a/gke-policies/policy/node_pool_secure_boot_test.rego +++ b/gke-policies/policy/node_pool_secure_boot_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_secure_boot +package gke.policy.node_pool_secure_boot_test -test_empty_shielded_instance_config { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]} +import future.keywords.if +import data.gke.policy.node_pool_secure_boot + +test_empty_shielded_instance_config if { + not node_pool_secure_boot.valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{}}}]} } -test_disabled_secure_boot { - not valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": false}}}]} +test_disabled_secure_boot if { + not node_pool_secure_boot.valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": false}}}]} } -test_enabled_secure_boot { - valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": true}}}]} +test_enabled_secure_boot if { + node_pool_secure_boot.valid with input as {"name": "cluster", "node_pools": [{"name": "default-pool", "config": {"machine_type": "e2-medium", "shielded_instance_config":{"enable_secure_boot": true}}}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_use_cos.rego b/gke-policies/policy/node_pool_use_cos.rego index 1d5765bf..794ef275 100644 --- a/gke-policies/policy/node_pool_use_cos.rego +++ b/gke-policies/policy/node_pool_use_cos.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Use Container-Optimized OS +# title: Configure Container-Optimized OS for node pools # description: GKE node pools should use Container-Optimized OS which is maintained by Google and optimized for running Docker containers with security and efficiency. # custom: # group: Security @@ -29,19 +29,22 @@ # cis: # version: "1.4" # id: "5.5.1" - +# dataSource: gke package gke.policy.node_pool_use_cos import future.keywords.in +import future.keywords.if +import future.keywords.contains -default valid = false +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not lower(input.node_pools[pool].config.image_type) in {"cos", "cos_containerd"} - not startswith(lower(input.node_pools[pool].config.image_type), "windows") - msg := sprintf("Node pool %q does not use Container-Optimized OS.", [input.node_pools[pool].name]) -} \ No newline at end of file +violation contains msg if { + some pool in input.node_pools + not lower(pool.config.image_type) in {"cos", "cos_containerd"} + not startswith(lower(pool.config.image_type), "windows") + msg := sprintf("Node pool %q is not configured with COS", [pool.name]) +} diff --git a/gke-policies/policy/node_pool_use_cos_test.rego b/gke-policies/policy/node_pool_use_cos_test.rego index 2d6f17f4..4764cb7a 100644 --- a/gke-policies/policy/node_pool_use_cos_test.rego +++ b/gke-policies/policy/node_pool_use_cos_test.rego @@ -12,36 +12,39 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_use_cos +package gke.policy.node_pool_use_cos_test -test_node_pool_using_cos { - valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}}]} +import future.keywords.if +import data.gke.policy.node_pool_use_cos + +test_node_pool_using_cos if { + node_pool_use_cos.valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}}]} } -test_node_pool_using_cos_containerd { - valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos_containerd"}}]} +test_node_pool_using_cos_containerd if { + node_pool_use_cos.valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos_containerd"}}]} } -test_node_pool_using_cos_uppercase { - valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS"}}]} +test_node_pool_using_cos_uppercase if { + node_pool_use_cos.valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS"}}]} } -test_node_pool_using_cos_containerd_uppercase { - valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS_CONTAINERD"}}]} +test_node_pool_using_cos_containerd_uppercase if { + node_pool_use_cos.valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "COS_CONTAINERD"}}]} } -test_node_pool_not_using_cos { - not valid with input as {"name": "cluster-not-cos", "node_pools": [{"name": "default", "config": {"image_type": "another_image"}}]} +test_node_pool_not_using_cos if { + not node_pool_use_cos.valid with input as {"name": "cluster-not-cos", "node_pools": [{"name": "default", "config": {"image_type": "another_image"}}]} } -test_multiple_node_pool_using_cos_but_only_one { - not valid with input as {"name": "cluster-not-cos", "node_pools": 
[{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "other"}}]} +test_multiple_node_pool_using_cos_but_only_one if { + not node_pool_use_cos.valid with input as {"name": "cluster-not-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "other"}}]} } -test_multiple_node_pool_using_cos { - valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "cos_containerd"}}]} +test_multiple_node_pool_using_cos if { + node_pool_use_cos.valid with input as {"name": "cluster-cos", "node_pools": [{"name": "default", "config": {"image_type": "cos"}},{"name": "custom", "config": {"image_type": "cos_containerd"}}]} } -test_windows_node_pool { - valid with input as {"name": "cluster-windows", "node_pools": [{"name": "default", "config": {"image_type": "windows-server"}}]} +test_windows_node_pool if { + node_pool_use_cos.valid with input as {"name": "cluster-windows", "node_pools": [{"name": "default", "config": {"image_type": "windows-server"}}]} } \ No newline at end of file diff --git a/gke-policies/policy/node_pool_version_skew.rego b/gke-policies/policy/node_pool_version_skew.rego index e7ba2a76..6768a4aa 100644 --- a/gke-policies/policy/node_pool_version_skew.rego +++ b/gke-policies/policy/node_pool_version_skew.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: Version skew between node pools and control plane +# title: Ensure acceptable version skew in a cluster # description: Difference between cluster control plane version and node pools version should be no more than 2 minor versions. # custom: # group: Management @@ -27,54 +27,58 @@ # Click "Upgrade" button once done. 
# externalURI: https://cloud.google.com/kubernetes-engine/docs/how-to/upgrading-a-cluster#upgrading-nodes # sccCategory: NODEPOOL_VERSION_SKEW_UNSUPPORTED - +# dataSource: gke package gke.policy.node_pool_version_skew -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false expr := `^([0-9]+)\.([0-9]+)\.([0-9]+)(-.+)*$` -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.current_master_version msg := "control plane version is undefined" } -violation[msg] { - some node_pool - not input.node_pools[node_pool].version - msg := sprintf("node pool %q control plane version is undefined", [input.node_pools[node_pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.version + msg := sprintf("Node pool %q version is undefined", [pool.name]) } -violation[msg] { +violation contains msg if { master_ver := regex.find_all_string_submatch_n(expr, input.current_master_version, 1) count(master_ver) == 0 - msg := sprintf("control plane version %q does not match version regex", [input.current_master_version]) + msg := sprintf("Control plane version %q does not match version regex", [input.current_master_version]) } -violation[msg] { - some node_pool - node_pool_ver := regex.find_all_string_submatch_n(expr, input.node_pools[node_pool].version, 1) +violation contains msg if { + some pool in input.node_pools + node_pool_ver := regex.find_all_string_submatch_n(expr, pool.version, 1) count(node_pool_ver) == 0 - msg := sprintf("node pool %q version %q does not match version regex", [input.node_pools[node_pool].name, input.node_pools[node_pool].version]) + msg := sprintf("Node pool %q version %q does not match version regex", [pool.name, pool.version]) } -violation[msg] { +violation contains msg if { master_ver := regex.find_all_string_submatch_n(expr, input.current_master_version, 1) - some node_pool - node_pool_ver := 
regex.find_all_string_submatch_n(expr, input.node_pools[node_pool].version, 1) + some pool in input.node_pools + node_pool_ver := regex.find_all_string_submatch_n(expr, pool.version, 1) master_ver[0][1] != node_pool_ver[0][1] - msg := sprintf("node pool %q and control plane major versions differ", [input.node_pools[node_pool].name]) + msg := sprintf("Node pool %q and control plane major versions differ", [pool.name]) } -violation[msg] { +violation contains msg if { master_ver := regex.find_all_string_submatch_n(expr, input.current_master_version, 1) - some node_pool - node_pool_ver := regex.find_all_string_submatch_n(expr, input.node_pools[node_pool].version, 1) + some pool in input.node_pools + node_pool_ver := regex.find_all_string_submatch_n(expr, pool.version, 1) minor_diff := to_number(master_ver[0][2]) - to_number(node_pool_ver[0][2]) abs(minor_diff) > 2 - msg := sprintf("node pool %q and control plane minor versions difference is greater than 2", [input.node_pools[node_pool].name]) + msg := sprintf("Node pool %q and control plane minor versions difference is greater than 2", [pool.name]) } diff --git a/gke-policies/policy/node_pool_version_skew_test.rego b/gke-policies/policy/node_pool_version_skew_test.rego index a7218d89..ca0ca3f4 100644 --- a/gke-policies/policy/node_pool_version_skew_test.rego +++ b/gke-policies/policy/node_pool_version_skew_test.rego @@ -12,32 +12,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.node_pool_version_skew +package gke.policy.node_pool_version_skew_test -test_empty_master_version { - not valid with input as {"name":"cluster","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]} +import future.keywords.if +import data.gke.policy.node_pool_version_skew + +test_empty_master_version if { + not node_pool_version_skew.valid with input as {"name":"cluster","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]} } -test_empty_nodepool_version { - not valid with input as {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool"}]} +test_empty_nodepool_version if { + not node_pool_version_skew.valid with input as {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool"}]} } -test_invalid_master_version { - not valid with input as {"name":"cluster","current_master_version":"1.22.A","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]} +test_invalid_master_version if { + not node_pool_version_skew.valid with input as {"name":"cluster","current_master_version":"1.22.A","node_pools":[{"name":"default-pool","version":"1.22.1-gke.600"}]} } -test_invalid_nodepool_version { - not valid with input as {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"1.22"}]} +test_invalid_nodepool_version if { + not node_pool_version_skew.valid with input as {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"1.22"}]} } -test_different_major { - not valid with input as {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"2.22.1-gke.200"}]} +test_different_major if { + not node_pool_version_skew.valid with input as {"name":"cluster","current_master_version":"1.22.1-gke.600","node_pools":[{"name":"default-pool","version":"2.22.1-gke.200"}]} } -test_greater_minor { - not valid with 
input as {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.21.5-gke.200"}]} +test_greater_minor if { + not node_pool_version_skew.valid with input as {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.21.5-gke.200"}]} } -test_good_minor { - valid with input as {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.22.5-gke.200"}]} +test_good_minor if { + node_pool_version_skew.valid with input as {"name":"cluster","current_master_version":"1.24.10-gke.200","node_pools":[{"name":"default-pool","version":"1.22.5-gke.200"}]} } diff --git a/gke-policies/policy/node_rbac_security_group.rego b/gke-policies/policy/node_rbac_security_group.rego index 13b13061..3d747319 100644 --- a/gke-policies/policy/node_rbac_security_group.rego +++ b/gke-policies/policy/node_rbac_security_group.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Use RBAC Google group +# title: Enable Google Groups for RBAC # description: GKE cluster should have RBAC security Google group enabled # custom: # group: Security @@ -30,16 +30,19 @@ # cis: # version: "1.4" # id: "5.8.3" - +# dataSource: gke package gke.policy.rbac_security_group_enabled -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.authenticator_groups_config.enabled - msg := sprintf("RBAC security group not enabled for cluster %q", [input.name]) + msg := "Cluster is not configured with Google Groups for RBAC" } \ No newline at end of file diff --git a/gke-policies/policy/node_rbac_security_group_test.rego b/gke-policies/policy/node_rbac_security_group_test.rego index fa6e5e25..8ea76a48 100644 --- a/gke-policies/policy/node_rbac_security_group_test.rego +++ b/gke-policies/policy/node_rbac_security_group_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.rbac_security_group_enabled +package gke.policy.rbac_security_group_enabled_test -test_rbac_group_enabled { - valid with input as {"name": "cluster1", "authenticator_groups_config": {"enabled": true}} +import future.keywords.if +import data.gke.policy.rbac_security_group_enabled + +test_rbac_group_enabled if { + rbac_security_group_enabled.valid with input as {"name": "cluster1", "authenticator_groups_config": {"enabled": true}} } -test_rbac_group_disabled { - not valid with input as {"name": "cluster1", "authenticator_groups_config": {"enabled": false}} +test_rbac_group_disabled if { + not rbac_security_group_enabled.valid with input as {"name": "cluster1", "authenticator_groups_config": {"enabled": false}} } -test_rbac_group_without_authenticator_group { - not valid with input as {"name": "cluster1"} +test_rbac_group_without_authenticator_group if { + not rbac_security_group_enabled.valid with input as {"name": "cluster1"} } \ No newline at end of file diff --git a/gke-policies/policy/private_cluster.rego b/gke-policies/policy/private_cluster.rego index 208d0ec2..9c89617d 100644 --- a/gke-policies/policy/private_cluster.rego +++ b/gke-policies/policy/private_cluster.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: GKE private cluster +# title: Use private nodes # description: GKE cluster should be private to ensure network isolation # custom: # group: Security @@ -26,16 +26,19 @@ # cis: # version: "1.4" # id: "5.6.5" - +# dataSource: gke package gke.policy.private_cluster -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.private_cluster_config.enable_private_nodes - msg := "GKE cluster has not enabled private nodes" + msg := "Cluster is not configured with private nodes" } diff --git a/gke-policies/policy/private_cluster_test.rego b/gke-policies/policy/private_cluster_test.rego index 306b5576..21a45ca1 100644 --- a/gke-policies/policy/private_cluster_test.rego +++ b/gke-policies/policy/private_cluster_test.rego @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.private_cluster +package gke.policy.private_cluster_test -test_private_nodes_enabled { - valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}} +import future.keywords.if +import data.gke.policy.private_cluster + +test_private_nodes_enabled if { + private_cluster.valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": true}} } -test_private_nodes_disabled { - not valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}} +test_private_nodes_disabled if { + not private_cluster.valid with input as {"name": "test-cluster", "private_cluster_config": {"enable_private_nodes": false}} } -test_private_cluster_config_missing { - not valid with input as {"name": "test-cluster"} +test_private_cluster_config_missing if { + not private_cluster.valid with input as {"name": "test-cluster"} } \ No newline at end of file diff --git a/gke-policies/policy/secret_encryption.rego b/gke-policies/policy/secret_encryption.rego index da81e522..700a3f95 100644 --- a/gke-policies/policy/secret_encryption.rego +++ b/gke-policies/policy/secret_encryption.rego @@ -13,7 +13,7 @@ # limitations under the License. 
# METADATA -# title: Kubernetes secrets encryption +# title: Enable Kubernetes secrets encryption # description: GKE cluster should use encryption for kubernetes application secrets # custom: # group: Security @@ -29,16 +29,19 @@ # cis: # version: "1.4" # id: "5.3.1" - +# dataSource: gke package gke.policy.secret_encryption -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { input.database_encryption.state != 1 - msg := "The GKE cluster is not configured to encrypt kubernetes application secrets" + msg := "Cluster is not configured with kubernetes secrets encryption" } diff --git a/gke-policies/policy/secret_encryption_test.rego b/gke-policies/policy/secret_encryption_test.rego index ce7d113d..abecbcca 100644 --- a/gke-policies/policy/secret_encryption_test.rego +++ b/gke-policies/policy/secret_encryption_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.secret_encryption +package gke.policy.secret_encryption_test -test_enabled_encryption { - valid with input as {"name": "cluster-1", "database_encryption": {"state": 1}} +import future.keywords.if +import data.gke.policy.secret_encryption + +test_enabled_encryption if { + secret_encryption.valid with input as {"name": "cluster-1", "database_encryption": {"state": 1}} } -test_disabled_encryption { - not valid with input as {"name": "cluster-1", "database_encryption": {"state": 2}} +test_disabled_encryption if { + not secret_encryption.valid with input as {"name": "cluster-1", "database_encryption": {"state": 2}} } diff --git a/gke-policies/policy/shielded_nodes.rego b/gke-policies/policy/shielded_nodes.rego index cb5639c7..b1bfaee5 100644 --- a/gke-policies/policy/shielded_nodes.rego +++ b/gke-policies/policy/shielded_nodes.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE Shielded Nodes +# title: Enable Shielded Nodes # description: GKE cluster should use shielded nodes # custom: # group: Security @@ -27,17 +27,19 @@ # cis: # version: "1.4" # id: "5.5.5" - +# dataSource: gke package gke.policy.shielded_nodes -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.shielded_nodes.enabled = true - - msg := "The GKE cluster does not have shielded nodes enabled" + msg := "Cluster is not configured with shielded nodes" } diff --git a/gke-policies/policy/shielded_nodes_test.rego b/gke-policies/policy/shielded_nodes_test.rego index a1ca10d0..50211b9e 100644 --- a/gke-policies/policy/shielded_nodes_test.rego +++ b/gke-policies/policy/shielded_nodes_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.shielded_nodes +package gke.policy.shielded_nodes_test -test_enabled_shielded_nodes { - valid with input as {"name": "test-cluster", "shielded_nodes": { "enabled": true }} +import future.keywords.if +import data.gke.policy.shielded_nodes + +test_enabled_shielded_nodes if { + shielded_nodes.valid with input as {"name": "test-cluster", "shielded_nodes": { "enabled": true }} } -test_disabled_shielded_nodes { - not valid with input as {"name": "test-cluster", "shielded_nodes": {}} +test_disabled_shielded_nodes if { + not shielded_nodes.valid with input as {"name": "test-cluster", "shielded_nodes": {}} } diff --git a/gke-policies/policy/vpc_native_cluster.rego b/gke-policies/policy/vpc_native_cluster.rego index c634e563..efcc0a17 100644 --- a/gke-policies/policy/vpc_native_cluster.rego +++ b/gke-policies/policy/vpc_native_cluster.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE VPC-native cluster +# title: Use VPC-native cluster # description: GKE cluster nodepool should be VPC-native as per our best-practices # custom: # group: Management @@ -27,21 +27,26 @@ # cis: # version: "1.4" # id: "5.6.2" - +# dataSource: gke package gke.policy.vpc_native_cluster -default valid = false +import future.keywords.if +import future.keywords.in +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { - not input.node_pools[pool].network_config.pod_ipv4_cidr_block - msg := sprintf("Nodepool %q of the GKE cluster is not configured to use VPC-native routing", [input.node_pools[pool].name]) +violation contains msg if { + some pool in input.node_pools + not pool.network_config.pod_ipv4_cidr_block + msg := sprintf("Nodepool %q is not configured with use VPC-native routing", [pool.name]) } -violation[msg] { +violation contains msg if { not input.ip_allocation_policy.use_ip_aliases - msg := "the GKE cluster is not configured to use VPC-native routing" -} \ No newline at end of 
file + msg := "Cluster is not configured with VPC-native routing" +} diff --git a/gke-policies/policy/vpc_native_cluster_test.rego b/gke-policies/policy/vpc_native_cluster_test.rego index 95afa6c9..4ac2cfcf 100644 --- a/gke-policies/policy/vpc_native_cluster_test.rego +++ b/gke-policies/policy/vpc_native_cluster_test.rego @@ -12,20 +12,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.policy.vpc_native_cluster +package gke.policy.vpc_native_cluster_test -test_vpc_native_cluster_with_pods_range { - valid with input as {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}, "node_pools": [{"name": "default", "network_config": { "pod_range": "gke-cluster-1-vpc-pods-273c12cd", "pod_ipv4_cidr_block": "10.48.0.0/14" }, "management": {"auto_repair": true, "auto_upgrade": true }}]} +import future.keywords.if +import data.gke.policy.vpc_native_cluster + +test_vpc_native_cluster_with_pods_range if { + vpc_native_cluster.valid with input as {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}, "node_pools": [{"name": "default", "network_config": { "pod_range": "gke-cluster-1-vpc-pods-273c12cd", "pod_ipv4_cidr_block": "10.48.0.0/14" }, "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_vpc_native_cluster_without_pods_range { - not valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} +test_vpc_native_cluster_without_pods_range if { + not vpc_native_cluster.valid with input as {"name": "cluster-not-repairing", "node_pools": [{"name": "default", "management": {"auto_repair": true, "auto_upgrade": true }}]} } -test_vpc_native_cluster_using_ip_aliases { - valid with input as {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}} +test_vpc_native_cluster_using_ip_aliases if { + vpc_native_cluster.valid with 
input as {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": true}} } -test_vpc_native_cluster_not_using_ip_aliases { - not valid with input as {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": false}} +test_vpc_native_cluster_not_using_ip_aliases if { + not vpc_native_cluster.valid with input as {"name": "cluster-not-repairing", "ip_allocation_policy": {"use_ip_aliases": false}} } \ No newline at end of file diff --git a/gke-policies/policy/workload_identity.rego b/gke-policies/policy/workload_identity.rego index 60afbf6d..b19d6270 100644 --- a/gke-policies/policy/workload_identity.rego +++ b/gke-policies/policy/workload_identity.rego @@ -13,7 +13,7 @@ # limitations under the License. # METADATA -# title: GKE Workload Identity +# title: Use GKE Workload Identity # description: GKE cluster should have Workload Identity enabled # custom: # group: Security @@ -29,16 +29,19 @@ # cis: # version: "1.4" # id: "5.2.2" - +# dataSource: gke package gke.policy.workload_identity -default valid = false +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { not input.workload_identity_config.workload_pool - msg := "The GKE cluster does not have workload identity enabled" + msg := "Cluster is not configured with Workload Identity" } diff --git a/gke-policies/policy/workload_identity_test.rego b/gke-policies/policy/workload_identity_test.rego index 05aa2ed7..5265c4de 100644 --- a/gke-policies/policy/workload_identity_test.rego +++ b/gke-policies/policy/workload_identity_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.policy.workload_identity +package gke.policy.workload_identity_test -test_enabled_workload_identity { - valid with input as {"name": "test-cluster", "workload_identity_config": { "workload_pool": "foo_pool.svc.id.goog" }} +import future.keywords.if +import data.gke.policy.workload_identity + +test_enabled_workload_identity if { + workload_identity.valid with input as {"name": "test-cluster", "workload_identity_config": { "workload_pool": "foo_pool.svc.id.goog" }} } -test_disabled_workload_identity { - not valid with input as {"name": "test-cluster"} +test_disabled_workload_identity if { + not workload_identity.valid with input as {"name": "test-cluster"} } diff --git a/gke-policies/rule/cluster/location.rego b/gke-policies/rule/cluster/location.rego index 7f5557e6..248b5b42 100644 --- a/gke-policies/rule/cluster/location.rego +++ b/gke-policies/rule/cluster/location.rego @@ -14,10 +14,14 @@ package gke.rule.cluster.location -regional(location) { - regex.match("^[^-]+-[^-]+$", location) +import future.keywords.if + +regional(location) if { + re := "^[^-]+-[^-]+$" + regex.match(re, location) } -zonal(location) { - regex.match("^[^-]+-[^-]+-[^-]+$", location) +zonal(location) if { + re := "^[^-]+-[^-]+-[^-]+$" + regex.match(re, location) } diff --git a/gke-policies/rule/cluster/location_test.rego b/gke-policies/rule/cluster/location_test.rego index 68c13b08..aaf77497 100644 --- a/gke-policies/rule/cluster/location_test.rego +++ b/gke-policies/rule/cluster/location_test.rego @@ -12,22 +12,25 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.rule.cluster.location +package gke.rule.cluster.location_test -test_regional { - location := "europe-central2" - regional(location) - not zonal(location) +import future.keywords.if +import data.gke.rule.cluster.location + +test_regional if { + loc := "europe-central2" + location.regional(loc) + not location.zonal(loc) } -test_zonal { - location := "europe-central2-a" - zonal(location) - not regional(location) +test_zonal if { + loc := "europe-central2-a" + location.zonal(loc) + not location.regional(loc) } -test_not_regional_nor_zonal { - location := "test" - not regional(location) - not zonal(location) +test_not_regional_nor_zonal if { + loc := "test" + not location.regional(loc) + not location.zonal(loc) } diff --git a/gke-policies/rule/nodepool/location.rego b/gke-policies/rule/nodepool/location.rego index 289219b2..ee7c092a 100644 --- a/gke-policies/rule/nodepool/location.rego +++ b/gke-policies/rule/nodepool/location.rego @@ -14,12 +14,15 @@ package gke.rule.nodepool.location -regional[nodepool] { +import future.keywords.if +import future.keywords.contains + +regional contains nodepool if { nodepool := input.node_pools[_] count(nodepool.locations) > 1 } -zonal[nodepool] { +zonal contains nodepool if { nodepool := input.node_pools[_] count(nodepool.locations) < 2 } \ No newline at end of file diff --git a/gke-policies/scalability/limits_configmaps.rego b/gke-policies/scalability/limits_configmaps.rego index 3c16bfcc..dc058cc4 100644 --- a/gke-policies/scalability/limits_configmaps.rego +++ b/gke-policies/scalability/limits_configmaps.rego @@ -19,20 +19,20 @@ # group: Scalability # severity: High # sccCategory: CONFIGMAPS_LIMIT - package gke.scalability.configmaps -default valid = false +import future.keywords.if +import future.keywords.contains -default configmaps_limit = 2 #value is ONLY for demo purpose, does not reflect a real limit +default valid := false +default limit := 2 # value is ONLY for demo purpose, does not reflect a real limit -valid 
{ +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { configmaps := {object | object := input.Resources[_]; object.Data.kind == "ConfigMap"} - count(configmaps) > configmaps_limit - msg := sprintf("Configmaps found: %d higher than the limit: %d", [count(configmaps), configmaps_limit]) - print(msg) + count(configmaps) > limit + msg := sprintf("Configmaps found: %d higher than the limit: %d", [count(configmaps), limit]) } diff --git a/gke-policies/scalability/limits_configmaps_test.rego b/gke-policies/scalability/limits_configmaps_test.rego index b012212b..6d5e956b 100644 --- a/gke-policies/scalability/limits_configmaps_test.rego +++ b/gke-policies/scalability/limits_configmaps_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.scalability.configmaps +package gke.scalability.configmaps_test -test_configmap_underusage { - valid with input as {"Resources": [{"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}]} +import future.keywords.if +import data.gke.scalability.configmaps + +test_configmap_underusage if { + configmaps.valid with input as {"Resources": [{"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": 
[{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}]} } -test_configmap_overusage { - not valid with input as {"Resources": [{"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "test1", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}, {"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "test2", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}, {"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, 
"manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "test3", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}]} +test_configmap_overusage if { + not configmaps.valid with input as {"Resources": [{"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "test1", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}, {"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "test2", "namespace": "asm-system", "resourceVersion": "", "uid": ""}}}, {"Type": {"Group": "", "Version": "v1", "Name": "configmaps", "Namespaced": true}, "Data": {"apiVersion": "v1", "kind": "ConfigMap", "metadata": {"annotations": {"control-plane.alpha.kubernetes.io/leader": ""}, "creationTimestamp": "2022-06-21T10:10:31Z", "managedFields": [{"apiVersion": "v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:metadata": {"f:annotations": {".": {}, "f:control-plane.alpha.kubernetes.io/leader": {}}}}, "manager": "manager", "operation": "Update", "time": "2022-06-21T10:10:31Z"}], "name": "test3", "namespace": "asm-system", "resourceVersion": "", 
"uid": ""}}}]} } diff --git a/gke-policies/scalability/limits_hpas.rego b/gke-policies/scalability/limits_hpas.rego index 1ccbdc1b..ce626809 100644 --- a/gke-policies/scalability/limits_hpas.rego +++ b/gke-policies/scalability/limits_hpas.rego @@ -19,20 +19,20 @@ # group: Scalability # severity: High # sccCategory: HPAS_LIMIT - package gke.scalability.hpas -default valid = false +import future.keywords.if +import future.keywords.contains -default hpas_limit = 2 #the value is ONLY for demo purpose, does not reflect a real limit +default valid := false +default limit := 2 # the value is ONLY for demo purpose, does not reflect a real limit -valid { +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { hpas := {object | object := input.Resources[_]; object.Data.kind == "HorizontalPodAutoscaler"} - count(hpas) > hpas_limit - msg := sprintf("HPAs found: %d higher than the limit: %d", [count(hpas), hpas_limit]) - print(msg) + count(hpas) > limit + msg := sprintf("HPAs found: %d higher than the limit: %d", [count(hpas), limit]) } diff --git a/gke-policies/scalability/limits_hpas_test.rego b/gke-policies/scalability/limits_hpas_test.rego index cfe9e67f..43c0ec16 100644 --- a/gke-policies/scalability/limits_hpas_test.rego +++ b/gke-policies/scalability/limits_hpas_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-package gke.scalability.hpas +package gke.scalability.hpas_test -test_hpas_underusage { - valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2023-02-24T19:40:57Z"}}}]} +import future.keywords.if +import data.gke.scalability.hpas + +test_hpas_underusage if { + hpas.valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", 
"managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2023-02-24T19:40:57Z"}}}]} } -test_hpas_overusage { - not valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": "f6f0911a-3aed-46d0-90a3-d7c7cf70554e"}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, 
"currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2022-06-24T19:40:57Z"}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami-two", "namespace": "demo", "resourceVersion": "5958695", "uid": "f6f0911a-3aed-46d0-90a3-d7c7cf70554e"}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2022-06-24T19:40:57Z"}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami-three", "namespace": "demo", "resourceVersion": "5958695", "uid": "f6f0911a-3aed-46d0-90a3-d7c7cf70554e"}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": 
{"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2022-06-24T19:40:57Z"}}}]} +test_hpas_overusage if { + not hpas.valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": "f6f0911a-3aed-46d0-90a3-d7c7cf70554e"}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2022-06-24T19:40:57Z"}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami-two", "namespace": "demo", "resourceVersion": "5958695", "uid": "f6f0911a-3aed-46d0-90a3-d7c7cf70554e"}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", 
"kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2022-06-24T19:40:57Z"}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami-three", "namespace": "demo", "resourceVersion": "5958695", "uid": "f6f0911a-3aed-46d0-90a3-d7c7cf70554e"}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2022-06-24T19:40:57Z"}}}]} } diff --git a/gke-policies/scalability/limits_unused_hpas.rego b/gke-policies/scalability/limits_unused_hpas.rego index 77447685..b6ae42b1 100644 --- a/gke-policies/scalability/limits_unused_hpas.rego +++ b/gke-policies/scalability/limits_unused_hpas.rego @@ -19,19 +19,21 @@ # group: Scalability # severity: Low # sccCategory: HPAS_UNUSED - package gke.scalability.unused_hpas -default valid = false +import future.keywords.in +import future.keywords.if +import future.keywords.contains + +default valid := false -valid { - print(violation) +valid if { count(violation) == 0 } -violation[msg] { +violation contains msg if { hpas := {object | object := input.Resources[_]; object.Data.kind == "HorizontalPodAutoscaler"} - some i - not 
hpas[i].Data.status.lastScaleTime - msg := sprintf("HPA %s in namespace %s never executed since %s", [hpas[i].Data.metadata.name, hpas[i].Data.metadata.namespace, hpas[i].Data.metadata.creationTimestamp]) + some hpa in hpas + not hpa.Data.status.lastScaleTime + msg := sprintf("HPA %s in namespace %s never executed since %s", [hpa.Data.metadata.name, hpa.Data.metadata.namespace, hpa.Data.metadata.creationTimestamp]) } diff --git a/gke-policies/scalability/limits_unused_hpas_test.rego b/gke-policies/scalability/limits_unused_hpas_test.rego index 029fd632..d1aed866 100644 --- a/gke-policies/scalability/limits_unused_hpas_test.rego +++ b/gke-policies/scalability/limits_unused_hpas_test.rego @@ -12,12 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -package gke.scalability.unused_hpas +package gke.scalability.unused_hpas_test -test_unused_hpas_not_presence { - valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", 
"resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2023-02-24T19:40:57Z"}}}]} +import future.keywords.if +import data.gke.scalability.unused_hpas + +test_unused_hpas_not_presence if { + unused_hpas.valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2023-02-24T19:40:57Z"}}}]} } -test_unused_hpas_presence { - not valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": 
"v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2023-02-24T19:40:57Z"}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, 
{"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 
1}}}]} +test_unused_hpas_presence if { + not unused_hpas.valid with input as {"Resources": [{"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1, "lastScaleTime": "2023-02-24T19:40:57Z"}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, 
"f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, "targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1}}}, {"Type": {"Group": "autoscaling", "Version": "v1", "Name": "horizontalpodautoscalers", "Namespaced": true}, "Data": {"apiVersion": "autoscaling/v1", "kind": "HorizontalPodAutoscaler", "metadata": {"annotations": {"autoscaling.alpha.kubernetes.io/conditions": "", "autoscaling.alpha.kubernetes.io/current-metrics": ""}, "creationTimestamp": "2022-06-24T19:02:21Z", "managedFields": [{"apiVersion": "autoscaling/v1", "fieldsType": "FieldsV1", "fieldsV1": {"f:spec": {"f:maxReplicas": {}, "f:minReplicas": {}, "f:scaleTargetRef": {}, "f:targetCPUUtilizationPercentage": {}}}, "manager": "kubectl-autoscale", "operation": "Update", "time": "2022-06-24T19:02:21Z"}, {"apiVersion": "autoscaling/v2beta2", "fieldsType": "FieldsV1", "fieldsV1": {"f:status": {"f:conditions": {}, "f:currentMetrics": {}, "f:currentReplicas": {}, "f:desiredReplicas": {}, "f:lastScaleTime": {}}}, "manager": "vpa-recommender", "operation": "Update", "subresource": "status", "time": "2022-06-24T19:33:41Z"}], "name": "wherami", "namespace": "demo", "resourceVersion": "5958695", "uid": ""}, "spec": {"maxReplicas": 10, "minReplicas": 1, "scaleTargetRef": {"apiVersion": "apps/v1", "kind": "Deployment", "name": "wherami"}, 
"targetCPUUtilizationPercentage": 50}, "status": {"currentCPUUtilizationPercentage": 3, "currentReplicas": 1, "desiredReplicas": 1}}}]} }