From 1cb70c02f738360889b618fd29233c37b70e693e Mon Sep 17 00:00:00 2001 From: Jacob Date: Sat, 5 Feb 2022 17:19:35 -0800 Subject: [PATCH] added ansible-lockdown modules --- .../APACHE-2.4-CIS/CONTRIBUTING.rst | 54 + Linux/ansible-lockdown/APACHE-2.4-CIS/LICENSE | 21 + .../ansible-lockdown/APACHE-2.4-CIS/README.md | 298 +++ .../APACHE-2.4-CIS/communitytodevel.yml | 38 + .../APACHE-2.4-CIS/defaults/main.yml | 317 +++ .../APACHE-2.4-CIS/develtomaster.yml | 38 + .../files/custom_cert/custom_cert_file.crt | 1 + .../files/custom_cert/custom_cert_key.key | 1 + .../APACHE-2.4-CIS/handlers/main.yml | 15 + .../APACHE-2.4-CIS/meta/main.yml | 19 + .../ansible-lockdown/APACHE-2.4-CIS/site.yml | 8 + .../tasks/cis_apache_redhat_fix.yml | 2205 +++++++++++++++++ .../tasks/cis_apache_ubuntu_fix.yml | 2137 ++++++++++++++++ .../APACHE-2.4-CIS/tasks/main.yml | 27 + .../APACHE-2.4-CIS/tasks/prelim.yml | 111 + .../templates/crs-setup.conf.j2 | 772 ++++++ .../APACHE-2.4-CIS/tests/inventory | 2 + .../APACHE-2.4-CIS/tests/test.yml | 5 + .../APACHE-2.4-CIS/vars/main.yml | 2 + .../POSTGRES-12-CIS/.ansible-lint | 11 + .../POSTGRES-12-CIS/.travis.yml | 29 + .../POSTGRES-12-CIS/CONTRIBUTING.rst | 67 + .../ansible-lockdown/POSTGRES-12-CIS/LICENSE | 21 + .../POSTGRES-12-CIS/README.md | 62 + .../POSTGRES-12-CIS/defaults/main.yml | 284 +++ .../POSTGRES-12-CIS/handlers/main.yml | 18 + .../POSTGRES-12-CIS/meta/main.yml | 26 + .../ansible-lockdown/POSTGRES-12-CIS/site.yml | 6 + .../tasks/cis_pgs12_redhat_fixes.yml | 1329 ++++++++++ .../tasks/cis_pgs12_ubuntu_fixes.yml | 1213 +++++++++ .../POSTGRES-12-CIS/tasks/main.yml | 23 + .../tasks/postgresql12_install.yml | 78 + .../POSTGRES-12-CIS/tasks/prelim.yml | 111 + .../POSTGRES-12-CIS/tasks/rhel7_fips.yml | 108 + .../POSTGRES-12-CIS/tests/inventory | 2 + .../POSTGRES-12-CIS/tests/test.yml | 5 + .../POSTGRES-12-CIS/vars/main.yml | 21 + .../ansible-lockdown/RHEL7-CIS/.ansible-lint | 11 + .../ansible-lockdown/RHEL7-CIS/.gitattributes | 6 + Linux/ansible-lockdown/RHEL7-CIS/.yamllint | 22 + .../RHEL7-CIS/CONTRIBUTING.rst | 66 + Linux/ansible-lockdown/RHEL7-CIS/ChangeLog.md | 77 + Linux/ansible-lockdown/RHEL7-CIS/LICENSE | 21 + Linux/ansible-lockdown/RHEL7-CIS/README.md | 155 ++ Linux/ansible-lockdown/RHEL7-CIS/ansible.cfg | 25 + .../RHEL7-CIS/defaults/main.yml | 605 +++++ .../RHEL7-CIS/group_vars/docker | 28 + .../RHEL7-CIS/group_vars/vagrant | 28 + .../RHEL7-CIS/handlers/main.yml | 103 + Linux/ansible-lockdown/RHEL7-CIS/local.yml | 11 + .../ansible-lockdown/RHEL7-CIS/meta/main.yml | 19 + Linux/ansible-lockdown/RHEL7-CIS/site.yml | 6 + .../RHEL7-CIS/tasks/LE_audit_setup.yml | 22 + .../RHEL7-CIS/tasks/check_prereqs.yml | 47 + .../ansible-lockdown/RHEL7-CIS/tasks/main.yml | 129 + .../RHEL7-CIS/tasks/parse_etc_password.yml | 31 + .../ansible-lockdown/RHEL7-CIS/tasks/post.yml | 12 + .../tasks/post_remediation_audit.yml | 43 + .../RHEL7-CIS/tasks/pre_remediation_audit.yml | 111 + .../RHEL7-CIS/tasks/prelim.yml | 87 + .../RHEL7-CIS/tasks/section_1/cis_1.1.1.x.yml | 70 + .../RHEL7-CIS/tasks/section_1/cis_1.1.x.yml | 397 +++ .../RHEL7-CIS/tasks/section_1/cis_1.2.x.yml | 96 + .../RHEL7-CIS/tasks/section_1/cis_1.3.x.yml | 45 + .../RHEL7-CIS/tasks/section_1/cis_1.4.x.yml | 72 + .../RHEL7-CIS/tasks/section_1/cis_1.5.x.yml | 77 + .../RHEL7-CIS/tasks/section_1/cis_1.6.x.yml | 114 + .../RHEL7-CIS/tasks/section_1/cis_1.7.x.yml | 91 + .../RHEL7-CIS/tasks/section_1/cis_1.8.x.yml | 114 + .../RHEL7-CIS/tasks/section_1/cis_1.9.x.yml | 16 + .../RHEL7-CIS/tasks/section_1/main.yml | 31 + 
.../RHEL7-CIS/tasks/section_2/cis_2.1.x.yml | 15 + .../RHEL7-CIS/tasks/section_2/cis_2.2.1.x.yml | 116 + .../RHEL7-CIS/tasks/section_2/cis_2.2.x.yml | 356 +++ .../RHEL7-CIS/tasks/section_2/cis_2.3.x.yml | 86 + .../RHEL7-CIS/tasks/section_2/cis_2.4.x.yml | 11 + .../RHEL7-CIS/tasks/section_2/main.yml | 16 + .../RHEL7-CIS/tasks/section_3/cis_3.1.x.yml | 36 + .../RHEL7-CIS/tasks/section_3/cis_3.2.x.yml | 54 + .../RHEL7-CIS/tasks/section_3/cis_3.3.x.yml | 214 ++ .../RHEL7-CIS/tasks/section_3/cis_3.4.x.yml | 33 + .../RHEL7-CIS/tasks/section_3/cis_3.5.1.x.yml | 115 + .../RHEL7-CIS/tasks/section_3/cis_3.5.2.x.yml | 321 +++ .../tasks/section_3/cis_3.5.3.1.x.yml | 46 + .../RHEL7-CIS/tasks/section_3/main.yml | 28 + .../RHEL7-CIS/tasks/section_4/cis_4.1.1.x.yml | 44 + .../RHEL7-CIS/tasks/section_4/cis_4.1.2.x.yml | 342 +++ .../RHEL7-CIS/tasks/section_4/cis_4.2.1.x.yml | 171 ++ .../RHEL7-CIS/tasks/section_4/cis_4.2.2.x.yml | 43 + .../RHEL7-CIS/tasks/section_4/cis_4.2.x.yml | 35 + .../RHEL7-CIS/tasks/section_4/main.yml | 16 + .../RHEL7-CIS/tasks/section_5/cis_5.1.x.yml | 148 ++ .../RHEL7-CIS/tasks/section_5/cis_5.2.x.yml | 46 + .../RHEL7-CIS/tasks/section_5/cis_5.3.x.yml | 458 ++++ .../RHEL7-CIS/tasks/section_5/cis_5.4.x.yml | 133 + .../RHEL7-CIS/tasks/section_5/cis_5.5.1.x.yml | 109 + .../RHEL7-CIS/tasks/section_5/cis_5.5.x.yml | 96 + .../RHEL7-CIS/tasks/section_5/cis_5.6.yml | 21 + .../RHEL7-CIS/tasks/section_5/cis_5.7.yml | 28 + .../RHEL7-CIS/tasks/section_5/main.yml | 22 + .../RHEL7-CIS/tasks/section_6/cis_6.1.x.yml | 290 +++ .../RHEL7-CIS/tasks/section_6/cis_6.2.x.yml | 426 ++++ .../RHEL7-CIS/tasks/section_6/main.yml | 7 + .../templates/ansible_vars_goss.yml.j2 | 455 ++++ .../templates/audit/99_finalize.rules.j2 | 1 + .../templates/audit/MAC_policy.rules.j2 | 2 + .../RHEL7-CIS/templates/audit/access.rules.j2 | 4 + .../templates/audit/actions.rules.j2 | 2 + .../templates/audit/deletion.rules.j2 | 2 + .../templates/audit/identity.rules.j2 | 5 + .../RHEL7-CIS/templates/audit/logins.rules.j2 | 3 + .../templates/audit/modules.rules.j2 | 4 + .../RHEL7-CIS/templates/audit/mounts.rules.j2 | 2 + .../templates/audit/perm_mod.rules.j2 | 6 + .../templates/audit/priv_commands.rules.j2 | 4 + .../RHEL7-CIS/templates/audit/scope.rules.j2 | 2 + .../templates/audit/session.rules.j2 | 3 + .../templates/audit/system_local.rules.j2 | 6 + .../templates/audit/time_change.rules.j2 | 7 + .../RHEL7-CIS/templates/chrony.conf.j2 | 93 + .../RHEL7-CIS/templates/etc/issue.j2 | 1 + .../RHEL7-CIS/templates/etc/issue.net.j2 | 1 + .../RHEL7-CIS/templates/etc/motd.j2 | 1 + .../RHEL7-CIS/templates/etc/tmp_mount.j2 | 25 + .../RHEL7-CIS/templates/hosts.allow.j2 | 11 + .../RHEL7-CIS/templates/ntp.conf.j2 | 59 + .../ansible-lockdown/RHEL7-CIS/vars/main.yml | 2 + .../ansible-lockdown/RHEL8-CIS/.ansible-lint | 11 + .../ansible-lockdown/RHEL8-CIS/.gitattributes | 6 + Linux/ansible-lockdown/RHEL8-CIS/.yamllint | 23 + .../RHEL8-CIS/CONTRIBUTING.rst | 67 + Linux/ansible-lockdown/RHEL8-CIS/Changelog.md | 93 + Linux/ansible-lockdown/RHEL8-CIS/LICENSE | 21 + Linux/ansible-lockdown/RHEL8-CIS/README.md | 129 + Linux/ansible-lockdown/RHEL8-CIS/ansible.cfg | 24 + .../RHEL8-CIS/defaults/main.yml | 622 +++++ .../files/etc/systemd/system/tmp.mount | 25 + .../RHEL8-CIS/group_vars/docker | 28 + .../RHEL8-CIS/group_vars/vagrant | 28 + .../RHEL8-CIS/handlers/main.yml | 106 + Linux/ansible-lockdown/RHEL8-CIS/local.yml | 12 + .../ansible-lockdown/RHEL8-CIS/meta/main.yml | 18 + Linux/ansible-lockdown/RHEL8-CIS/site.yml | 11 + 
.../RHEL8-CIS/tasks/LE_audit_setup.yml | 22 + .../RHEL8-CIS/tasks/audit_homedirperms.yml | 46 + .../RHEL8-CIS/tasks/check_prereqs.yml | 36 + .../ansible-lockdown/RHEL8-CIS/tasks/main.yml | 111 + .../RHEL8-CIS/tasks/parse_etc_password.yml | 32 + .../ansible-lockdown/RHEL8-CIS/tasks/post.yml | 15 + .../tasks/post_remediation_audit.yml | 43 + .../RHEL8-CIS/tasks/pre_remediation_audit.yml | 118 + .../RHEL8-CIS/tasks/prelim.yml | 110 + .../RHEL8-CIS/tasks/section_1/cis_1.1.1.x.yml | 102 + .../RHEL8-CIS/tasks/section_1/cis_1.1.x.yml | 363 +++ .../RHEL8-CIS/tasks/section_1/cis_1.10.yml | 15 + .../RHEL8-CIS/tasks/section_1/cis_1.11.yml | 15 + .../RHEL8-CIS/tasks/section_1/cis_1.2.x.yml | 100 + .../RHEL8-CIS/tasks/section_1/cis_1.3.x.yml | 44 + .../RHEL8-CIS/tasks/section_1/cis_1.4.x.yml | 50 + .../RHEL8-CIS/tasks/section_1/cis_1.5.x.yml | 76 + .../RHEL8-CIS/tasks/section_1/cis_1.6.x.yml | 60 + .../RHEL8-CIS/tasks/section_1/cis_1.7.1.x.yml | 115 + .../RHEL8-CIS/tasks/section_1/cis_1.8.1.x.yml | 96 + .../RHEL8-CIS/tasks/section_1/cis_1.8.2.yml | 27 + .../RHEL8-CIS/tasks/section_1/cis_1.9.yml | 15 + .../RHEL8-CIS/tasks/section_1/main.yml | 42 + .../RHEL8-CIS/tasks/section_2/cis_2.1.1.yml | 14 + .../RHEL8-CIS/tasks/section_2/cis_2.2.1.x.yml | 42 + .../RHEL8-CIS/tasks/section_2/cis_2.2.x.yml | 288 +++ .../RHEL8-CIS/tasks/section_2/cis_2.3.x.yml | 43 + .../RHEL8-CIS/tasks/section_2/main.yml | 13 + .../RHEL8-CIS/tasks/section_3/cis_3.1.x.yml | 53 + .../RHEL8-CIS/tasks/section_3/cis_3.2.x.yml | 207 ++ .../RHEL8-CIS/tasks/section_3/cis_3.3.x.yml | 61 + .../RHEL8-CIS/tasks/section_3/cis_3.4.1.1.yml | 13 + .../RHEL8-CIS/tasks/section_3/cis_3.4.2.x.yml | 102 + .../RHEL8-CIS/tasks/section_3/cis_3.4.3.x.yml | 264 ++ .../tasks/section_3/cis_3.4.4.1.x.yml | 144 ++ .../tasks/section_3/cis_3.4.4.2.x.yml | 136 + .../RHEL8-CIS/tasks/section_3/cis_3.5.yml | 32 + .../RHEL8-CIS/tasks/section_3/cis_3.6.yml | 17 + .../RHEL8-CIS/tasks/section_3/main.yml | 41 + .../RHEL8-CIS/tasks/section_4/cis_4.1.1.x.yml | 103 + .../RHEL8-CIS/tasks/section_4/cis_4.1.2.x.yml | 53 + .../RHEL8-CIS/tasks/section_4/cis_4.1.x.yml | 265 ++ .../RHEL8-CIS/tasks/section_4/cis_4.2.1.x.yml | 177 ++ .../RHEL8-CIS/tasks/section_4/cis_4.2.2.x.yml | 43 + .../RHEL8-CIS/tasks/section_4/cis_4.2.3.yml | 13 + .../RHEL8-CIS/tasks/section_4/cis_4.3.yml | 24 + .../RHEL8-CIS/tasks/section_4/main.yml | 23 + .../RHEL8-CIS/tasks/section_5/cis_5.1.x.yml | 147 ++ .../RHEL8-CIS/tasks/section_5/cis_5.2.x.yml | 345 +++ .../RHEL8-CIS/tasks/section_5/cis_5.3.x.yml | 90 + .../RHEL8-CIS/tasks/section_5/cis_5.4.x.yml | 131 + .../RHEL8-CIS/tasks/section_5/cis_5.5.1.x.yml | 117 + .../RHEL8-CIS/tasks/section_5/cis_5.5.x.yml | 96 + .../RHEL8-CIS/tasks/section_5/cis_5.6.yml | 35 + .../RHEL8-CIS/tasks/section_5/cis_5.7.yml | 22 + .../RHEL8-CIS/tasks/section_5/main.yml | 27 + .../RHEL8-CIS/tasks/section_6/cis_6.1.x.yml | 294 +++ .../RHEL8-CIS/tasks/section_6/cis_6.2.x.yml | 519 ++++ .../RHEL8-CIS/tasks/section_6/main.yml | 7 + .../templates/ansible_vars_goss.yml.j2 | 474 ++++ .../audit/rhel8cis_rule_4_1_10.rules.j2 | 5 + .../audit/rhel8cis_rule_4_1_11.rules.j2 | 6 + .../audit/rhel8cis_rule_4_1_12.rules.j2 | 3 + .../audit/rhel8cis_rule_4_1_13.rules.j2 | 4 + .../audit/rhel8cis_rule_4_1_14.rules.j2 | 3 + .../audit/rhel8cis_rule_4_1_15.rules.j2 | 5 + .../audit/rhel8cis_rule_4_1_16.rules.j2 | 2 + .../audit/rhel8cis_rule_4_1_17.rules.j2 | 2 + .../audit/rhel8cis_rule_4_1_3.rules.j2 | 2 + .../audit/rhel8cis_rule_4_1_4.rules.j2 | 3 + .../audit/rhel8cis_rule_4_1_5.rules.j2 | 4 + 
.../audit/rhel8cis_rule_4_1_6.rules.j2 | 6 + .../audit/rhel8cis_rule_4_1_7.rules.j2 | 3 + .../audit/rhel8cis_rule_4_1_8.rules.j2 | 7 + .../audit/rhel8cis_rule_4_1_9.rules.j2 | 7 + .../RHEL8-CIS/templates/chrony.conf.j2 | 93 + .../RHEL8-CIS/templates/etc/issue.j2 | 1 + .../RHEL8-CIS/templates/etc/issue.net.j2 | 1 + .../RHEL8-CIS/templates/etc/motd.j2 | 1 + .../templates/etc/systemd/system/tmp.mount.j2 | 28 + .../RHEL8-CIS/templates/hosts.allow.j2 | 11 + .../RHEL8-CIS/templates/ntp.conf.j2 | 59 + .../ansible-lockdown/RHEL8-CIS/vars/main.yml | 5 + Linux/ansible-lockdown/UBUNTU18-CIS/.DS_Store | Bin 0 -> 10244 bytes .../UBUNTU18-CIS/.ansible-lint | 11 + .../UBUNTU18-CIS/.gitattributes | 6 + .../ansible-lockdown/UBUNTU18-CIS/.travis.yml | 29 + Linux/ansible-lockdown/UBUNTU18-CIS/.yamllint | 20 + .../UBUNTU18-CIS/CONTRIBUTING.rst | 68 + Linux/ansible-lockdown/UBUNTU18-CIS/LICENSE | 21 + Linux/ansible-lockdown/UBUNTU18-CIS/README.md | 86 + .../UBUNTU18-CIS/defaults/main.yml | 707 ++++++ .../UBUNTU18-CIS/files/.DS_Store | Bin 0 -> 6148 bytes .../UBUNTU18-CIS/files/etc/.DS_Store | Bin 0 -> 6148 bytes .../files/etc/apparmor.d/usr.bin.ssh | 10 + .../UBUNTU18-CIS/files/etc/systemd/.DS_Store | Bin 0 -> 6148 bytes .../files/etc/systemd/system/tmp.mount | 25 + .../UBUNTU18-CIS/handlers/main.yml | 54 + .../UBUNTU18-CIS/library/goss.py | 147 ++ .../UBUNTU18-CIS/meta/main.yml | 20 + Linux/ansible-lockdown/UBUNTU18-CIS/site.yml | 12 + .../UBUNTU18-CIS/tasks/.DS_Store | Bin 0 -> 6148 bytes .../UBUNTU18-CIS/tasks/LE_audit_setup.yml | 22 + .../UBUNTU18-CIS/tasks/main.yml | 108 + .../UBUNTU18-CIS/tasks/parse_etc_password.yml | 32 + .../tasks/post_remediation_audit.yml | 43 + .../tasks/pre_remediation_audit.yml | 118 + .../UBUNTU18-CIS/tasks/prelim.yml | 50 + .../tasks/section_1/cis_1.1.x.yml | 496 ++++ .../tasks/section_1/cis_1.2.x.yml | 50 + .../tasks/section_1/cis_1.3.x.yml | 35 + .../tasks/section_1/cis_1.4.x.yml | 84 + .../tasks/section_1/cis_1.5.x.yml | 94 + .../tasks/section_1/cis_1.6.x.yml | 81 + .../tasks/section_1/cis_1.7.x.yml | 93 + .../tasks/section_1/cis_1.8.x.yml | 77 + .../UBUNTU18-CIS/tasks/section_1/cis_1.9.yml | 14 + .../UBUNTU18-CIS/tasks/section_1/main.yml | 27 + .../tasks/section_2/cis_2.1.x.yml | 407 +++ .../tasks/section_2/cis_2.2.x.yml | 91 + .../UBUNTU18-CIS/tasks/section_2/cis_2.3.yml | 23 + .../UBUNTU18-CIS/tasks/section_2/main.yml | 9 + .../tasks/section_3/cis_3.1.x.yml | 49 + .../tasks/section_3/cis_3.2.x.yml | 60 + .../tasks/section_3/cis_3.3.x.yml | 233 ++ .../tasks/section_3/cis_3.4.x.yml | 64 + .../tasks/section_3/cis_3.5.x.yml | 798 ++++++ .../UBUNTU18-CIS/tasks/section_3/main.yml | 15 + .../tasks/section_4/cis_4.1.1.x.yml | 81 + .../tasks/section_4/cis_4.1.2.x.yml | 54 + .../tasks/section_4/cis_4.1.x.yml | 278 +++ .../tasks/section_4/cis_4.2.1.x.yml | 151 ++ .../tasks/section_4/cis_4.2.2.x.yml | 51 + .../tasks/section_4/cis_4.2.3.x.yml | 15 + .../UBUNTU18-CIS/tasks/section_4/cis_4.3.yml | 25 + .../UBUNTU18-CIS/tasks/section_4/cis_4.4.yml | 15 + .../UBUNTU18-CIS/tasks/section_4/main.yml | 24 + .../tasks/section_5/cis_5.1.x.yml | 158 ++ .../tasks/section_5/cis_5.2.x.yml | 46 + .../tasks/section_5/cis_5.3.x.yml | 413 +++ .../tasks/section_5/cis_5.4.x.yml | 191 ++ .../tasks/section_5/cis_5.5.1.x.yml | 139 ++ .../tasks/section_5/cis_5.5.x.yml | 100 + .../UBUNTU18-CIS/tasks/section_5/cis_5.6.yml | 24 + .../UBUNTU18-CIS/tasks/section_5/cis_5.7.yml | 39 + .../UBUNTU18-CIS/tasks/section_5/main.yml | 24 + .../tasks/section_6/cis_6.1.x.yml | 348 +++ .../tasks/section_6/cis_6.2.x.yml | 545 
++++ .../UBUNTU18-CIS/tasks/section_6/main.yml | 6 + .../UBUNTU18-CIS/templates/.DS_Store | Bin 0 -> 6148 bytes .../templates/ansible_vars_goss.yml.j2 | 487 ++++ .../templates/audit/chrony.conf.j2 | 3 + .../audit/ubtu18cis_4_1_10_access.rules.j2 | 7 + .../ubtu18cis_4_1_11_privileged.rules.j2 | 4 + .../audit/ubtu18cis_4_1_12_audit.rules.j2 | 5 + .../audit/ubtu18cis_4_1_13_delete.rules.j2 | 5 + .../audit/ubtu18cis_4_1_14_scope.rules.j2 | 3 + .../audit/ubtu18cis_4_1_15_actions.rules.j2 | 5 + .../audit/ubtu18cis_4_1_16_modules.rules.j2 | 10 + .../ubtu18cis_4_1_17_99finalize.rules.j2 | 2 + .../audit/ubtu18cis_4_1_3_timechange.rules.j2 | 8 + .../audit/ubtu18cis_4_1_4_identity.rules.j2 | 6 + .../ubtu18cis_4_1_5_systemlocale.rules.j2 | 9 + .../audit/ubtu18cis_4_1_6_macpolicy.rules.j2 | 3 + .../audit/ubtu18cis_4_1_7_logins.rules.j2 | 4 + .../audit/ubtu18cis_4_1_8_session.rules.j2 | 4 + .../audit/ubtu18cis_4_1_9_permmod.rules.j2 | 9 + .../UBUNTU18-CIS/templates/chrony.conf.j2 | 91 + .../UBUNTU18-CIS/templates/etc/issue.j2 | 1 + .../UBUNTU18-CIS/templates/etc/issue.net.j2 | 1 + .../UBUNTU18-CIS/templates/etc/motd.j2 | 1 + .../UBUNTU18-CIS/templates/hosts.allow.j2 | 10 + .../UBUNTU18-CIS/templates/ntp.conf.j2 | 68 + .../ubtu18cis_4_1_3_timechange64.rules.j2 | 5 + .../UBUNTU18-CIS/tests/inventory | 2 + .../UBUNTU18-CIS/tests/test.yml | 5 + .../UBUNTU18-CIS/vars/main.yml | 2 + Linux/ansible-lockdown/UBUNTU20-CIS/.DS_Store | Bin 0 -> 8196 bytes .../UBUNTU20-CIS/.ansible-lint | 11 + .../ansible-lockdown/UBUNTU20-CIS/.travis.yml | 29 + Linux/ansible-lockdown/UBUNTU20-CIS/.yamllint | 20 + .../UBUNTU20-CIS/CONTRIBUTING.rst | 69 + Linux/ansible-lockdown/UBUNTU20-CIS/LICENSE | 21 + Linux/ansible-lockdown/UBUNTU20-CIS/README.md | 85 + .../UBUNTU20-CIS/defaults/main.yml | 698 ++++++ .../UBUNTU20-CIS/handlers/main.yml | 58 + .../UBUNTU20-CIS/meta/main.yml | 20 + Linux/ansible-lockdown/UBUNTU20-CIS/site.yml | 12 + .../UBUNTU20-CIS/tasks/LE_audit_setup.yml | 22 + .../UBUNTU20-CIS/tasks/main.yml | 115 + .../UBUNTU20-CIS/tasks/parse_etc_password.yml | 32 + .../tasks/post_remediation_audit.yml | 43 + .../tasks/pre_remediation_audit.yml | 118 + .../UBUNTU20-CIS/tasks/prelim.yml | 53 + .../tasks/section_1/cis_1.1.x.yml | 524 ++++ .../tasks/section_1/cis_1.2.x.yml | 52 + .../tasks/section_1/cis_1.3.x.yml | 35 + .../tasks/section_1/cis_1.4.x.yml | 86 + .../tasks/section_1/cis_1.5.x.yml | 122 + .../tasks/section_1/cis_1.6.x.yml | 83 + .../tasks/section_1/cis_1.7.x.yml | 93 + .../tasks/section_1/cis_1.8.x.yml | 78 + .../UBUNTU20-CIS/tasks/section_1/cis_1.9.yml | 14 + .../UBUNTU20-CIS/tasks/section_1/main.yml | 27 + .../tasks/section_2/cis_2.1.x.yml | 457 ++++ .../tasks/section_2/cis_2.2.x.yml | 88 + .../UBUNTU20-CIS/tasks/section_2/cis_2.3.yml | 24 + .../UBUNTU20-CIS/tasks/section_2/main.yml | 9 + .../tasks/section_3/cis_3.1.x.yml | 70 + .../tasks/section_3/cis_3.2.x.yml | 60 + .../tasks/section_3/cis_3.3.x.yml | 233 ++ .../tasks/section_3/cis_3.4.x.yml | 64 + .../tasks/section_3/cis_3.5.x.yml | 790 ++++++ .../UBUNTU20-CIS/tasks/section_3/main.yml | 15 + .../tasks/section_4/cis_4.1.1.x.yml | 100 + .../tasks/section_4/cis_4.1.2.x.yml | 53 + .../tasks/section_4/cis_4.1.x.yml | 279 +++ .../tasks/section_4/cis_4.2.1.x.yml | 153 ++ .../tasks/section_4/cis_4.2.2.x.yml | 50 + .../tasks/section_4/cis_4.2.3.yml | 16 + .../UBUNTU20-CIS/tasks/section_4/cis_4.3.yml | 26 + .../UBUNTU20-CIS/tasks/section_4/cis_4.4.yml | 15 + .../UBUNTU20-CIS/tasks/section_4/main.yml | 24 + .../tasks/section_5/cis_5.1.x.yml | 159 ++ 
.../tasks/section_5/cis_5.2.x.yml | 46 + .../tasks/section_5/cis_5.3.x.yml | 413 +++ .../tasks/section_5/cis_5.4.x.yml | 199 ++ .../tasks/section_5/cis_5.5.x.yml | 266 ++ .../UBUNTU20-CIS/tasks/section_5/cis_5.6.yml | 25 + .../UBUNTU20-CIS/tasks/section_5/cis_5.7.yml | 38 + .../UBUNTU20-CIS/tasks/section_5/main.yml | 21 + .../tasks/section_6/cis_6.1.x.yml | 355 +++ .../tasks/section_6/cis_6.2.x.yml | 567 +++++ .../UBUNTU20-CIS/tasks/section_6/main.yml | 6 + .../UBUNTU20-CIS/templates/.DS_Store | Bin 0 -> 6148 bytes .../templates/ansible_vars_goss.yml.j2 | 500 ++++ .../audit/ubtu20cis_4_1_10_access.rules.j2 | 6 + .../ubtu20cis_4_1_11_privileged.rules.j2 | 3 + .../audit/ubtu20cis_4_1_12_audit.rules.j2 | 4 + .../audit/ubtu20cis_4_1_13_delete.rules.j2 | 4 + .../audit/ubtu20cis_4_1_14_scope.rules.j2 | 2 + .../audit/ubtu20cis_4_1_15_actions.rules.j2 | 4 + .../audit/ubtu20cis_4_1_16_modules.rules.j2 | 9 + .../ubtu20cis_4_1_17_99finalize.rules.j2 | 1 + .../audit/ubtu20cis_4_1_3_timechange.rules.j2 | 7 + .../audit/ubtu20cis_4_1_4_identity.rules.j2 | 5 + .../ubtu20cis_4_1_5_systemlocale.rules.j2 | 8 + .../audit/ubtu20cis_4_1_6_macpolicy.rules.j2 | 2 + .../audit/ubtu20cis_4_1_7_logins.rules.j2 | 3 + .../audit/ubtu20cis_4_1_8_session.rules.j2 | 3 + .../audit/ubtu20cis_4_1_9_permmod.rules.j2 | 8 + .../UBUNTU20-CIS/templates/chrony.conf.j2 | 93 + .../UBUNTU20-CIS/templates/etc/issue.j2 | 1 + .../UBUNTU20-CIS/templates/etc/issue.net.j2 | 1 + .../UBUNTU20-CIS/templates/etc/motd.j2 | 1 + .../UBUNTU20-CIS/templates/ntp.conf.j2 | 69 + .../UBUNTU20-CIS/tests/inventory | 2 + .../UBUNTU20-CIS/tests/test.yml | 5 + Linux/ansible-lockdown/UBUNTU20-CIS/vagrant | 13 + .../UBUNTU20-CIS/vars/main.yml | 2 + Linux/ansible-lockdown/clone-all.sh | 2 +- 404 files changed, 40243 insertions(+), 1 deletion(-) create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/CONTRIBUTING.rst create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/LICENSE create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/README.md create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/communitytodevel.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/defaults/main.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/develtomaster.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_file.crt create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_key.key create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/handlers/main.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/meta/main.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/site.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_redhat_fix.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_ubuntu_fix.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/main.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/prelim.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/templates/crs-setup.conf.j2 create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/tests/inventory create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/tests/test.yml create mode 100644 Linux/ansible-lockdown/APACHE-2.4-CIS/vars/main.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/.ansible-lint create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/.travis.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/CONTRIBUTING.rst create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/LICENSE 
create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/README.md create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/defaults/main.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/handlers/main.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/meta/main.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/site.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_redhat_fixes.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_ubuntu_fixes.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/main.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/postgresql12_install.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/prelim.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/rhel7_fips.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tests/inventory create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/tests/test.yml create mode 100644 Linux/ansible-lockdown/POSTGRES-12-CIS/vars/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/.ansible-lint create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/.gitattributes create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/.yamllint create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/CONTRIBUTING.rst create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/ChangeLog.md create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/LICENSE create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/README.md create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/ansible.cfg create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/defaults/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/group_vars/docker create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/group_vars/vagrant create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/handlers/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/local.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/meta/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/site.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/LE_audit_setup.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/check_prereqs.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/parse_etc_password.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/post.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/post_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/pre_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/prelim.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.4.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.5.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.6.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.7.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.8.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.9.x.yml create mode 100644 
Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.4.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.4.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.5.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.5.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.5.3.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.1.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.1.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.4.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.6.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.7.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/main.yml create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/ansible_vars_goss.yml.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/99_finalize.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/MAC_policy.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/access.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/actions.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/deletion.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/identity.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/logins.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/modules.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/mounts.rules.j2 create mode 100644 
Linux/ansible-lockdown/RHEL7-CIS/templates/audit/perm_mod.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/priv_commands.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/scope.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/session.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/system_local.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/audit/time_change.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/chrony.conf.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.net.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/etc/motd.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/etc/tmp_mount.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/hosts.allow.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/templates/ntp.conf.j2 create mode 100644 Linux/ansible-lockdown/RHEL7-CIS/vars/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/.ansible-lint create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/.gitattributes create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/.yamllint create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/CONTRIBUTING.rst create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/Changelog.md create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/LICENSE create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/README.md create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/ansible.cfg create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/defaults/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/files/etc/systemd/system/tmp.mount create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/group_vars/docker create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/group_vars/vagrant create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/handlers/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/local.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/meta/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/site.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/LE_audit_setup.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/audit_homedirperms.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/check_prereqs.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/parse_etc_password.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/post.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/post_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/pre_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/prelim.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.10.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.11.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.4.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.5.x.yml create mode 100644 
Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.6.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.7.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.2.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.9.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.1.1.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.1.1.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.5.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.6.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.1.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.1.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.3.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.3.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.3.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.4.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.6.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.7.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.1.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.2.x.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/main.yml create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/ansible_vars_goss.yml.j2 create mode 100644 
Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_10.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_11.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_12.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_13.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_14.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_15.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_16.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_17.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_3.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_4.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_5.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_6.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_7.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_8.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_9.rules.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/chrony.conf.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.net.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/etc/motd.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/etc/systemd/system/tmp.mount.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/hosts.allow.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/templates/ntp.conf.j2 create mode 100644 Linux/ansible-lockdown/RHEL8-CIS/vars/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/.DS_Store create mode 100755 Linux/ansible-lockdown/UBUNTU18-CIS/.ansible-lint create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/.gitattributes create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/.travis.yml create mode 100755 Linux/ansible-lockdown/UBUNTU18-CIS/.yamllint create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/CONTRIBUTING.rst create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/LICENSE create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/README.md create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/defaults/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/files/.DS_Store create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/.DS_Store create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/apparmor.d/usr.bin.ssh create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/.DS_Store create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/system/tmp.mount create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/handlers/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/library/goss.py create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/meta/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/site.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/.DS_Store create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/LE_audit_setup.yml create mode 100644 
Linux/ansible-lockdown/UBUNTU18-CIS/tasks/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/parse_etc_password.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/post_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/pre_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/prelim.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.4.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.5.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.6.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.7.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.8.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.9.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.3.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.4.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.5.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.1.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.1.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.3.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.4.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.4.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.6.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.7.yml create mode 100644 
Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/.DS_Store create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/ansible_vars_goss.yml.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/chrony.conf.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_10_access.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_11_privileged.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_12_audit.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_13_delete.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_14_scope.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_15_actions.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_16_modules.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_17_99finalize.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_3_timechange.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_4_identity.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_5_systemlocale.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_6_macpolicy.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_7_logins.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_8_session.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_9_permmod.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/chrony.conf.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.net.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/motd.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/hosts.allow.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/ntp.conf.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/templates/ubtu18cis_4_1_3_timechange64.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tests/inventory create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/tests/test.yml create mode 100644 Linux/ansible-lockdown/UBUNTU18-CIS/vars/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/.DS_Store create mode 100755 Linux/ansible-lockdown/UBUNTU20-CIS/.ansible-lint create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/.travis.yml create mode 100755 Linux/ansible-lockdown/UBUNTU20-CIS/.yamllint create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/CONTRIBUTING.rst create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/LICENSE create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/README.md create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/defaults/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/handlers/main.yml create mode 
100644 Linux/ansible-lockdown/UBUNTU20-CIS/meta/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/site.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/LE_audit_setup.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/parse_etc_password.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/post_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/pre_remediation_audit.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/prelim.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.4.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.5.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.6.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.7.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.8.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.9.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.3.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.4.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.5.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.3.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.3.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.4.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.3.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.4.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.5.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.6.yml 
create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.7.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.1.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.2.x.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/main.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/.DS_Store create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/ansible_vars_goss.yml.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_10_access.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_11_privileged.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_12_audit.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_13_delete.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_14_scope.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_15_actions.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_16_modules.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_17_99finalize.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_3_timechange.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_4_identity.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_5_systemlocale.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_6_macpolicy.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_7_logins.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_8_session.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_9_permmod.rules.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/chrony.conf.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.net.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/motd.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/templates/ntp.conf.j2 create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tests/inventory create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/tests/test.yml create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/vagrant create mode 100644 Linux/ansible-lockdown/UBUNTU20-CIS/vars/main.yml mode change 100644 => 100755 Linux/ansible-lockdown/clone-all.sh diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/CONTRIBUTING.rst b/Linux/ansible-lockdown/APACHE-2.4-CIS/CONTRIBUTING.rst new file mode 100644 index 0000000..871e1e0 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/CONTRIBUTING.rst @@ -0,0 +1,54 @@ +Contributing to MindPoint Group Projects +======================================== + +Signing your contribution +------------------------- + +We've chosen to use the Developer's Certificate of Origin (DCO) method +that is employed by the Linux Kernel Project, which provides a simple +way to contribute to MindPoint Group projects. 
+ +The process is to certify the below DCO 1.1 text +:: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +:: + +Then, when it comes time to submit a contribution, include the +following text in your contribution commit message: + +:: + + Signed-off-by: Joan Doe + +:: + + +This message can be entered manually, or if you have configured git +with the correct `user.name` and `user.email`, you can use the `-s` +option to `git commit` to automatically include the signoff message. + diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/LICENSE b/Linux/ansible-lockdown/APACHE-2.4-CIS/LICENSE new file mode 100644 index 0000000..5b33d4a --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Mindpoint Group / Lockdown Enterprise / Lockdown Enterprise Releases + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/README.md b/Linux/ansible-lockdown/APACHE-2.4-CIS/README.md new file mode 100644 index 0000000..ae94831 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/README.md @@ -0,0 +1,298 @@ +APACHE-2.4 CIS +========= + +Configure RHEL and Debian based Apache 2.4 servers to be [CIS] (https://www.cisecurity.org/cis-benchmarks/) compliant. + +This role **will make changes to the system** that could break things. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted. 
+ +Based on [CIS Apache HTTP Server 2.4 Benchmark](https://community.cisecurity.org/collab/public/index.php). + + +Requirements +------------ + +You should carefully read through the tasks to make sure these changes will not break your systems before running this playbook. +If you want to do a dry run without changing anything, set the below sections (apache_cis_section1-12) to false. + +Role Variables +-------------- + +There are many role variables defined in defaults/main.yml. This list shows the most important. + +**apache_cis_section1**: CIS - Planning and Installation (Section 1) (Default: true) + +**apache_cis_section2**: CIS - Minimize Apache Modules (Section 2) (Default: true) + +**apache_cis_section3**: CIS - Principles, Permissions, and Ownership (Section 3) (Default: true) + +**apache_cis_section4**: CIS - Apache Access Control (Section 4) (Default: true) + +**apache_cis_section5**: CIS - Minimize Features, Content and Options (Section 5) (Default: true) + +**apache_cis_section6**: CIS - Operations - Logging, Monitoring and Maintenance (Section 6) (Default: true) + +**apache_cis_section7**: CIS - SSL/TLS Configuration (Section 7) (Default: true) + +**apache_cis_section8**: CIS - Information Leakage (Section 8) (Default: true) + +**apache_cis_section9**: CIS - Denial of Service Mitigations (Section 9) (Default: true) + +**apache_cis_section10**: CIS - Request Limits (Section 10) (Default: true) + +**apache_cis_section11**: CIS - Enable SELinux to Restrict Apache Processes (Section 11) (Default: true) + +**apache_cis_section12**: CIS - Enable AppArmor to Restrict Apache Processes (Section 12) (Default: true) + + + +##### Apache user and group declarations +apache_rhel_user is the user that the apache software will use for RHEL systems +apache_ubuntu_user is the user that the apache software will use for Ubuntu (Debian) systems +``` +apache_rhel_user: apache +apache_ubuntu_user: apache +``` +apache_rhel_group is the group the apache user will use for RHEL systems +apache_ubuntu_group is the group the apache user will use for Ubuntu (Debian) systems +``` +apache_rhel_group: apache +apache_ubuntu_group: apache +``` + + +##### Apache Principles, Permissions, and Ownership Settings +apache_cis_core_dump_location is the folder for core dumps +``` +apache_cis_core_dump_location: /var/log/apache2 +``` + +apache_cis_lockfile_location is the location of the lock file. This cannot be the same location as the DocumentRoot directory. Apache default is ServerRoot logs +The LockFile should be on a locally mounted drive rather than an NFS-mounted file system +apache_cis_lockfile_location = RHEL based +apache2_cis_lockfile_location = Debian based (Ubuntu) +``` +apache_cis_lockfile_location: "{{ apache_cis_server_root_dir }}/logs" +apache2_cis_lockfile_location: "/var/lock/apache2" +``` + + +##### Apache Minimize Features, Content and Options +This is the options setting for the web root directory vhost settings.
Needs to be None or Multiviews to conform to CIS standards
+```
+apache_cis_webrootdir_options: None
+```
+
+
+##### Apache allowed file types
+This is the list of allowed file types for the FilesMatch directive in httpd.conf/apache.conf
+```
+apache_cis_allowed_filetypes: "css|html?|js|pdf|txt|xml|xsl|gif|ico|jpe?g|png"
+```
+
+
+##### Apache top level server and IP/Port settings
+The hostname of the top level server for the RewriteCond %{HTTP_HOST} config section of httpd.conf/apache.conf
+```
+apache_cis_toplevel_svr: 'www\.example\.com'
+```
+
+This is the list of IPs and ports that apache will listen on. If multiple are in use, a dash (-) list is used
+```
+apache_cis_listen_ip_port:
+  - 10.0.2.15:80
+```
+
+
+##### Operations - Logging, Monitoring and Maintenance settings
+all_mods is the log level for everything except the core module. The value must be notice or lower. core_mod is the core module setting and needs to be info or lower.
+```
+apache_cis_loglevel:
+  all_mods: "notice"
+  core_mod: "info"
+```
+
+Path to the apache error logs
+```
+apache_cis_errorlog_path: "/var/log/apache2"
+```
+The facility setting for error logs. Any appropriate syslog facility can be used in place of local1 and will still conform to CIS standards
+```
+apache_cis_errorlog_facility: "local1"
+```
+
+apache_cis_log_format is the format that the log files will be created in. For compliance with the control
+the following need to be present (order does not matter for the CIS control)
+%h, %l, %u, %t, %r, %>s, %b, %{Referer}i, and %{User-agent}i
+```
+apache_cis_log_format: '"%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\""'
+```
+apache_cis_custom_log is the path for the error log file
+```
+apache_cis_custom_log: "/var/log/apache2"
+```
+
+apache_cis_extra_packages are the extra packages that will need to be updated. Please make this in list format
+example format apache_cis_extra_packages: "'apache2', 'telnet', 'openssl'"
+```
+apache_cis_extra_packages: "'apache2'"
+```
+
+Installing/configuring OWASP requires an internet connection. If there is no internet available please set this to false
+```
+apache_cis_owasp_automate: true
+```
+
+##### SSL/TLS Configuration settings
+When apache_cis_custom_cert is set to true the file in files/custom_cert will be copied to the /etc/ssl/certs folder
+When apache_cis_custom_cert is set to false the control will create a self signed certificate
+```
+apache_cis_custom_cert: false
+```
+
+The hostname used for the certificate. It is important to remember that the browser will compare the host name in the URL to the common name in the
+certificate, so it is important that all https: URLs match the correct host name.
+Specifically, the host name www.example.com is not the same as example.com nor the
+same as ssl.example.com.
+```
+apache_cis_hostname_cert: "example.com"
+```
+
+When using a cipher (aes128, aes256, etc.) to generate an encrypted private key, a passphrase is required
+```
+apache_cis_privatekey_passphrase: "letmein"
+```
+
+This will be the final location of your signed certificate
+```
+apache_cis_csr_folder: "/etc/ssl/private"
+```
+
+This is to add the hostname values to the openssl.cnf temp file.
+It is recommended (not required) that the first alt name is the common name.
+This is a list and must be in the format of DNS.X = , where X is the next number sequentially +``` +apache_cis_alt_names: + - DNS:www.example.com + - DNS:example.com + - DNS:app.example.com + - DNS:service.example.com +``` + +The settings below relate to req_distinguished_name section of the openssl.cnf file. The var with the value set relates to the setting it is named after. +``` +apache_req_distinguished_name_settings: + countryName_default: "GB" + stateOrProvinceName_default: "Scotland" + localityName_default: "Glasgow" + organizationName_default: "Example Company Ltd" + organizationalUnitName_default: "ICT" + commonName_default: "www.example.com" + email_address: "blah@mail.com" +``` + +apache_cis_tls_1_2_available will toggle TLS1.2 or TLSv1 set in ssl.conf. If TLS1.2 is available that is preferred but needs to be setup and TLSv1.0 and TLSv1.1 needs to removed/disabled +``` +apache_cis_tls_1_2_available: true +``` + +apache_cis_sslciphersuite_settings are the settings for the SSLCipherSuite parameter in the ssl.conf configuration. +To conform to the CIS standard for 7.5 (weak ciphers disabled) these settings must have !NULL:!SSLv2:!RC4:!aNULL and it is not recommented to add !SSLv3. Example value: ALL:!EXP:!NULL:!LOW:!SSLv2:!RC4:!aNULL +to conform to the CIS standard for 7.8 (medium ciphers disables) these settings must have !3DES:!IDEA. Example value: ALL:!EXP:!NULL:!LOW:!SSLv2:!RC4:!aNULL:!3DES:!IDEA +``` +apache_cis_sslciphersuite_settings: "ALL:!EXP:!NULL:!LOW:!SSLv2:!RC4:!aNULL:!3DES:!IDEA" +``` + +apache_cis_tls_redirect is the web address that will be used to redirect a tls website or similar +``` +apache_cis_tls_redirect: "https://www.cisecurity.org/" +``` + + +##### Information Leakage settings +apache_cis_servertokens needs to be set to either Prod or ProductOnly +``` +apache_cis_servertokens: "Prod" +``` + + +##### Denial of Service Mitigations settings +apache_cis_timeout is the apache server timeout, must be set to less than 10 seconds to conform to CIS standards +``` +apache_cis_timeout: 10 +``` + +apache_cis_maxkeepaliverequests is the max number of keep alive requests. Needs to be set to 100 or more to conform to CIS standards +``` +apache_cis_maxkeepaliverequests: 100 +``` + +apache_cis_keepalivetimeout is the keep alive timout value in seconds. Needs to be set to 15 or less to conform to CIS standards +``` +apache_cis_keepalivetimeout: 15 +``` + +apache_cis_reqread_timeout is the value or range of the request read timeout in seconds. The max length can not exceed 40 seconds to conform to CIS standards +``` +apache_cis_reqread_timeout: 20-40 +``` + +apache_cis_reqread_body is the value of the request read body timout in seconds. This needs to be set to 20 seconds or less to conform to CIS standards +``` +apache_cis_reqread_body: 20 +``` + + +##### Request Limits settings +apache_cis_limitrequestline is the limit set to the request line. The value needs to be 512 or shorter to conform to CIS standards +``` +apache_cis_limitrequestline: 512 +``` + +apache_cis_limitrequestfields is the limit set to the number of fields. The value needs to be 100 or less to conform to CIS standards +``` +apache_cis_limitrequestfields: 100 +``` + +apache_cis_limitrequestfieldsize is the limit set for the size of the request headers. The value needs to be 1024 or less +``` +apache_cis_limitrequestfieldsize: 1024 +``` + +apache_cis_limitrequestbody is the limit set for the size of the request body. 
The value needs to be set to 102400 (100k) or less +``` +apache_cis_limitrequestbody: 102400 +``` + + +##### Enable SELinux to Restrict Apache Processes settings +apache2_cis_selinux is if you are using AppArmor on Ubuntu instead of SELinux. AppArmor is installed by default with Ubuntu +AppArmor is not supported on RHEL based systems and this toggle will not work with the RHEL implimentation of the CIS role. +``` +apache2_cis_selinux: false +``` + +Dependencies +------------ + +Ansible > 2.6.5 + +Example Playbook +---------------- + +This sample playbook should be run in a folder that is above the main APACHE-2.4-CIS / APACHE-2.4-CIS-devel folder. + +``` +- name: Harden Server + hosts: servers + become: yes + + roles: + - APACHE-2.4-CIS +``` + +Tags +---- +Many tags are available for precise control of what is and is not changed. + +Some examples of using tags: + +``` + # Audit and patch the site + ansible-playbook site.yml --tags="patch" +``` diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/communitytodevel.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/communitytodevel.yml new file mode 100644 index 0000000..d8abf04 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/communitytodevel.yml @@ -0,0 +1,38 @@ +# This is a basic workflow to help you get started with Actions + +name: CommunityToDevel + +# Controls when the action will run. Triggers the workflow on push or pull request +# events but only for the devel branch +on: + pull_request: + branches: [ devel ] + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + # Refactr pipeline for devel pull request/merge + - name: Refactr - Run Pipeline (to devel) + # You may pin to the exact commit or the version. + # uses: refactr/action-run-pipeline@be91e2796aa225268e4685c0e01a26d5f800cd53 + uses: refactr/action-run-pipeline@v0.1.2 + with: + # API token + api_token: '${{ secrets.REFACTR_KEY }}' + # Project ID + project_id: 5f47f0c4a13c7b18373e5556 + # Job ID + job_id: 5f933cbcf9c74e86b1609c00 + # Variables + variables: '{ "gitrepo": "https://github.com/ansible-lockdown/UBUNTU20-CIS.git", "image": "ami-0fe12c34e05228a69", "githubBranch": "${{ github.head_ref }}", "username": "ubuntu" }' + # Refactr API base URL + api_url: # optional diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/defaults/main.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/defaults/main.yml new file mode 100644 index 0000000..283b79e --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/defaults/main.yml @@ -0,0 +1,317 @@ +--- +# defaults file for Apache CIS + +# The settings below will enable/disable entire CIS sections +apache_cis_section1: true +apache_cis_section2: true +apache_cis_section3: true +apache_cis_section4: true +apache_cis_section5: true +apache_cis_section6: true +apache_cis_section7: true +apache_cis_section8: true +apache_cis_section9: true +apache_cis_section10: true +apache_cis_section11: true +apache_cis_section12: true + +apache_cis_skip_for_travis: false + +# We've defined disruption-high to indicate items that are likely to cause +# disruption in a normal workflow. These items can be remediated automatically +# but are disabled by default to avoid disruption. 
+apache_cis_disruption_high: true + +# This will automate the install of Apache in the prelim tasks +# If apache is already installed set to false +automate_install: true + +# Section 1 individual control toggles +apache_cis_1_1: true +apache_cis_1_2: true +apache_cis_1_3: true + +# Section 2 individual control toggles +apache_cis_2_1: true +apache_cis_2_2: true +apache_cis_2_3: true +apache_cis_2_4: true +apache_cis_2_5: true +apache_cis_2_6: true +apache_cis_2_7: true +apache_cis_2_8: true +apache_cis_2_9: true + +# Section 3 individual control toggles +apache_cis_3_1: true +apache_cis_3_2: true +apache_cis_3_3: true +apache_cis_3_4: true +apache_cis_3_5: true +apache_cis_3_6: true +apache_cis_3_7: true +apache_cis_3_8: true +apache_cis_3_9: true +apache_cis_3_10: true +apache_cis_3_11: true +apache_cis_3_12: true +apache_cis_3_13: true + +# Section 4 individual control toggles +apache_cis_4_1: true +apache_cis_4_2: true +apache_cis_4_3: true +apache_cis_4_4: true + +# Section 5 individual control toggles +apache_cis_5_1: true +apache_cis_5_2: false +apache_cis_5_3: true +apache_cis_5_4: true +apache_cis_5_5: true +apache_cis_5_6: true +apache_cis_5_7: true +apache_cis_5_8: true +apache_cis_5_9: true +apache_cis_5_10: true +apache_cis_5_11: true +apache_cis_5_12: true +apache_cis_5_13: true +apache_cis_5_14: true + +# Section 6 individual control toggles +apache_cis_6_1: true +apache_cis_6_2: true +apache_cis_6_3: true +apache_cis_6_4: true +apache_cis_6_5: true +apache_cis_6_6: true +apache_cis_6_7: true + +# Section 7 individual control toggles +apache_cis_7_1: true +apache_cis_7_2: true +apache_cis_7_3: true +apache_cis_7_4: true +apache_cis_7_5: true +apache_cis_7_6: true +apache_cis_7_7: true +apache_cis_7_8: true +apache_cis_7_9: true +apache_cis_7_10: true +apache_cis_7_11: true +apache_cis_7_12: true +apache_cis_7_13: true + +# Section 8 individual control toggles +apache_cis_8_1: true +apache_cis_8_2: true +apache_cis_8_3: true +apache_cis_8_4: true + +# Section 9 individual control toggles +apache_cis_9_1: true +apache_cis_9_2: true +apache_cis_9_3: true +apache_cis_9_4: true +apache_cis_9_5: true +apache_cis_9_6: true + +# Section 10 individual control toggles +apache_cis_10_1: true +apache_cis_10_2: true +apache_cis_10_3: true +apache_cis_10_4: true + +# Section 11 individual control toggles +apache_cis_11_1: true +apache_cis_11_2: true +apache_cis_11_3: true +apache_cis_11_4: true + +# Section 12 individual control toggles +apache_cis_12_1: true +apache_cis_12_2: true +apache_cis_12_3: true + +# The two variables below define the server root directory (path that apache resides) +# and the Document Root Directory. 
These are both derived from a task in tasks/prelim.yml +apache_cis_server_root_dir: "{{ apache_cis_server_root_dir_gather.stdout }}" +apache_cis_doc_root_dir: "{{ apache_cis_doc_root_dir_gather.stdout }}" + +# The apache2_cis_server_root_dir is for Debian based systems +apache2_cis_server_root_dir: "{{ apache2_cis_server_root_dir_gather.stdout }}" +apache2_cis_doc_root_dir: "{{ apache2_cis_doc_root_dir_gather.stdout }}" +# Below are the control specific variables +# Section 1 task variables + +# Section 2 task variables + +# Section 3 task variables +# Control 3.1 variable +# apache_rhel_user is the user that the apache software will use for RHEL systems +# apache_ubuntu_user is the user that the apache software will use for Ubuntu (Debian) systems +apache_rhel_user: apache +apache_ubuntu_user: apache +# apache_rhel_group is the group the apache user will use for RHEL systems +# apache_ubuntu_user is the group the apache user will use for Ubuntu (Debian) systems +apache_rhel_group: apache +apache_ubuntu_group: apache + +# Control 3.7 +# apache_cis_core_dump_location is the folder for core dumps +apache_cis_core_dump_location: /var/log/apache2 + +# Control 3.8 +# apache_cis_lockfile_location is the location to the lock file. This can not be the same location as as the DocumentRoot directory. Apache default is ServerRoot/logs +# The LockFile should be on a locally mounted driver rathare than an NFS mounted file system +# apache_cis_lockfile_location = RHEL based +# apache2_cis_lockfile_location = Debian based (Ubuntu) +apache_cis_lockfile_location: "{{ apache_cis_server_root_dir }}/logs" +apache2_cis_lockfile_location: "/var/lock/apache2" + +# Section 5 task variables +# Control 5.2 +# This is the options setting for the web root directory vhost settings. Needs to be None or Multiviews to conform to CIS standards +apache_cis_webrootdir_options: None + +# Cotnrol 5.11 +# This is the list of allowed file types for the FilesMatch directive in httpd.conf/apache.conf +apache_cis_allowed_filetypes: "css|html?|js|pdf|txt|xml|xsl|gif|ico|jpe?g|png" + +# Control 5.12 +# The hostname of the top level server for RewriteCond %{HTTP_HOST} config section of httpd.conf/apache.conf +apache_cis_toplevel_svr: 'www\.example\.com' +# Control 5.13 variable +# This is the list of ip's and ports that apache will listen on. If multiples are in use a dash (-) list is used +apache_cis_listen_ip_port: + - 10.0.2.15:80 + +# Section 6 task variables +# Control 6.1 +# all_mods is the level for everything but but core module. Value bust be notice or lower. The core_mod is the core mod setting and needs to be info or lower. +apache_cis_loglevel: + all_mods: "notice" + core_mod: "info" + +# Control 6.1/6.2 +# Path to the apache error logs +apache_cis_errorlog_path: "/var/log/apache2" +# The facility setting for error logs. Any appropriate syslog facility can be used in place of local1 and will still conform to CIS standards +apache_cis_errorlog_facility: "local1" + +# Control 6.3 variables +# apache_cis_log_format is the format that the log files will be created in. For compliance with the control +# the following need to be present (order does not matter for the CIS control) +# %h, %l, %u, %t, %r, %>s, %b, %{Referer}i, and %{User-agent}i +apache_cis_log_format: '"%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\""' +# apache_cis_custom_log is the path for the error log file +apache_cis_custom_log: "/var/log/apache2" + +# Control 6.5 +# apache_cis_extra_packages are the extra packages that will need to be updated. 
Please make this in list format +# example format apache_cis_extra_packages: "'apache2', 'telnet', 'openssl'" +apache_cis_extra_packages: "'apache2'" + +# Control 6.7 +# To install/configure OWASP requires internet connections. If there is no internet available please set to false +apache_cis_owasp_automate: true + +# Section 7 +# Control 7.2 +# When apache_cis_custom_cert set to true the file in files/custom_cert will be copied to the /etc/ssl/certs folder +# When apache_cis_custom_cert set to false the control will create a self signed certificate +apache_cis_custom_cert: false +# The hostname used for certificate. It is important to remember that the browser will compare the host name in the URL to the common name in the +# certificate, so that it is important that all https: URL's match the correct host name. +# Specifically, the host name www.example.com is not the same as example.com nor the +# same as ssl.example.com. +apache_cis_hostname_cert: "example.com" +# Control 7.2 +# When using a cypher (aes128, aes256, etc) when generating an encrypted private key a passphrase is required +apache_cis_privatekey_passphrase: "letmein" +# Control 7.2 +# This will be the final location to your signed certificate +apache_cis_csr_folder: "/etc/ssl/private" +# Control 7.2 +# This is to add the hostname values to the openssl.cnf temp file. +# It is recommented (not required) that the first alt name is the common name. +# This is a list and must be in the format of DNS.X = , where X is the next number sequentially +apache_cis_alt_names: + - DNS:www.example.com + - DNS:example.com + - DNS:app.example.com + - DNS:service.example.com +# Control 7.2 +# The settings below relate to req_distinguished_name section of the openssl.cnf file. The var with the value set relates to the setting it is named after. +apache_req_distinguished_name_settings: + countryName_default: "GB" + stateOrProvinceName_default: "Scotland" + localityName_default: "Glasgow" + organizationName_default: "Example Company Ltd" + organizationalUnitName_default: "ICT" + commonName_default: "www.example.com" + email_address: "blah@mail.com" + +# Control 7.4 +# apache_cis_tls_1_2_available will toggle TLS1.2 or TLSv1 set in ssl.conf. If TLS1.2 is available that is preferred but needs to be setup and TLSv1.0 and TLSv1.1 needs to removed/disabled +apache_cis_tls_1_2_available: true + +# Control 7.5/7.8 +# apache_cis_sslciphersuite_settings are the settings for the SSLCipherSuite parameter in the ssl.conf configuration. +# To conform to the CIS standard for 7.5 (weak ciphers disabled) these settings must have !NULL:!SSLv2:!RC4:!aNULL and it is not recommented to add !SSLv3. Example value: ALL:!EXP:!NULL:!LOW:!SSLv2:!RC4:!aNULL +# to conform to the CIS standard for 7.8 (medium ciphers disables) these settings must have !3DES:!IDEA. 
Example value: ALL:!EXP:!NULL:!LOW:!SSLv2:!RC4:!aNULL:!3DES:!IDEA +apache_cis_sslciphersuite_settings: "ALL:!EXP:!NULL:!LOW:!SSLv2:!RC4:!aNULL:!3DES:!IDEA" + +# Control 7.9 +# apache_cis_tls_redirect is the web address that will be used to redirect a tls website or similar +apache_cis_tls_redirect: "https://www.cisecurity.org/" + + +# Section 8 task variables +# Control 8.1 +# apache_cis_servertokens needs to be set to either Prod or ProductOnly +apache_cis_servertokens: "Prod" + +# Section 9 task variables +# Control 9.1 +# apache_cis_timeout is the apache server timeout, must be set to less than 10 seconds to conform to CIS standards +apache_cis_timeout: 10 + +# Control 9.3 +# apache_cis_maxkeepaliverequests is the max number of keep alive requests. Needs to be set to 100 or more to conform to CIS standards +apache_cis_maxkeepaliverequests: 100 + +# Control 9.4 +# apache_cis_keepalivetimeout is the keep alive timout value in seconds. Needs to be set to 15 or less to conform to CIS standards +apache_cis_keepalivetimeout: 15 + +# Control 9.5 +# apache_cis_reqread_timeout is the value or range of the request read timeout in seconds. The max length can not exceed 40 seconds to conform to CIS standards +apache_cis_reqread_timeout: 20-40 + +# Control 9.6 +# apache_cis_reqread_body is the value of the request read body timout in seconds. This needs to be set to 20 seconds or less to conform to CIS standards +apache_cis_reqread_body: 20 + +# Section 10 task variables +# Control 10.1 +# apache_cis_limitrequestline is the limit set to the request line. The value needs to be 512 or shorter to conform to CIS standards +apache_cis_limitrequestline: 512 + +# Control 10.2 +# apache_cis_limitrequestfields is the limit set to the number of fields. The value needs to be 100 or less to conform to CIS standards +apache_cis_limitrequestfields: 100 + +# Control 10.3 +# apache_cis_limitrequestfieldsize is the limit set for the size of the request headers. The value needs to be 1024 or less +apache_cis_limitrequestfieldsize: 1024 + +# Control 10.4 +# apache_cis_limitrequestbody is the limit set for the size of the request body. The value needs to be set to 102400 (100k) or less +apache_cis_limitrequestbody: 102400 + +# Control 11.1 +# apache2_cis_selinux is if you are using AppArmor on Ubuntu instead of SELinux. AppArmor is installed by default with Ubuntu +# AppArmor is not supported on RHEL based systems and this toggle will not work with the RHEL implimentation of the CIS role. +apache2_cis_selinux: false \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/develtomaster.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/develtomaster.yml new file mode 100644 index 0000000..4913679 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/develtomaster.yml @@ -0,0 +1,38 @@ +# This is a basic workflow to help you get started with Actions + +name: DevelToMaster + +# Controls when the action will run. 
Triggers the workflow on push or pull request +# events but only for the devel branch +on: + pull_request: + branches: [ main ] + +# A workflow run is made up of one or more jobs that can run sequentially or in parallel +jobs: + # This workflow contains a single job called "build" + build: + # The type of runner that the job will run on + runs-on: ubuntu-latest + + # Steps represent a sequence of tasks that will be executed as part of the job + steps: + # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it + - uses: actions/checkout@v2 + + # Refactr pipeline for devel pull request/merge + - name: Refactr - Run Pipeline (to master) + # You may pin to the exact commit or the version. + # uses: refactr/action-run-pipeline@be91e2796aa225268e4685c0e01a26d5f800cd53 + uses: refactr/action-run-pipeline@v0.1.2 + with: + # API token + api_token: '${{ secrets.REFACTR_KEY }}' + # Project ID + project_id: 5f47f0c4a13c7b18373e5556 + # Job ID + job_id: 5f90ad90f9c74e6d1e606e33 + # Variables + variables: '{ "gitrepo": "https://github.com/ansible-lockdown/UBUNTU20-CIS.git", "image": "ami-0fe12c34e05228a69", "username": "ubuntu" }' + # Refactr API base URL + api_url: # optional diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_file.crt b/Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_file.crt new file mode 100644 index 0000000..67145ca --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_file.crt @@ -0,0 +1 @@ +certfile here \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_key.key b/Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_key.key new file mode 100644 index 0000000..e460608 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/files/custom_cert/custom_cert_key.key @@ -0,0 +1 @@ +Key for custom cert \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/handlers/main.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/handlers/main.yml new file mode 100644 index 0000000..86515e9 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/handlers/main.yml @@ -0,0 +1,15 @@ +--- +- name: restart httpd + service: + name: httpd + state: restarted + +- name: restart apache2 + service: + name: apache2 + state: restarted + +- name: reboot system + shell: sleep 3; reboot + async: 15 + poll: 0 \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/meta/main.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/meta/main.yml new file mode 100644 index 0000000..9330518 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/meta/main.yml @@ -0,0 +1,19 @@ +galaxy_info: + author: "Sam Doran, Josh Springer, Daniel Shepherd, James Cassell, Mike Renfro, DFed, George Nalen" + description: "Apply the Apache 2.4 CIS" + company: "MindPoint Group" + license: MIT + min_ansible_version: 2.6.5 + + platforms: + - name: EL + versions: + - 7 + + galaxy_tags: + - system + - security + - cis + - hardening + +dependencies: [] \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/site.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/site.yml new file mode 100644 index 0000000..2dbdd40 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/site.yml @@ -0,0 +1,8 @@ +--- +- hosts: all + become: true + vars: + is_container: false + + roles: + - role: "{{ playbook_dir }}" \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_redhat_fix.yml 
b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_redhat_fix.yml
new file mode 100644
index 0000000..2109501
--- /dev/null
+++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_redhat_fix.yml
@@ -0,0 +1,2205 @@
+---
+# Section 1 Fixes
+- name: "NOTSCORED | 1.1 | AUDIT | Ensure the Pre-Installation Planning Checklist Has Been Implemented"
+  debug:
+    msg:
+      - "Warning! Make sure you have your pre-installation checklist completed"
+      - "Please refer to control 1.1 in the benchmark document for those items"
+  when:
+    - apache_cis_1_1
+    - apache_cis_section1
+  tags:
+    - level1
+    - level2
+    - notscored
+    - audit
+    - rule_1.1
+
+- name: "NOTSCORED | 1.2 | AUDIT | Ensure the Server Is Not a Multi-Use System"
+  block:
+    - name: "NOTSCORED | 1.2 | AUDIT | Ensure the Server Is Not a Multi-Use System | Gather running services for review"
+      shell: chkconfig --list | grep ':on'
+      changed_when: false
+      failed_when: false
+      register: apache_rhel_1_2_running_services
+
+    - name: "NOTSCORED | 1.2 | AUDIT | Ensure the Server Is Not a Multi-Use System | Display running services"
+      debug:
+        msg:
+          - "Warning! Below are the running services. Please review"
+          - "Servers should only be single service"
+          - "{{ apache_rhel_1_2_running_services.stdout_lines }}"
+  when:
+    - apache_cis_1_2
+    - apache_cis_section1
+  tags:
+    - level1
+    - level2
+    - notscored
+    - audit
+    - rule_1.2
+
+- name: "NOTSCORED | 1.3 | PATCH | Ensure Apache Is Installed From the Appropriate Binaries"
+  yum:
+    name: httpd
+    state: present
+  when:
+    - apache_cis_1_3
+    - apache_cis_section1
+    - apache_cis_disruption_high
+  tags:
+    - level1
+    - level2
+    - notscored
+    - audit
+    - rule_1.3
+
+# Section 2 Fixes
+- name: "NOTSCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled"
+  block:
+    - name: "NOTSCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled | Capture Auth modules"
+      shell: httpd -M | egrep 'auth._'
+      changed_when: false
+      failed_when: false
+      register: apache_rhel_2_1_auth_modules
+
+    - name: "NOTSCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled | Capture LDAP modules"
+      shell: httpd -M | egrep 'ldap'
+      changed_when: false
+      failed_when: false
+      register: apache_rhel_2_1_ldap_modules
+
+    - name: "NOTSCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled | Display Modules"
+      debug:
+        msg:
+          - "Warning! Below are the installed modules.
Please review and remove any un-needed modules" + - "Auth Modules:" + - "{{ apache_rhel_2_1_auth_modules.stdout_lines }}" + - "LDAP Modules" + - "{{ apache_rhel_2_1_ldap_modules.stdout_lines }}" + when: + - apache_cis_2_1 + - apache_cis_section2 + tags: + - level1 + - level2 + - notscored + - audit + - rule_2.1 + +- name: "SCORED | 2.2 | PATCH | Ensure the Log Config Module Is Enabled" + block: + - name: "SCORED | 2.2 | AUDIT | Ensure the Log Config Module Is Enabled | Check for Log Module configured" + shell: cat {{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf | grep -i "LoadModule log_config_module" + changed_when: false + failed_when: false + register: apache_rhel_2_2_log_config_module_base + + - name: "SCORED | 2.2 | PATCH | Ensure the Log Config Module Is Enabled | Enable Log Config Module" + lineinfile: + path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf' + line: "LoadModule log_config_module modules/mod_log_config.so" + notify: restart httpd + when: '"log_config_module" not in apache_rhel_2_2_log_config_module_base.stdout' + + - name: "SCORED | 2.2 | PATCH | Ensure the Log Config Module Is Enabled | Enable Log Config Module" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: '.*LoadModule log_config_module modules/mod_log_config.so' + replace: "LoadModule log_config_module modules/mod_log_config.so" + notify: restart httpd + when: '"log_config_module" in apache_rhel_2_2_log_config_module_base.stdout' + + when: + - apache_cis_2_2 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.2 + +- name: "SCORED | 2.3 | PATCH | Ensure the WebDAV Modules Are Disabled | Check for enabled WebDAV Module" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-dav.conf" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: '.*LoadModule dav_module modules/mod_dav.so', replace: '# LoadModule dav_module modules/mod_dav.so' } + - { regexp: '.*LoadModule dav_fs_module modules/mod_dav_fs.so', replace: '# LoadModule dav_fs_module modules/mod_dav_fs.so' } + - { regexp: '.*LoadModule dav_lock_module modules/mod_dav_lock.so', replace: '# LoadModule dav_lock_module modules/mod_dav_lock.so' } + notify: restart httpd + when: + - apache_cis_2_3 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.3 + +- name: "SCORED | 2.4 | PATCH | Ensure the Status Module Is Disabled" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: '.*LoadModule status_module modules/mod_status.so' + replace: '## LoadModule status_module modules/mod_status.so' + notify: restart httpd + when: + - apache_cis_2_4 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.4 + +- name: "SCORED | 2.5 | PATCH | Ensure the Autoindex Module Is Disabled" + block: + - name: "SCORED | 2.5 | PATCH | Ensure the Autoindex Module Is Disabled | Check for autoindex.conf" + find: + paths: "{{ apache_cis_server_root_dir }}" + patterns: "autoindex.conf" + recurse: yes + register: apache_rhel_2_5_autoindex_status + + - name: "SCORED | 2.5 | PATCH | Ensure the Autoindex Module Is Disabled | Delete autoindex.conf" + file: + path: "{{ apache_rhel_2_5_autoindex_status.files[0].path }}" + state: absent + when: apache_rhel_2_5_autoindex_status.matched >=1 + + - name: "SCORED | 2.5 | PATCH | Ensure the Autoindex Module Is Disabled" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + 
regexp: '.*LoadModule autoindex_module modules/mod_autoindex.so' + replace: '## LoadModule autoindex_module modules/mod_autoindex.so' + notify: restart httpd + when: + - apache_cis_2_5 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.5 + +- name: "SCORED | 2.6 | PATCH | Ensure the Proxy Modules Are Disabled" + block: + - name: "SCORED | 2.6 | PATCH | Ensure the Proxy Modules Are Disabled" + replace: + path: "{{ item.path }}" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_module modules/mod_proxy.so', replace: '# LoadModule proxy_module modules/mod_proxy.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so', replace: '# LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so', replace: '# LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so', replace: '# LoadModule lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so', replace: '# LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_connect_module modules/mod_proxy_connect.so', replace: '# LoadModule proxy_connect_module modules/mod_proxy_connect.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_ftp_module modules/mod_proxy_ftp.so', replace: '# LoadModule proxy_ftp_module modules/mod_proxy_ftp.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_http_module modules/mod_proxy_http.so', replace: '# LoadModule proxy_http_module modules/mod_proxy_http.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so', replace: '# LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_scgi_module modules/mod_proxy_scgi.so', replace: '# LoadModule proxy_scgi_module modules/mod_proxy_scgi.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_ajp_module modules/mod_proxy_ajp.so', replace: '# LoadModule proxy_ajp_module modules/mod_proxy_ajp.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_balancer_module modules/mod_proxy_balancer.so', replace: '# LoadModule proxy_balancer_module modules/mod_proxy_balancer.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_express_module modules/mod_proxy_express.so', replace: '# LoadModule proxy_express_module modules/mod_proxy_express.so' } + - { path: '{{ apache_cis_server_root_dir 
}}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so', replace: '# LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so', replace: '# LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so', replace: '# LoadModule proxy_hcheck_module modules/mod_proxy_hcheck.so' } + - { path: '{{ apache_cis_server_root_dir }}/conf.modules.d/00-proxy.conf', regexp: '.*LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so', replace: '# LoadModule proxy_uwsgi_module modules/mod_proxy_uwsgi.so' } + notify: restart httpd + + - name: "SCORED | 2.6 | PATCH | Ensure the Proxy Modules Are Disabled | RHEL 8 extra line" + replace: + path: '{{ apache_cis_server_root_dir }}/conf.modules.d/10-proxy_h2.conf' + regexp: '.*LoadModule proxy_http2_module modules/mod_proxy_http2.so' + replace: '# LoadModule proxy_http2_module modules/mod_proxy_http2.so' + notify: restart httpd + when: ansible_facts.distribution_major_version == "8" + when: + - apache_cis_2_6 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.6 + +- name: "SCORED | 2.7 | PATCH | Ensure the User Directories Module Is Disabled" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: '.*LoadModule userdir_module modules/mod_userdir.so' + replace: '# LoadModule userdir_module modules/mod_userdir.so' + notify: restart httpd + when: + - apache_cis_2_7 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.7 + +- name: "SCORED | 2.8 | PATCH | Ensure the Info Module Is Disabled" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: '.*LoadModule info_module modules/mod_info.so' + replace: '# LoadModule info_module modules/mod_info.so' + notify: restart httpd + when: + - apache_cis_2_8 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.8 + +- name: "SCORED | 2.9 | PATCH | Ensure the Basic and Digest Authentication Modules are Disabled" + replace: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: '.*LoadModule auth_basic_module modules/mod_auth_basic.so', replace: '# LoadModule auth_basic_module modules/mod_auth_basic.so' } + - { regexp: '.*LoadModule auth_digest_module modules/mod_auth_digest.so', replace: '# LoadModule auth_digest_module modules/mod_auth_digest.so' } + notify: restart httpd + when: + - apache_cis_2_9 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.9 + +# Section 3 Fixes +# ---------------------------- +# ----------------------------- +# The service is stopped because if the service is running with the user you are trying to set the options for the task will fail since the user is associated with a process +# ------------------------------ +# ----------------------------- +- name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User" + block: + - name: "SCORED | 3.1 | AUDIT | Ensure the Apache Web Server Runs As a Non-Root User | Get configured user block" + block: + - name: "SCORED | 3.1 | AUDIT | Ensure the Apache Web Server Runs As a Non-Root 
User | Get configured user"
+          shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep "User " -A1
+          changed_when: false
+          failed_when: false
+          register: apache_rhel_3_1_configured_user_group
+
+        - name: "SCORED | 3.1 | AUDIT | Ensure the Apache Web Server Runs As a Non-Root User | Warn about user"
+          debug:
+            msg:
+              - "Warning! Below is the user and group for the apache service"
+              - "Please review to confirm it is a non-root user"
+              - "This task did not change the user information due to apache_cis_disruption_high being set to false"
+              - "Set that parameter to true and this task will create the user/group and configure as needed"
+              - "{{ apache_rhel_3_1_configured_user_group.stdout_lines }}"
+      when: not apache_cis_disruption_high
+
+    - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User"
+      block:
+        - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Stop apache service"
+          service:
+            name: httpd
+            state: stopped
+
+        - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Create Apache group"
+          group:
+            name: "{{ apache_rhel_group }}"
+            system: yes
+            state: present
+
+        - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Create Apache user"
+          user:
+            name: "{{ apache_rhel_user }}"
+            system: yes
+            group: "{{ apache_rhel_group }}"
+            home: /var/www
+            shell: /sbin/nologin
+
+        - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Add user to configs"
+          replace:
+            path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf"
+            regexp: "{{ item.regexp }}"
+            replace: "{{ item.replace }}"
+          with_items:
+            - { regexp: 'User .*', replace: 'User {{ apache_rhel_user }}' }
+            - { regexp: 'Group .*', replace: 'Group {{ apache_rhel_group }}' }
+
+        - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Start apache service"
+          service:
+            name: httpd
+            state: started
+      when: apache_cis_disruption_high
+  when:
+    - apache_cis_3_1
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.1
+
+- name: "SCORED | 3.2 | PATCH | Ensure the Apache User Account Has an Invalid Shell"
+  user:
+    name: "{{ apache_rhel_user }}"
+    shell: /sbin/nologin
+  when:
+    - apache_cis_3_2
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.2
+
+- name: "SCORED | 3.3 | PATCH | Ensure the Apache User Account Is Locked"
+  user:
+    name: "{{ apache_rhel_user }}"
+    password_lock: yes
+  when:
+    - apache_cis_3_3
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.3
+
+- name: "SCORED | 3.4 | PATCH | Ensure Apache Directories and Files Are Owned By Root"
+  file:
+    name: "{{ apache_cis_server_root_dir }}"
+    owner: root
+    recurse: yes
+  when:
+    - apache_cis_3_4
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.4
+
+- name: "SCORED | 3.5 | PATCH | Ensure the Group Is Set Correctly on Apache Directories and Files"
+  file:
+    name: "{{ apache_cis_server_root_dir }}"
+    group: root
+    recurse: yes
+  when:
+    - apache_cis_3_5
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.5
+
+- name: "SCORED | 3.6 | PATCH | Ensure Other Write Access on Apache Directories and Files Is Restricted"
+  file:
+    name: "{{ apache_cis_server_root_dir }}"
+    mode: o-w
+    recurse: yes
+  when:
+    - apache_cis_3_6
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.6
+
+- name: "SCORED | 3.7 | PATCH | Ensure the Core Dump Directory Is Secured"
Core Dump Directory Is Secured" + block: + - name: "SCORED | 3.7 | AUDIT | Ensure the Core Dump Directory Is Secured | Find if CoreDumpDirectory is used" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep CoreDumpDirectory | cut -f2 -d " " + changed_when: false + failed_when: false + register: apache_rhel_3_7_coredumpdirectory + + - name: "SCORED | 3.7 | AUDIT | Ensure the Core Dump Directory Is Secured | Find Web Document Root Dir" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep "DocumentRoot " | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_rhel_3_7_docrootdir + + - name: "SCORED | 3.7 | AUDIT | Ensure the Core Dump Directory Is Secured | Message if CoreDumpDirectory is Web Document Root Dir" + debug: + msg: "WARNING!! Your CoreDumpDirectory is using the Web Document Root directory: {{ apache_cis_3_7_coredumpdirectory.stdout }}" + when: + - apache_rhel_3_7_coredumpdirectory.stdout != "" + - apache_rhel_3_7_coredumpdirectory.stdout == apache_cis_3_7_docrootdir.stdout + + - name: "SCORED | 3.7 | PATCH | Ensure the Core Dump Directory Is Secured" + file: + name: /var/log/httpd + owner: root + group: apache + mode: o-rwx + recurse: yes + when: + - apache_cis_3_7 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.7 + +# LockFile is no longer the valid parameter, it was replaced with Mutex File +- name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured" + block: + - name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured | Add LockFile directive if needed" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^LockFile|^Mutex File' + line: 'Mutex File:{{ apache_cis_lockfile_location }} default' + insertafter: '# Supplemental configuration' + + - name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured | Create LockFile in new location and set permissions if does not exist" + file: + path: "{{ apache_cis_lockfile_location }}/httpd.lock" + owner: root + group: root + mode: '0750' + state: touch + + - name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured | Set LockFile permissions" + file: + path: "{{ apache_cis_lockfile_location }}/httpd.lock" + owner: root + group: root + mode: '0750' + when: + - apache_cis_3_8 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.8 + +- name: "SCORED | 3.9 | PATCH | Ensure the Pid File Is Secured" + block: + - name: "SCORED | 3.9 | AUDIT | Ensure the Pid File Is Secured | Get PID location" + shell: find / -name httpd.pid + changed_when: false + failed_when: false + register: apache_rhel_3_9_httpd_pid_loc + + - name: "SCORED | 3.9 | PATCH | Ensure the Pid File Is Secured | Set permissions" + file: + path: "{{ apache_rhel_3_9_httpd_pid_loc.stdout }}" + owner: root + group: root + mode: o-w,g-w + + - name: "SCORED | 3.9 | AUDIT | Ensure the Pid File Is Secured | Warn if PID is in DocumentRoot dir" + debug: + msg: + - "WARNING!! 
+          - "WARNING!! Your PID file is in the DocumentRoot directory, to conform with this CIS control"
+          - "please move to another folder that is not within the Document root directory ({{ apache_cis_doc_root_dir }})"
+      when: apache_cis_doc_root_dir in apache_rhel_3_9_httpd_pid_loc.stdout
+  when:
+    - apache_cis_3_9
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.9
+
+# -----------------------------
+# -----------------------------
+# Missing last step to confirm ScoreBoardFile location is not on an externally mounted NFS system. Need to review how to do that
+# -----------------------------
+# -----------------------------
+- name: "SCORED | 3.10 | PATCH | Ensure the ScoreBoard File Is Secured"
+  block:
+    - name: "SCORED | 3.10 | AUDIT | Ensure the ScoreBoard File Is Secured | Check if ScoreBoardFile is in use"
+      shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep ScoreBoardFile | cut -f2 -d" "
+      changed_when: false
+      failed_when: false
+      register: apache_rhel_3_10_scoreboardfile
+
+    - name: "SCORED | 3.10 | AUDIT | Ensure the ScoreBoard File Is Secured | Warn if the same as DocumentRoot"
+      debug:
+        msg:
+          - "WARNING!! The ScoreBoardFile parameter is in use and is using the DocumentRoot location"
+          - "This does not conform to CIS standards. Please change the ScoreBoardFile location in {{ apache_cis_server_root_dir }}/conf/httpd.conf"
+      when:
+        - apache_cis_doc_root_dir in apache_rhel_3_10_scoreboardfile.stdout
+
+    - name: "SCORED | 3.10 | PATCH | Ensure the ScoreBoard File Is Secured | Update file permissions"
+      file:
+        path: "{{ apache_rhel_3_10_scoreboardfile.stdout }}"
+        owner: root
+        group: root
+        mode: o-w,g-w
+      when: apache_rhel_3_10_scoreboardfile.stdout != ""
+  when:
+    - apache_cis_3_10
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.10
+    - notimplimented
+
+- name: "SCORED | 3.11 | PATCH | Ensure Group Write Access for the Apache Directories and Files Is Properly Restricted"
+  file:
+    path: "{{ apache_cis_server_root_dir }}"
+    mode: g-w
+    recurse: yes
+  when:
+    - apache_cis_3_11
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.11
+
+- name: "SCORED | 3.12 | PATCH | Ensure Group Write Access for the Document Root Directories and Files Is Properly Restricted"
+  file:
+    path: "{{ apache_cis_doc_root_dir }}"
+    mode: g-w
+    recurse: yes
+  when:
+    - apache_cis_3_12
+    - apache_cis_section3
+  tags:
+    - level1
+    - level2
+    - scored
+    - patch
+    - rule_3.12
+
+# -----------------------------
+# -----------------------------
+# Control 3.13 circle back since I'm not sure what is needed here.
You are setting values but I can't figure out where those configurations are located +# ----------------------------- +# ----------------------------- + +- name: "NOTSCORED | 3.13 | PATCH | Ensure Access to Special Purpose Application Writable Directories is Properly Restricted" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_3_13 + - apache_cis_section3 + tags: + - level1 + - level2 + - notscored + - patch + - rule_3.13 + - notimplimented + +# Section 4 Fixes +- name: "SCORED | 4.1 | PATCH | Ensure Access to OS Root Directory Is Denied By Default" + block: + - name: "SCORED | 4.1 | AUDIT | Ensure Access to OS Root Directory Is Denied By Default | Get Root Directory" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | sed -n '/' + before: '' + notify: restart httpd + when: '"Require" in apache_rhel_4_1_root_directory.stdout' + + - name: "SCORED | 4.1 | PATCH | Ensure Access to OS Root Directory Is Denied By Default | Enter Require if it doesn't exist" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + line: | + + Require all denied + notify: restart httpd + when: '"Require" not in apache_rhel_4_1_root_directory.stdout' + when: + - apache_cis_4_1 + - apache_cis_section4 + tags: + - level1 + - level2 + - patch + - rule_4.1 + +# ----------------------------- +# ----------------------------- +# Control 4.2 requires dynamic changes to all and elements. Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- + +- name: "NOTSCORED | 4.2 | PATCH | Ensure Appropriate Access to Web Content Is Allowed" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_4_2 + - apache_cis_section4 + tags: + - level1 + - level2 + - scored + - patch + - rule_4.2 + - notimplimented + + +- name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory" + block: + - name: "SCORED | 4.3 | AUDIT | Ensure OverRide Is Disabled for the OS Root Directory | Get Root Directory" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | sed -n '/' + before: '' + notify: restart httpd + when: '"AllowOverride" in apache_rhel_4_3_root_directory.stdout' + + - name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory | Enter AllowOverride if it doesn't exist" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + line: | + + AllowOverride None + notify: restart httpd + when: '"AllowOverride" not in apache_rhel_4_3_root_directory.stdout' + + - name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory | Remove AllowOverrideList element" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: 'AllowOverrideList .*' + state: absent + notify: restart httpd + when: + - apache_cis_4_3 + - apache_cis_section4 + tags: + - level1 + - level2 + - patch + - rule_4.3 + +- name: "SCORED | 4.4 | PATCH | Ensure OverRide Is Disabled for All Directories" + block: + - name: "SCORED | 4.4 | PATCH | Ensure OverRide Is Disabled for All Directories | Set AllowOverride to None" + replace: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ AllowOverride.*' + replace: ' AllowOverride None' + notify: restart httpd + + - name: "SCORED | 4.4 | PATCH | Ensure OverRide Is Disabled for All Directories | Remove AllowOverrideList" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ 
AllowOverrideList' + state: absent + notify: restart httpd + when: + - apache_cis_4_4 + - apache_cis_section4 + tags: + - level1 + - level2 + - scored + - patch + - rule_4.4 + +# Section 5 Fixes +- name: "SCORED | 5.1 | PATCH | Ensure Options for the OS Root Directory Are Restricted" + block: + - name: "SCORED | 5.1 | AUDIT | Ensure Options for the OS Root Directory Are Restricted | Get Root Directory Settings" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | sed -n '/' + before: '' + notify: restart httpd + when: '"Options" in apache_rhel_5_1_root_directory.stdout' + + - name: "SCORED | 5.1 | PATCH | Ensure Options for the OS Root Directory Are Restricted | Enter Options if doesn't exist" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + line: | + + Options None + notify: restart httpd + when: '"Options" not in apache_rhel_5_1_root_directory.stdout' + when: + - apache_cis_5_1 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.1 + +- name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted" + block: + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Get Document Root vHost settings" + raw: cat /etc/httpd/conf/httpd.conf | sed -n "//,/<\/Directory>/p" + changed_when: false + failed_when: false + register: apache_rhel_5_2_vdir_doc_settings + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Get Document Root vHost settings" + raw: cat /etc/httpd/conf/httpd.conf + changed_when: false + failed_when: false + register: test + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Escape path slashes" + set_fact: + apache_rhel_5_2_doc_root_dir: '{{ apache_cis_doc_root_dir | replace("/","\/") }}' + + # - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Escape path slashes" + # set_fact: + # test_after: "{{ test | regex_search('([\\s\\S]*?)<\/Directory>', multiline=True) }}" + - debug: var=apache_rhel_5_2_vdir_doc_settings + - debug: var=apache_rhel_5_2_doc_root_dir + - debug: var=test_after + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Set options to None or Multiviews if options does not exist" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + line: ' Options {{ apache_cis_webrootdir_options }}' + insertafter: '^' + when: "'Options' not in apache_rhel_5_2_vdir_doc_settings.stdout" + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Get Document Root vHost settings" + shell: cat /etc/httpd/conf/httpd.conf | sed -n "//,/<\/Directory>/p" + changed_when: false + failed_when: false + register: apache_rhel_5_2_vdir_doc_settings_2222222 + + - debug: var=apache_rhel_5_2_vdir_doc_settings_2222222 + # - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Set options to None or Multiviews if options does not exist" + # replace: + # path: "{{ apache_cis_server_root_dir }}" + # replace: ' Options.*' + # replace: ' Options {{ apache_cis_webrootdir_options }}' + # after: '^' + # before: '^' + when: + - apache_cis_5_2 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.2 + - notimplimented + +# ----------------------------- +# ----------------------------- +# Control 5.3/5.4 requires dynamic changes to all elements. 
Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- +- name: "SCORED | 5.3 | PATCH | Ensure Options for Other Directories Are Minimized" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_5_3 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.3 + - notimplimented + +- name: "SCORED | 5.4 | PATCH | Ensure Default HTML Content Is Removed" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_5_4 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.4 + - notimplimented + +- name: "SCORED | 5.5 | PATCH | Ensure the Default CGI Content printenv Script Is Removed" + block: + - name: "SCORED | 5.5 | AUDIT | Ensure the Default CGI Content printenv Script Is Removed | Get CGI folder" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep -i "cgi-bin/" | grep -v "#" | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_rhel_5_5_cgi_bin_dir + + - name: "SCORED | 5.5 | PATCH | Ensure the Default CGI Content printenv Script Is Removed | Remove printenv" + file: + path: "{{ item }}/printenv" + state: absent + with_items: + - "{{ apache_rhel_5_5_cgi_bin_dir.stdout_lines }}" + when: + - apache_cis_5_5 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.5 + +- name: "SCORED | 5.6 | PATCH | Ensure the Default CGI Content test-cgi Script Is Removed" + block: + - name: "SCORED | 5.6 | AUDIT | Ensure the Default CGI Content test-cgi Script Is Removed | Get CGI folder" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep -i "cgi-bin/" | grep -v "#" | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_rhel_5_6_cgi_bin_dir + + - name: "SCORED | 5.6 | PATCH | Ensure the Default CGI Content test-cgi Script Is Removed | Remove test-cgi folder" + file: + path: "{{ item }}/test-cgi" + state: absent + with_items: + - "{{ apache_rhel_5_6_cgi_bin_dir.stdout_lines }}" + when: + - apache_cis_5_6 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.6 + +# ----------------------------- +# ----------------------------- +# Control 5.7requires dynamic changes to all elements. 
Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- +- name: "SCORED | 5.7 | PATCH | Ensure HTTP Request Methods Are Restricted" + block: + - name: "SCORED | 5.7 | PATCH | Ensure HTTP Request Methods Are Restricted" + shell: 'cat /etc/httpd/conf/httpd.conf | sed -n "//,/<\/Directory>/p"' + changed_when: false + failed_when: false + register: apache_rhel_5_7_vdir_doc_settings + when: + - apache_cis_5_7 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.7 + - notimplimented + +- name: "SCORED | 5.8 | PATCH | Ensure the HTTP TRACE Method Is Disabled" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: 'TraceEnable.*' + line: 'TraceEnable Off' + notify: restart httpd + when: + - apache_cis_5_8 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.8 + +- name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed" + block: + - name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed | Enable re-write module" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: '.*LoadModule rewrite_module modules/mod_rewrite.so' + line: 'LoadModule rewrite_module modules/mod_rewrite.so' + notify: restart httpd + + - name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed | Set re-write settings" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: '# Supplemental configuration' + notify: restart httpd + with_items: + - { regexp: 'RewriteEngine .*', line: 'RewriteEngine On' } + - { regexp: 'RewriteOptions .*', line: 'RewriteOptions Inherit'} + - { regexp: 'RewriteCond .*', line: 'RewriteCond %{THE_REQUEST} !HTTP/1\.1$' } + - { regexp: 'RewriteRule .*', line: 'RewriteRule .* - [F]' } + when: + - apache_cis_5_9 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.9 + +- name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted" + block: + - name: "SCORED | 5.10 | AUDIT | Ensure Access to .ht* Files Is Restricted | Does FilesMatch exist" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep -ozP '([\S\s]*?)<\/FilesMatch>' + changed_when: false + failed_when: false + register: apache_rhel_5_10_files_match_param + + - name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted | Replace Require all setting" + replace: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: ' Require.*' + replace: ' Require all denied' + after: '' + before: '' + notify: restart httpd + when: + - '"Require" in apache_rhel_5_10_files_match_param.stdout' + + - name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted | Add Require setting if missing" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + line: | + + Require all denied + notify: restart httpd + when: + - apache_rhel_5_10_files_match_param.stdout != "" + - '"Require" not in apache_rhel_5_10_files_match_param.stdout' + + - name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted | Add FilesMatch settings" + blockinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + marker: "# {mark} FilesMatch ht Setting ANSIBLE MANAGED BLOCK" + block: | + + Require all denied + + notify: restart httpd + when: apache_rhel_5_10_files_match_param.stdout == "" + when: + - 
apache_cis_5_10 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.10 + +- name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted" + block: + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Gather file extentions" + shell: find * {{ apache_cis_doc_root_dir }} -type f -name '*.*' | awk -F. '{print $NF }' | sort -u + changed_when: false + failed_when: false + register: apache_rhel_5_11_file_extentions + + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Find FilesMatch for all files" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep -ozP '([\S\s]*?)<\/FilesMatch>' + changed_when: false + failed_when: false + register: apache_rhel_5_11_files_match_1 + + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Find FilesMatch for specified files" + shell: cat {{ apache_cis_server_root_dir }}/conf/httpd.conf | grep -ozP '([\S\s]*?)<\/FilesMatch>' + changed_when: false + failed_when: false + register: apache_rhel_5_11_files_match_2 + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Replace Require in FilesMatch for all files" + replace: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: ' Require.*' + replace: ' Require all denied' + after: '' + before: '' + when: '"Require" in apache_rhel_5_11_files_match_1.stdout' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add Require in FilesMatch for all files" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + line: | + + Require all denied + when: '"Require" not in apache_rhel_5_11_files_match_1.stdout and apache_rhel_5_11_files_match_1.stdout != ""' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add FilesMatch for all files if missing" + blockinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + marker: "# {mark} FilesMatch All Settings ANSIBLE MANAGED BLOCK" + block: | + # Block all files by default, unless specifically allowed. 
+ + Require all denied + + when: apache_rhel_5_11_files_match_1.stdout == "" + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Set allowed file types" + replace: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + replace: '' + when: '"' + before: '' + when: '"Require" in apache_rhel_5_11_files_match_2.stdout' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add Require for allowed file types" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '' + line: | + + Require all granted + when: '"Require" not in apache_rhel_5_11_files_match_2.stdout and apache_rhel_5_11_files_match_2.stdout != ""' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add FilesMatch for allowed files if missing" + blockinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + marker: "# {mark} FilesMatch File Type Settings ANSIBLE MANAGED BLOCK" + block: | + # Allow files with specifically approved file extensions + # Such as (css, htm; html; js; pdf; txt; xml; xsl; ...), + # images (gif; ico; jpeg; jpg; png; ...), multimedia + + Require all granted + + when: apache_rhel_5_11_files_match_2.stdout == "" + + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Display file extensions" + debug: + msg: + - "Alert!! Below are the file extensions in use. Please review to make sure they are all approved" + - "{{ apache_rhel_5_11_file_extentions.stdout_lines }}" + when: + - apache_cis_5_11 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.11 + +- name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed" + block: + - name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed | Enable re-write module" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: '.*LoadModule rewrite_module modules/mod_rewrite.so' + line: 'LoadModule rewrite_module modules/mod_rewrite.so' + + - name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed | Set re-write settings" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: '# Supplemental configuration' + with_items: + - { regexp: 'RewriteEngine .*', line: 'RewriteEngine On'} + - { regexp: 'RewriteCond %{HTTP_HOST}.*', line: 'RewriteCond %{HTTP_HOST} !^{{ apache_cis_toplevel_svr }} [NC]' } + - { regexp: 'RewriteCond %{REQUEST_URI}.*', line: 'RewriteCond %{REQUEST_URI} !^/error [NC]' } + - { regexp: 'RewriteRule \^\.\(\.\*\).*', line: 'RewriteRule ^.(.*) - [L,F]' } + when: + - apache_cis_5_12 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.12 + +- name: "SCORED | 5.13 | PATCH | Ensure the IP Addresses for Listening for Requests Are Specified" + block: + - name: "SCORED | 5.13 | PATCH | Ensure the IP Addresses for Listening for Requests Are Specified | Remove Listen" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^Listen.*' + state: absent + notify: restart httpd + + - name: "SCORED | 5.13 | PATCH | Ensure the IP Addresses for Listening for Requests Are Specified" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + line: 'Listen {{ item }}' + insertafter: '# prevent Apache from glomming onto all bound IP addresses' + with_items: + - "{{ 
apache_cis_listen_ip_port }}" + notify: restart httpd + when: + - apache_cis_5_13 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.13 + +- name: "SCORED | 5.14 | PATCH | Ensure Browser Framing Is Restricted" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^Header.*' + line: 'Header always append X-Frame-Options SAMEORIGIN' + insertafter: '# Supplemental configuration' + when: + - apache_cis_5_14 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.14 + +# Section 6 Fixes +- name: | + "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly" + "SCORED | 6.2 | PATCH | Ensure a Syslog Facility Is Configured for Error Logging" + block: + - name: "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Add LogLevel setting" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^LogLevel' + line: 'LogLevel {{ apache_cis_loglevel.all_mods }} core:{{ apache_cis_loglevel.core_mod }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_6_1 + + - name: "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Add ErrorLog path" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ErrorLog' + line: 'ErrorLog "{{ apache_cis_errorlog_path }}"' + insertafter: '# Supplemental configuration' + when: + - apache_cis_6_1 + - not apache_cis_6_2 + + - name: "SCORED | 6.2 | PATCH | Ensure a Syslog Facility Is Configured for Error Logging | Add ErrorLog path set to system:local1" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ErrorLog' + line: 'ErrorLog "syslog:{{ apache_cis_errorlog_facility }}"' + insertafter: '# Supplemental configuration' + when: + - apache_cis_6_2 + - not apache_cis_6_1 + + - name: | + "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Add combined ErrorLog" + "SCORED | 6.2 | PATCH | Ensure a Syslog Facility Is Configured for Error Logging | Add combined ErrorLog" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ErrorLog.*' + line: 'ErrorLog "{{ apache_cis_errorlog_path }} syslog:{{ apache_cis_errorlog_facility }}"' + insertafter: '# Supplemental configuration' + when: + - apache_cis_6_1 + - apache_cis_6_2 + + - name: "SCORED | 6.1 | AUDIT | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Alert about virtual Directories" + debug: + msg: + - "Caution!! If you are using virutal directories please add the ErrorLog directive." + - "Each responsible individual or organization needs to access their own web logs and and needs the" + - "skills/training/tools for monitoring the logs." 
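# Worked example for the 6.2-only case above: with apache_cis_errorlog_facility set to "local1"
# (an illustrative value -- the real default lives in defaults/main.yml), the task renders
#   ErrorLog "syslog:local1"
# and Apache then sends its error logging to the local1 syslog facility.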
+ when: + - apache_cis_6_1 or apache_cis_6_2 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.1 + - rule_6.2 + +- name: "SCORED | 6.3 | PATCH | Ensure the Server Access Log Is Configured Correctly" + block: + - name: "SCORED | 6.3 | PATCH | Ensure the Server Access Log Is Configured Correctly | Remove LogFormat" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ LogFormat .*' + state: absent + + - name: "SCORED | 6.3 | PATCH | Ensure the Server Access Log Is Configured Correctly | Add/Modify LogFormat and CustomLog" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: '' + notify: restart httpd + with_items: + - { regexp: '^ LogFormat "', line: ' LogFormat {{ apache_cis_log_format }} combined' } + - { regexp: '^ CustomLog .*', line: ' CustomLog "{{ apache_cis_custom_log }}" combined' } + + - name: "SCORED | 6.3 | AUDIT | Ensure the Server Access Log Is Configured Correctly | Notify about need for vhost logging" + debug: + msg: + - "Caution!! If you are using virtual directories please add the LogFormat and and CustomLog directives" + - "if you have different people responsible for each web site. Each responsible individual or organization" + - "needs access to their own web logs as well as the skills/training/tools for monitoring the logs" + when: + - apache_cis_6_3 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.3 + +- name: "SCORED | 6.4 | PATCH | Ensure Log Storage and Rotation Is Configured Correctly" + block: + - name: "SCORED | 6.4 | PATCH | Ensure Log Storage and Rotation Is Configured Correctly | Edit logrotate.d/httpd" + lineinfile: + path: /etc/logrotate.d/httpd + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^ missingok', line: ' missingok', insertafter: '^/var/log/httpd/' } + - { regexp: '^ notifempty', line: ' notifempty', insertafter: '^/var/log/httpd/' } + - { regexp: '^ sharedscripts', line: ' sharedscripts', insertafter: '^/var/log/httpd/' } + - { regexp: '^ postrotate', line: ' postrotate', insertafter: '^/var/log/httpd/' } + - { regexp: '^ /bin', line: " /bin/kill -HUP 'cat /var/run/httpd.pid 2>/dev/null' 2> /dev/null || true", insertafter: ' postrotate' } + + - name: "SCORED | 6.4 | PATCH | Ensure Log Storage and Rotation Is Configured Correctly | Edit logrotate.conf" + lineinfile: + path: /etc/logrotate.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^# rotate log files ', line: '# rotate log files weekly', insertafter: '^# see' } + - { regexp: '^daily|weekly|monthly|yearly', line: 'weekly', insertafter: '^# rotate log files' } + - { regexp: '^# keep ([^\s]+) ([^\s]+) worth of backlogs', line: '# keep 13 weeks worth of backlogs', insertafter: '^# see'} + - { regexp: '^rotate', line: 'rotate 13', insertafter: '^# keep'} + when: + - apache_cis_6_4 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.4 + +- name: "SCORED | 6.5 | PATCH | Ensure Applicable Patches Are Applied" + block: + - name: "SCORED | 6.5 | PATCH | Ensure Applicable Patches Are Applied | RHEL 7" + yum: + name: ['httpd', 'mod_session', 'mod_ssl'] + state: latest + when: ansible_distribution_major_version|int <= 7 + + - name: "SCORED | 6.5 | PATCH | Ensure Applicable Patches Are Applied | RHEL 8" + dnf: + name: 
['httpd', 'mod_session', 'mod_ssl'] + state: latest + when: ansible_distribution_major_version|int >= 8 + when: + - apache_cis_6_4 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.4 + +- name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled" + block: + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | RHEL 7 Install mod_security" + yum: + name: mod_security + state: present + when: ansible_distribution_major_version|int <= 7 + + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | RHEL 8 Install mod_security" + dnf: + name: mod_security + state: present + when: ansible_distribution_major_version|int >= 8 + + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | Find LoadModule for mod_security" + shell: 'grep -ri "LoadModule security2" {{ apache_cis_server_root_dir }} | cut -f1 -d:' + changed_when: false + failed_when: false + register: apache_rhel_6_6_sec_mod_path + + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | Load module if not currently set" + lineinfile: + path: "{{ apache_rhel_6_6_sec_mod_path.stdout }}" + regexp: 'LoadModule security2_module modules/mod_security2.so' + line: 'LoadModule security2_module modules/mod_security2.so' + insertbefore: BOF + when: apache_rhel_6_6_sec_mod_path.stdout != "" + + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | Create load config if needed" + lineinfile: + path: /etc/httpd/conf.modules.d/10-mod_security.conf + regexp: 'LoadModule security2_module modules/mod_security2.so' + line: 'LoadModule security2_module modules/mod_security2.so' + insertbefore: BOF + create: yes + when: apache_rhel_6_6_sec_mod_path.stdout == "" + when: + - apache_cis_6_6 + - apache_cis_section6 + tags: + - level2 + - scored + - patch + - rule_6.6 + +- name: "SCORED | 6.7 | PATCH | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled" + block: + - name: "SCORED | 6.7 | AUDIT | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Determine if OWASP is installed" + find: + paths: "{{ apache_cis_server_root_dir }}" + patterns: 'crs-setup.conf' + recurse: yes + file_type: file + register: apache_rhel_6_7_owasp_config + + - name: "SCORED | 6.7 | PATCH | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Install OWASP RHEL8" + dnf: + name: mod_security_crs + state: present + when: + - ansible_distribution_major_version|int >= 8 + - apache_rhel_6_7_owasp_config.matched == 0 + - apache_cis_owasp_automate + + - name: "SCORED | 6.7 | PATCH | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Install OWASP RHEL7" + yum: + name: mod_security_crs + state: present + when: + - ansible_distribution_major_version|int <= 7 + - apache_rhel_6_7_owasp_config.matched == 0 + - apache_cis_owasp_automate + + - name: "SCORED | 6.7 | PATCH | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Apply Configuration" + template: + src: crs-setup.conf.j2 + dest: "{{ apache_cis_server_root_dir }}/modsecurity.d/crs-setup.conf" + when: + - apache_cis_owasp_automate + + - name: "SCORED | 6.7 | AUDIT | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Message out when not automated" + debug: + msg: + - "ALERT!!!!To conform to CIS standards you need OWASP installed and enabled" + - "Please go through the process of setup" + when: not apache_cis_owasp_automate + when: + - apache_cis_6_7 + - apache_cis_section6 + 
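# Note for the 6.6 / 6.7 tasks above: on several RHEL/CentOS releases the mod_security and
# mod_security_crs packages are only published in EPEL, so a prerequisite along these lines may be
# needed (a sketch; the epel-release package name is an assumption that fits CentOS -- RHEL proper
# may need the appropriate subscription/optional repositories instead):
# - name: "Ensure EPEL is available for the ModSecurity packages (sketch)"
#   package:
#     name: epel-release
#     state: present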
tags: + - level2 + - scored + - audit + - rule_6.7 + +# Section 7 Fixes +- name: "SCORED | 7.1 | PATCH | Ensure mod_ssl and/or mod_nss Is Installed" + block: + - name: "SCORED | 7.1 | PATCH | Ensure mod_ssl and/or mod_nss Is Installed | RHEL 7 Install mod_ssl" + yum: + name: mod_ssl + state: present + when: ansible_distribution_major_version|int <= 7 + + - name: "SCORED | 7.1 | PATCH | Ensure mod_ssl and/or mod_nss Is Installed | RHEL 8 Install mod_ssl" + dnf: + name: mod_ssl + state: present + when: ansible_distribution_major_version|int >= 8 + when: + - apache_cis_7_1 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.1 + + +# ----------------------------- +# ----------------------------- +# This contral has 13 steps in the remediation. I think this is the simplest way to handle it +# ----------------------------- +# ----------------------------- + +- name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed" + block: + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Generate private key" + openssl_privatekey: + path: "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.key" + cipher: aes128 + size: 2048 + passphrase: "{{ apache_cis_privatekey_passphrase }}" + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Convert to clear text" + command: openssl rsa -in /etc/ssl/certs/{{ apache_cis_hostname_cert }}.key -out /etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear -passin pass:{{ apache_cis_privatekey_passphrase }} + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Create CSR folder" + file: + path: "{{ apache_cis_csr_folder }}" + state: directory + mode: '0755' + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Create CSR" + openssl_csr: + path: "{{ apache_cis_csr_folder }}/{{ apache_cis_hostname_cert}}.csr" + privatekey_path: /etc/ssl/certs/{{ apache_cis_hostname_cert }}.key + privatekey_passphrase: "{{ apache_cis_privatekey_passphrase }}" + country_name: "{{ apache_req_distinguished_name_settings.countryName_default }}" + state_or_province_name: "{{ apache_req_distinguished_name_settings.stateOrProvinceName_default }}" + locality_name: "{{ apache_req_distinguished_name_settings.localityName_default }}" + organization_name: "{{ apache_req_distinguished_name_settings.organizationName_default }}" + organizational_unit_name: "{{ apache_req_distinguished_name_settings.organizationalUnitName_default }}" + common_name: "{{ apache_req_distinguished_name_settings.commonName_default }}" + email_address: "{{ apache_req_distinguished_name_settings.email_address }}" + subject_alt_name: "{{ apache_cis_alt_names | list }}" + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Send CSR to Certificate Authority" + openssl_certificate: + path: "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.crt" + privatekey_path: "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.key" + privatekey_passphrase: "{{ apache_cis_privatekey_passphrase }}" + csr_path: "{{ apache_cis_csr_folder }}/{{ apache_cis_hostname_cert }}.csr" + provider: selfsigned + mode: '0444' + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Modify ssl.conf, self signed" + lineinfile: + path: "{{ apache_cis_server_root_dir 
}}/conf.d/ssl.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^SSLCertificateFile', line: 'SSLCertificateFile /etc/ssl/certs/{{ apache_cis_hostname_cert }}.crt' } + - { regexp: '^SSLCertificateKeyFile', line: 'SSLCertificateKeyFile /etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear' } + notify: restart httpd + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Copy custom CA's over" + copy: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: '0444' + with_items: + - { src: "custom_cert/custom_cert_file.crt", dest: "/etc/ssl/certs/custom_cert_file.crt" } + - { src: "custom_cert/custom_cert_key.key", dest: "/etc/ssl/certs/custom_certkey.key" } + when: apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Modify ssl.conf, custom cert" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.d/ssl.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^SSLCertificateFile', line: 'SSLCertificateFile /etc/ssl/certs/custom_cert_file.crt' } + - { regexp: '^SSLCertificateKeyFile', line: 'SSLCertificateKeyFile /etc/ssl/certs/custom_cert_key.key' } + notify: restart httpd + when: apache_cis_custom_cert + when: + - apache_cis_7_2 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.2 + +- name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected" + block: + - name: "SCORED | 7.3 | AUDIT | Ensure the Server's Private Key Is Protected | Find config files with SSLCertificateFile parameter" + shell: "grep -r SSLCertificateFile {{ apache_cis_server_root_dir }}/* | grep -v Binary | cut -f1 -d: | sort --unique" + changed_when: false + failed_when: false + register: apache_rhel_7_3_sslcertificatefile_location + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Enforce SSLCertificateKeyFile is configured (selfsigned)" + lineinfile: + path: "{{ item }}" + regexp: '^SSLCertificateKeyFile' + line: 'SSLCertificateKeyFile /etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear' + with_items: + - "{{ apache_rhel_7_3_sslcertificatefile_location.stdout_lines }}" + notify: restart httpd + when: not apache_cis_custom_cert + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Enforce SSLCertificateKeyFile is configured (custom cert)" + lineinfile: + path: "{{ item }}" + regexp: '^SSLCertificateKeyFile' + line: 'SSLCertificateKeyFile /etc/ssl/certs/custom_cert_key.key' + with_items: + - "{{ apache_rhel_7_3_sslcertificatefile_location.stdout_lines }}" + notify: restart httpd + when: apache_cis_custom_cert + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Set permissions on SSLCertificateKeyFile (selfsigned)" + file: + path: "{{ item }}" + owner: root + group: root + mode: '0400' + with_items: + - "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.key" + - "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear" + when: not apache_cis_custom_cert + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Set permissions on SSLCertificateKeyFile (custom cert)" + file: + path: /etc/ssl/certs/custom_cert_key.key + owner: root + group: root + mode: '0400' + when: apache_cis_custom_cert + when: + - apache_cis_7_3 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.3 + +- name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are 
Disabled" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled" + block: + - name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are Disabled | Set TLSv1.2 and TLSv1.3" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled | Set TLSv1.2 and TLSv1.3" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.d/ssl.conf" + regexp: '^#SSLProtocol|^SSLProtocol' + line: 'SSLProtocol TLSv1.2 TLSv1.3' + insertafter: '^SSLEngine' + notify: restart httpd + when: + - apache_cis_tls_1_2_available + - ansible_facts.distribution_major_version == "8" + + - name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are Disabled | Set TLSv1.2" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled| Set TLSv1.2" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.d/ssl.conf" + regexp: '^#SSLProtocol|^SSLProtocol' + line: 'SSLProtocol TLSv1.2' + insertafter: '^SSLEngine' + notify: restart httpd + when: + - apache_cis_tls_1_2_available + - ansible_facts.distribution_major_version == "7" + + - name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are Disabled | Set TLSv1" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled | Set TLSv1" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.d/ssl.conf" + regexp: '^#SSLProtocol|^SSLProtocol' + line: 'SSLProtocol TLSv1' + insertafter: '^SSLEngine' + notify: restart httpd + when: not apache_cis_tls_1_2_available + when: + - apache_cis_7_4 or + apache_cis_7_10 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.3 + +# ----------------------------- +# ----------------------------- +# Since the 7.5 and 7.8 controls are virtually the same the apache_cis_sslciphersuite_settings will control iv week and/or Medium ciphers are isabled +# ----------------------------- +# ----------------------------- +- name: | + "SCORED | 7.5 | PATCH | Ensure Weak SSL/TLS Ciphers Are Disabled" + "SCORED | 7.8 | PATCH | Ensure Medium Strength SSL/TLS Ciphers Are Disabled" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.d/ssl.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: 'SSLProtocol' + with_items: + - { regexp: '^#SSLHonorCipherOrder|^SSLHonorCipherOrder', line: 'SSLHonorCipherOrder on' } + - { regexp: '^#SSLCipherSuite|^SSLCipherSuite', line: 'SSLCipherSuite {{ apache_cis_sslciphersuite_settings }}' } + notify: restart httpd + when: + - apache_cis_7_5 or + apache_cis_7.8 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.5 + +- name: "SCORED | 7.6 | PATCH | Ensure Insecure SSL Renegotiation Is Not Enabled" + block: + - name: "SCORED | 7.6 | AUDIT | Ensure Insecure SSL Renegotiation Is Not Enabled | Check if SSLInsecureRenegotiation is present" + shell: "grep -r SSLInsecureRenegotiation {{ apache_cis_server_root_dir }}/* | grep -v Binary | cut -f1 -d:" + changed_when: false + failed_when: false + register: apache_rhel_7_6_sslinsecurerenegotiation + + - name: "SCORED | 7.6 | PATCH | Ensure Insecure SSL Renegotiation Is Not Enabled | Set SSLInsecureRenegotiation to off if configured" + lineinfile: + path: "{{ apache_rhel_7_6_sslinsecurerenegotiation.stdout }}" + regexp: '^#SSLInsecureRenegotiation|^SSLInsecureRenegotiation' + line: 'SSLInsecureRenegotiation off' + when: apache_rhel_7_6_sslinsecurerenegotiation.stdout != "" + when: + - apache_cis_7_6 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.6 + +- 
name: "SCORED | 7.7 | PATCH | Ensure SSL Compression is not Enabled" + block: + - name: "SCORED | 7.7 | AUDIT | Ensure SSL Compression is not Enabled | Check for SSLCompression parameter" + shell: "grep -r SSLCompression {{ apache_cis_server_root_dir }}/* | grep -v Binary | cut -f1 -d:" + changed_when: false + failed_when: false + register: apache_rhel_7_7_sslcompression + + - name: "SCORED | 7.7 | PATCH | Ensure SSL Compression is not Enabled | Set SSLCompression to off if Configured" + lineinfile: + path: "{{ apache_rhel_7_7_sslcompression.stdout }}" + regexp: '^#SSLCompression|^SSLCompression' + line: 'SSLCompression off' + notify: restart httpd + when: apache_rhel_7_7_sslcompression.stdout != "" + when: + - apache_cis_7_7 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.7 + +# ----------------------------- +# ----------------------------- +# Control 7.8 combined with control 7.5 +# ----------------------------- +# ----------------------------- + +- name: "SCORED | 7.9 | PATCH | Ensure All Web Content is Accessed via HTTPS" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ Redirect' + line: ' Redirect permanent / {{ apache_cis_tls_redirect }}' + insertafter: '^' + notify: restart httpd + when: + - apache_cis_7_9 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.9 + +# ----------------------------- +# ----------------------------- +# Control 7.10 combined with control 7.4 +# ----------------------------- +# ----------------------------- + +# ----------------------------- +# ----------------------------- +# The three tasks below requires dynamic changes to elements. Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- +- name: "SCORED | 7.11 | PATCH | Ensure OCSP Stapling Is Enabled" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_7_11 + - apache_cis_section7 + tags: + - level2 + - scored + - patch + - rule_7.11 + - notimplimented + +- name: "SCORED | 7.12 | PATCH | Ensure HTTP Strict Transport Security Is Enabled" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_7_12 + - apache_cis_section7 + tags: + - level2 + - scored + - patch + - rule_7.12 + - notimplimented + +- name: "SCORED | 7.13 | PATCH | Ensure Only Cipher Suites That Provide Forward Secrecy Are Enabled" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_7_13 + - apache_cis_section7 + tags: + - level2 + - scored + - patch + - rule_7.13 + - notimplimented + +# Section 8 Fixes +- name: "SCORED | 8.1 | PATCH | Ensure ServerTokens is Set to 'Prod' or 'ProductOnly'" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#ServerTokens|^ServerTokens' + line: 'ServerTokens {{ apache_cis_servertokens }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_8_1 + - apache_cis_section8 + tags: + - level1 + - level2 + - scored + - patch + - rule_8.1 + +- name: "SCORED | 8.2 | PATCH | Ensure ServerSignature Is Not Enabled" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#ServerSignature|^ServerSignature' + line: 'ServerSignature Off' + insertafter: '# Supplemental configuration' + when: + - apache_cis_8_2 + - apache_cis_section8 + tags: + - level1 + - level2 + - scored + - patch + - rule_8.2 + +- name: "SCORED | 8.3 | PATCH | Ensure All Default Apache Content Is Removed" + 
replace: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^Include conf/extra/httpd-autoindex.conf' + replace: '#Include conf/extra/httpd-autoindex.conf' + when: + - apache_cis_8_3 + - apache_cis_section8 + tags: + - level2 + - scored + - patch + - rule_8.3 + +- name: "SCORED | 8.4 | PATCH | Ensure ETag Response Header Fields Do Not Include Inodes" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: 'FileETag' + state: absent + when: + - apache_cis_8_4 + - apache_cis_section8 + tags: + - level2 + - scored + - patch + - rule_8.4 + +# Section 9 Fixes +- name: "SCORED | 9.1 | PATCH | Ensure the TimeOut Is Set to 10 or Less" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#Timeout|^Timeout' + line: 'Timeout {{ apache_cis_timeout }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_9_1 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.1 + +- name: "SCORED | 9.2 | PATCH | Ensure KeepAlive Is Enabled" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#KeepAlive|^KeepAlive' + line: 'KeepAlive On' + insertafter: '# Supplemental configuration' + when: + - apache_cis_9_2 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.2 + +- name: "SCORED | 9.3 | PATCH | Ensure MaxKeepAliveRequests is Set to a Value of 100 or Greater" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#MaxKeepAliveRequests|^MaxKeepAliveRequests' + line: 'MaxKeepAliveRequests {{ apache_cis_maxkeepaliverequests }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_9_3 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.3 + +- name: "SCORED | 9.4 | PATCH | Ensure KeepAliveTimeout is Set to a Value of 15 or Less" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#KeepAliveTimeout|^KeepAliveTimeout' + line: 'KeepAliveTimeout {{ apache_cis_keepalivetimeout }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_9_4 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.4 + +# ----------------------------- +# ----------------------------- +# Control 9.5 and 9.6 are hard to set individually so they are combined to a single line. 
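# For reference, the combined 9.5/9.6 tasks below manage a block of roughly this shape (the 40/20
# second values are the CIS-recommended ceilings and only illustrative -- the real numbers come
# from apache_cis_reqread_timeout and apache_cis_reqread_body):
#   <IfModule reqtimeout_module>
#       RequestReadTimeout header=40,MinRate=500 body=20,MinRate=500
#   </IfModule>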
+# ----------------------------- +# ----------------------------- +- name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less" + block: + - name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Enable Request Timeout module" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less | Enable Request Timeout module" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf.modules.d/00-base.conf" + regexp: 'LoadModule reqtimeout_module modules/mod_reqtimeout.so' + line: 'LoadModule reqtimeout_module modules/mod_reqtimeout.so' + + - name: | + "SCORED | 9.5 | AUDIT | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Check if reqtimeout_module enabled in httpd.conf" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less | Check if reqtimeout_module enabled in httpd.conf" + shell: grep "" /etc/httpd/conf/httpd.conf + changed_when: false + failed_when: false + register: apache_rhel_9_5_reqtimeout_module_status + + - name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Add reqtimeout_module, set body, and header timeout" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less| Add reqtimeout_module, set body, and header timeout" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + line: | + + RequestReadTimeout header={{ apache_cis_reqread_timeout }},MinRate=500 body={{ apache_cis_reqread_body }},MinRate=500 + + insertafter: '# Supplemental configuration' + when: apache_rhel_9_5_reqtimeout_module_status.stdout == "" + + - name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Just set header and body timeout value" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less | Just set header and body timeout value" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^ RequestReadTimeout header' + line: ' RequestReadTimeout header={{ apache_cis_reqread_timeout }},MinRate=500 body={{ apache_cis_reqread_body }},MinRate=500' + insertafter: '' + when: apache_rhel_9_5_reqtimeout_module_status.stdout != "" + when: + - apache_cis_9_5 or + apache_cis_9_6 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.5 + - rule_9.6 + +# ----------------------------- +# ----------------------------- +# Control 9.6 combined with control 9.5 +# ----------------------------- +# ----------------------------- + +# Section 10 Fixes +- name: "SCORED | 10.1 | PATCH | Ensure the LimitRequestLine directive is Set to 512 or less" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#LimitRequestline|^LimitRequestline' + line: 'LimitRequestline {{ apache_cis_limitrequestline }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_10_1 + - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.1 + +- name: "SCORED | 10.2 | PATCH | Ensure the LimitRequestFields Directive is Set to 100 or Less" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#LimitRequestFields|^LimitRequestFields' + line: 'LimitRequestFields {{ apache_cis_limitrequestfields }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_10_2 
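# For reference, the four Section 10 directives managed in this part of the role, at the ceilings
# named in the control titles, are written in httpd.conf as (the role substitutes its own default
# values for the numbers):
#   LimitRequestLine 512
#   LimitRequestFields 100
#   LimitRequestFieldSize 1024
#   LimitRequestBody 102400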
+ - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.2 + +- name: "SCORED | 10.3 | PATCH | Ensure the LimitRequestFieldsize Directive is Set to 1024 or Less" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#LimitRequestFieldsize|^LimitRequestFieldsize' + line: 'LimitRequestFieldsize {{ apache_cis_limitrequestfieldsize }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_10_3 + - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.3 + +- name: "SCORED | 10.4 | PATCH | Ensure the LimitRequestBody Directive is Set to 102400 or Less" + lineinfile: + path: "{{ apache_cis_server_root_dir }}/conf/httpd.conf" + regexp: '^#LimitRequestBody|^LimitRequestBody' + line: 'LimitRequestBody {{ apache_cis_limitrequestbody }}' + insertafter: '# Supplemental configuration' + when: + - apache_cis_10_4 + - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.4 + +# Section 11 +- name: "SCORED | 11.1 | PATCH | Ensure SELinux Is Enabled in Enforcing Mode" + selinux: + policy: targeted + state: enforcing + when: + - apache_cis_11_1 + - apache_cis_section11 + tags: + - level2 + - scored + - patch + - rule_11.1 + +- name: "SCORED | 11.2 | PATCH | Ensure Apache Processes Run in the httpd_t Confined Context" + sefcontext: + target: "{{ item }}" + setype: httpd_exec_t + ftype: f + with_items: + - /usr/sbin/httpd + - /usr/sbin/httpd.worker + - /usr/sbin/httpd.event + - /usr/sbin/apachectl + when: + - apache_cis_11_2 + - apache_cis_section11 + tags: + - level2 + - scored + - patch + - rule_11.2 + +- name: "SCORED | 11.3 | PATCH | Ensure the httpd_t Type is Not in Permissive Mode" + selinux_permissive: + name: httpd_t + permissive: true + when: + - apache_cis_11_3 + - apache_cis_section11 + tags: + - level2 + - scored + - patch + - rule_11.3 + +- name: "NOTSCORED | 11.4 | PATCH | Ensure Only the Necessary SELinux Booleans are Enabled" + seboolean: + name: httpd_enable_cgi + state: no + persistent: yes + when: + - apache_cis_11_4 + - apache_cis_section11 + tags: + - level2 + - notscored + - patch + - rule_11.4 + +# Section 12 +# ----------------------------- +# ----------------------------- +# AppArmor is not supported on RHEL based systems, this includes CentOS +# https://access.redhat.com/discussions/3983301 +# ----------------------------- +# ----------------------------- +- name: "SCORED | 12.1 | AUDIT | Ensure the AppArmor Framework Is Enabled" + debug: + msg: + - "This control is not applicable to RHEL based systems since AppArmor is not supported" + - "https://access.redhat.com/discussions/3983301" + when: + - apache_cis_12_1 + - apache_cis_section12 + tags: + - level2 + - scored + - audit + - rule_12.1 + +- name: "NOTSCORED | 12.2 | AUDIT | Ensure the Apache AppArmor Profile Is Configured Properly" + debug: + msg: + - "This control is not applicable to RHEL based systems since AppArmor is not supported" + - "https://access.redhat.com/discussions/3983301" + when: + - apache_cis_12_2 + - apache_cis_section12 + tags: + - level2 + - notscored + - audit + - rule_12.2 + +- name: "SCORED | 12.3 | AUDIT | Ensure Apache AppArmor Profile is in Enforce Mode" + debug: + msg: + - "This control is not applicable to RHEL based systems since AppArmor is not supported" + - "https://access.redhat.com/discussions/3983301" + when: + - apache_cis_12_3 + - apache_cis_section12 + tags: + - level2 + - scored + - audit + - rule_12.3 \ No newline at end of file diff --git 
a/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_ubuntu_fix.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_ubuntu_fix.yml new file mode 100644 index 0000000..3e02b5f --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/cis_apache_ubuntu_fix.yml @@ -0,0 +1,2137 @@ +--- +# Section 1 Fixes +- name: "NOTSCORED | 1.1 | AUDIT | Ensure the Pre-Installation Planning Checklist Has Been Implemented" + debug: + msg: + - "Warning! Make sure you have your pre-installation checklist completed" + - "Please refer to control 1.1 in the benchmark document for those items" + when: + - apache_cis_1_1 + - apache_cis_section1 + tags: + - level 1 + - level 2 + - notscored + - audit + - rule_1.1 + +- name: "NOTSCORED | 1.2 | AUDIT | Ensure the Server Is Not a Multi-Use System" + block: + - name: "NOT SCORED | 1.2 | AUDIT | Ensure the Server Is Not a Multi-Use System | Gather running services for review" + command: systemctl list-units --type=service --no-pager + changed_when: false + failed_when: false + register: apache_ubuntu_1_2_running_services + + - name: "NOTSCORED | 1.2 | AUDIT | Ensure the Server Is Not a Multi-Use System | Display running services" + debug: + msg: + - "Warning! Below are the running services. Please review" + - "Servers should only be single service" + - "{{ apache_ubuntu_1_2_running_services.stdout_lines }}" + when: + - apache_cis_1_2 + - apache_cis_section1 + tags: + - level1 + - level2 + - notscored + - audit + - rule_1.2 + +- name: "NOTSCORED | 1.3 | PATCH | Ensure Apache Is Installed From the Appropriate Binaries" + apt: + name: apache2 + state: present + when: + - apache_cis_1_1 + - apache_cis_section1 + - apache_cis_disruption_high + tags: + - level1 + - level2 + - notscored + - audit + - rule_1.3 + +# Section 2 Fixes +- name: "NOTSCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled" + block: + - name: "NOT SCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled | Capture Auth modules" + shell: apache2ctl -M + changed_when: false + failed_when: false + register: apache_ubuntu_2_1_loaded_modules + + - name: "NOTSCORED | 2.1 | AUDIT | Ensure Only Necessary Authentication and Authorization Modules Are Enabled | Display Modules" + debug: + msg: + - "Warning! Below are the installed modules. 
Please review and remove any un-needed modules" + - "Auth Modules:" + - "{{ apache_ubuntu_2_1_loaded_modules.stdout_lines }}" + when: + - apache_cis_2_1 + - apache_cis_section2 + tags: + - level1 + - level2 + - notscored + - audit + - rule_2.1 + +- name: "SCORED | 2.2 | PATCH | Ensure the Log Config Module Is Enabled" + block: + - name: "SCORED | 2.2 | PATCH | Ensure the Log Config Module Is Enabled | Check to see if mod_log_config already exists" + shell: apachectl -l | grep mod_log_config + changed_when: false + failed_when: false + register: apache_ubuntu_2_2_mod_log_config_state + + - name: "SCORED | 2.2 | PATCH | Ensure the Log Config Module Is Enabled | Enable mod_log_config if not enabled" + command: a2enmod mod_log_config + changed_when: apache_ubuntu_2_2_mod_log_config_enabled.rc == 0 + failed_when: apache_ubuntu_2_2_mod_log_config_enabled.rc >=1 + register: apache_ubuntu_2_2_mod_log_config_enabled + when: apache_ubuntu_2_2_mod_log_config_state == "" + when: + - apache_cis_2_2 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.2 + +- name: "SCORED | 2.3 | PATCH | Ensure the WebDAV Modules Are Disabled | Check for enabled WebDAV Module" + apache2_module: + name: "{{ item }}" + state: absent + force: yes + with_items: + - dav_fs + - dav + - dav_lock + notify: restart apache2 + when: + - apache_cis_2_3 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.3 + +- name: "SCORED | 2.4 | PATCH | Ensure the Status Module Is Disabled" + apache2_module: + name: status + state: absent + force: yes + notify: restart apache2 + when: + - apache_cis_2_4 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.4 + +- name: "SCORED | 2.5 | PATCH | Ensure the Autoindex Module Is Disabled" + apache2_module: + name: autoindex + state: absent + force: yes + notify: restart apache2 + when: + - apache_cis_2_5 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.5 + +- name: "SCORED | 2.6 | PATCH | Ensure the Proxy Modules Are Disabled" + apache2_module: + name: "{{ item }}" + state: absent + force: yes + with_items: + - proxy_ajp + - proxy_balancer + - proxy + - proxy_connect + - proxy_express + - proxy_fcgi + - proxy_fdpass + - proxy_ftp + - proxy_hcheck + - proxy_html + - proxy_http2 + - proxy_http + - proxy_scgi + - proxy_uwsgi + - proxy_wstunnel + notify: restart apache2 + when: + - apache_cis_2_6 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.6 + +- name: "SCORED | 2.7 | PATCH | Ensure the User Directories Module Is Disabled" + apache2_module: + name: userdir + state: absent + force: yes + notify: restart apache2 + when: + - apache_cis_2_7 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.7 + +- name: "SCORED | 2.8 | PATCH | Ensure the Info Module Is Disabled" + apache2_module: + name: info + state: absent + force: yes + notify: restart apache2 + when: + - apache_cis_2_8 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.8 + +- name: "SCORED | 2.9 | PATCH | Ensure the Basic and Digest Authentication Modules are Disabled" + apache2_module: + name: "{{ item }}" + state: absent + force: yes + with_items: + - auth_basic + - auth_digest + notify: restart apache2 + when: + - apache_cis_2_9 + - apache_cis_section2 + tags: + - level1 + - level2 + - scored + - patch + - rule_2.9 + +# Section 3 Fixes +# ---------------------------- +# ----------------------------- +# The 
service is stopped because if the service is running with the user you are trying to set the options for the task will fail since the user is associated with a process +# ------------------------------ +# ----------------------------- +- name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User" + block: + - name: "SCORED | 3.1 | AUDIT | Ensure the Apache Web Server Runs As a Non-Root User | Get configured user block" + block: + - name: "SCORED | 3.1 | AUDIT | Ensure the Apache Web Server Runs As a Non-Root User | Get configured user" + shell: ps aux | grep apache + changed_when: false + failed_when: false + register: apache_ubuntu_3_1_configured_user_group + + - name: "SCORED | 3.1 | AUDIT | Ensure the Apache Web Server Runs As a Non-Root User | Warn about user" + debug: + msg: + - "Warning! Below is the user and group for the apache service" + - "Please review to confirm it is a non-root user" + - "This task did not change the user information due to apache_cis_disruption_high being set to false" + - "Set that parameter to true and this task will create the user/group and configure as needed" + - "{{ apache_ubuntu_3_1_configured_user_group.stdout_lines }}" + when: not apache_cis_disruption_high + + - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User" + block: + - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Stop apache service" + service: + name: apache2 + state: stopped + + - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Create Apache group" + group: + name: "{{ apache_ubuntu_group }}" + system: yes + state: present + + - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Create Apache user" + user: + name: "{{ apache_ubuntu_user }}" + system: yes + group: "{{ apache_ubuntu_group }}" + home: /var/www + shell: /sbin/nologin + + - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Add user to configs" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: 'User .*', replace: 'User {{ apache_ubuntu_user }}' } + - { regexp: 'Group .*', replace: 'Group {{ apache_ubuntu_group }}' } + - name: "SCORED | 3.1 | PATCH | Ensure the Apache Web Server Runs As a Non-Root User | Start apache service" + service: + name: apache2 + state: started + when: apache_cis_disruption_high + when: + - apache_cis_3_1 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.1 + +- name: "SCORED | 3.2 | PATCH | Ensure the Apache User Account Has an Invalid Shell" + user: + name: "{{ apache_ubuntu_user }}" + shell: /sbin/nologin + when: + - apache_cis_3_2 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.2 + +- name: "SCORED | 3.3 | PATCH | Ensure the Apache User Account Is Locked" + user: + name: "{{ apache_ubuntu_user }}" + password_lock: yes + when: + - apache_cis_3_3 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.3 + +- name: "SCORED | 3.4 | PATCH | Ensure Apache Directories and Files Are Owned By Root" + file: + name: "{{ apache2_cis_server_root_dir }}" + owner: root + recurse: yes + when: + - apache_cis_3_4 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.4 + +- name: "SCORED | 3.5 | PATCH | Ensure the Group Is Set Correctly on Apache Directories and Files" + file: + 
name: "{{ apache2_cis_server_root_dir }}" + group: root + recurse: yes + when: + - apache_cis_3_5 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.5 + +- name: "SCORED | 3.6 | PATCH | Ensure Other Write Access on Apache Directories and Files Is Restricted" + file: + name: "{{ apache2_cis_server_root_dir }}" + mode: o-w + recurse: yes + when: + - apache_cis_3_6 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.6 + +- name: "SCORED | 3.7 | PATCH | Ensure the Core Dump Directory Is Secured" + block: + - name: "SCORED | 3.7 | AUDIT | Ensure the Core Dump Directory Is Secured | Find if CoreDumpDirectory is used" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | grep "CoreDumpDirectory" | cut -f2 -d " " + changed_when: false + failed_when: false + register: apache_ubuntu_3_7_coredumpdirectory + + - name: "SCORED | 3.7 | AUDIT | Ensure the Core Dump Directory Is Secured | Message if CoreDumpDirectory is Web Document Root Dir" + debug: + msg: "WARNING!! Your CoreDumpDirectory is using the Web Document Root directory: {{ apache_ubuntu_3_7_coredumpdirectory.stdout }}" + when: + - apache_ubuntu_3_7_coredumpdirectory.stdout == apache2_cis_doc_root_dir + + - name: "SCORED | 3.7 | PATCH | Ensure the Core Dump Directory Is Secured | Set CoreDumpDirectory in apache2.conf" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^CoreDumpDirectory ' + line: 'CoreDumpDirectory {{ apache_cis_core_dump_location }}' + notify: restart apache2 + when: apache_ubuntu_3_7_coredumpdirectory.stdout == "" + + - name: "SCORED | 3.7 | PATCH | Ensure the Core Dump Directory Is Secured" + file: + name: "{{ apache_cis_core_dump_location }}" + owner: root + group: apache + mode: o-rwx + recurse: yes + when: + - apache_cis_3_7 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.7 + +# LockFile is no longer the valid parameter, it was replaced with Mutex File +- name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured" + block: + - name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured | Add LockFile directive if needed" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^LockFile|^Mutex File' + line: 'Mutex File:{{ apache2_cis_lockfile_location }} default' + insertafter: '# Supplemental configuration' + notify: restart apache2 + + - name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured | Create LockFile in new location and set permissions if does not exist" + file: + path: "{{ apache2_cis_lockfile_location }}/apache2.lock" + owner: root + group: root + mode: '0750' + state: touch + + - name: "SCORED | 3.8 | PATCH | Ensure the Lock File Is Secured | Set LockFile permissions" + file: + path: "{{ apache2_cis_lockfile_location }}/apache2.lock" + owner: root + group: root + mode: '0750' + when: + - apache_cis_3_8 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.8 + +- name: "SCORED | 3.9 | PATCH | Ensure the Pid File Is Secured" + block: + - name: "SCORED | 3.9 | AUDIT | Ensure the Pid File Is Secured | Get PID location" + shell: find / -name apache2.pid + changed_when: false + failed_when: false + register: apache_ubuntu_3_9_apache2_pid_loc + + - name: "SCORED | 3.9 | PATCH | Ensure the Pid File Is Secured | Set permissions" + file: + path: "{{ apache_ubuntu_3_9_apache2_pid_loc.stdout }}" + owner: root + group: root + mode: o-w,g-w + + - name: "SCORED | 3.9 | AUDIT | Ensure the Pid File Is Secured | 
Warn if PID is in DocumentRoot dir" + debug: + msg: + - "WARNING!! Your PID file is in the DocumentRoot directory, to confirm with this STIG control" + - "please move to another folder that is not within the Document root directory ({{ apache2_cis_doc_root_dir.stdout }})" + when: apache2_cis_doc_root_dir == apache_ubuntu_3_9_apache2_pid_loc.stdout + when: + - apache_cis_3_9 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.9 + +# ----------------------------- +# ----------------------------- +# Missing last step to confirm ScoreBoardFile location is not on an externally mounted NFS system. Need to review how to do that +# ----------------------------- +# ----------------------------- +- name: "SCORED | 3.10 | PATCH | Ensure the ScoreBoard File Is Secured" + block: + - name: "SCORED | 3.10 | AUDIT | Ensure the ScoreBoard File Is Secured | Check if ScoreBoardFile is in use" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | grep ScoreBoardFile | cut -f2 -d" " + changed_when: false + failed_when: false + register: apache_ubuntu_3_10_scoreboardfile + + - name: "SCORED | 3.10 | AUDIT | Ensure the ScoreBoard File Is Secured | Warn if the same as DocumentRoot" + debug: + msg: + - "WARNING!! The ScoreBoardFile parameter is in use using the DocumentRoot location" + - "This does not conform to CIS standars. Please change the ScoreBoardFile location in {{ apache2_cis_server_root_dir }}/apache2.conf" + when: + - '"{{ apache2_cis_doc_root_dir }}" in apache_ubuntu_3_10_scoreboardfile.stdout' + + - name: "SCORED | 3.10 | PATCH | Ensure the ScoreBoard File Is Secured | Update file permissions" + file: + path: "{{ apache_ubuntu_3_10_scoreboardfile.stdout }}" + owner: root + group: root + mode: o-w,g-w + when: apache_ubuntu_3_10_scoreboardfile.stdout != "" + when: + - apache_cis_3_10 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.10 + - notimplimented + +- name: "SCORED | 3.11 | PATCH | Ensure Group Write Access for the Apache Directories and Files Is Properly Restricted" + file: + path: "{{ apache2_cis_server_root_dir }}" + mode: g-w + recurse: yes + when: + - apache_cis_3_11 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.11 + +- name: "SCORED | 3.12 | PATCH | Ensure Group Write Access for the Document Root Directories and Files Is Properly Restricted" + file: + path: "{{ apache2_cis_doc_root_dir }}" + mode: g-w + recurse: yes + when: + - apache_cis_3_12 + - apache_cis_section3 + tags: + - level1 + - level2 + - scored + - patch + - rule_3.12 + +# ----------------------------- +# ----------------------------- +# Control 3.13 Not Implemented +# ----------------------------- +# ----------------------------- + +- name: "NOTSCORED | 3.13 | PATCH | Ensure Access to Special Purpose Application Writable Directories is Properly Restricted" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_3_13 + - apache_cis_section3 + tags: + - level1 + - level2 + - notscored + - patch + - rule_3.13 + - notimplimented + +# Section 4 Fixes +- name: "SCORED | 4.1 | PATCH | Ensure Access to OS Root Directory Is Denied By Default" + block: + - name: "SCORED | 4.1 | AUDIT | Ensure Access to OS Root Directory Is Denied By Default | Get Root Directory" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | sed -n '//,/<\/Directory/p' + changed_when: false + failed_when: false + register: apache_ubuntu_4_1_root_directory + + - name: "SCORED | 4.1 | PATCH | Ensure Access to 
OS Root Directory Is Denied By Default | Replace Require if exists" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'Require .*' + replace: 'Require all denied' + after: '' + before: '' + notify: restart apache2 + when: '"Require" in apache_ubuntu_4_1_root_directory.stdout' + + - name: "SCORED | 4.1 | PATCH | Ensure Access to OS Root Directory Is Denied By Default | Enter Require if it doesn't exist" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + line: | + + Require all denied + notify: restart apache2 + when: '"Require" not in apache_ubuntu_4_1_root_directory.stdout' + when: + - apache_cis_4_1 + - apache_cis_section4 + tags: + - level1 + - level2 + - patch + - rule_4.1 + +# ----------------------------- +# ----------------------------- +# Control 4.2 requires dynamic changes to all of the relevant configuration elements. Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- + +- name: "NOTSCORED | 4.2 | PATCH | Ensure Appropriate Access to Web Content Is Allowed" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_4_2 + - apache_cis_section4 + tags: + - level1 + - level2 + - notscored + - patch + - rule_4.2 + - notimplimented + +- name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory" + block: + - name: "SCORED | 4.3 | AUDIT | Ensure OverRide Is Disabled for the OS Root Directory | Get Root Directory" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | sed -n '//,/<\/Directory/p' + changed_when: false + failed_when: false + register: apache_ubuntu_4_3_root_directory + + - name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory | Replace AllowOverride if exists" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'AllowOverride .*' + replace: 'AllowOverride None' + after: '' + before: '' + notify: restart apache2 + when: '"AllowOverride" in apache_ubuntu_4_3_root_directory.stdout' + + - name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory | Enter AllowOverride if it doesn't exist" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + line: | + + AllowOverride None + notify: restart apache2 + when: '"AllowOverride" not in apache_ubuntu_4_3_root_directory.stdout' + + - name: "SCORED | 4.3 | PATCH | Ensure OverRide Is Disabled for the OS Root Directory | Remove AllowOverrideList element" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'AllowOverrideList .*' + state: absent + notify: restart apache2 + when: + - apache_cis_4_3 + - apache_cis_section4 + tags: + - level1 + - level2 + - patch + - rule_4.3 + +- name: "SCORED | 4.4 | PATCH | Ensure OverRide Is Disabled for All Directories" + block: + - name: "SCORED | 4.4 | PATCH | Ensure OverRide Is Disabled for All Directories | Set AllowOverride to None" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'AllowOverride.*' + replace: 'AllowOverride None' + notify: restart apache2 + + - name: "SCORED | 4.4 | PATCH | Ensure OverRide Is Disabled for All Directories | Remove AllowOverrideList" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^ AllowOverrideList' + state: absent + notify: restart apache2 + when: + - apache_cis_4_4 + - apache_cis_section4 + tags: + - level1 + - level2 + - scored + - patch + - rule_4.4 + +# Section 5 Fixes +- name: "SCORED | 5.1 | PATCH | 
Ensure Options for the OS Root Directory Are Restricted" + block: + - name: "SCORED | 5.1 | AUDIT | Ensure Options for the OS Root Directory Are Restricted | Get Root Directory Settings" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | sed -n '//,/<\/Directory>/p' + changed_when: false + failed_when: false + register: apache_ubuntu_5_1_root_directory + + - name: "SCORED | 5.1 | PATCH | Ensure Options for the OS Root Directory Are Restricted | Replace if Options exist" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'Options.*' + replace: 'Options None' + after: '' + before: '' + notify: restart apache2 + when: '"Options" in apache_ubuntu_5_1_root_directory.stdout' + + - name: "SCORED | 5.1 | PATCH | Ensure Options for the OS Root Directory Are Restricted | Enter Options if doesn't exist" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + line: | + + Options None + notify: restart apache2 + when: '"Options" not in apache_ubuntu_5_1_root_directory.stdout' + when: + - apache_cis_5_1 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.1 + +# ----------------------------- +# ----------------------------- +# Dynamic directory testing setup. This control is not implemented +# ----------------------------- +# ----------------------------- +- name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted" + block: + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Get Document Root vHost settings" + raw: cat /etc/apache2/apache2.conf | sed -n "//,/<\/Directory>/p" + changed_when: false + failed_when: false + register: apache_rhel_5_2_vdir_doc_settings + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Get Document Root vHost settings" + raw: cat /etc/apache2/apache2.conf + changed_when: false + failed_when: false + register: test + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Escape path slashes" + set_fact: + apache_rhel_5_2_doc_root_dir: '{{ apache2_cis_doc_root_dir | replace("/","\/") }}' + + # - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Escape path slashes" + # set_fact: + # test_after: "{{ test | regex_search('([\\s\\S]*?)<\/Directory>', multiline=True) }}" + - debug: var=apache_ubuntu_5_2_vdir_doc_settings + - debug: var=apache_ubuntu_5_2_doc_root_dir + - debug: var=test_after + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Set options to None or Multiviews if options does not exist" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/conf/httpd.conf" + line: ' Options {{ apache_cis_webrootdir_options }}' + insertafter: '^' + when: "'Options' not in apache_ubuntu_5_2_vdir_doc_settings.stdout" + + - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Get Document Root vHost settings" + shell: cat /etc/httpd/conf/httpd.conf | sed -n "//,/<\/Directory>/p" + changed_when: false + failed_when: false + register: apache_ubuntu_5_2_vdir_doc_settings_2222222 + + - debug: var=apache_ubuntu_5_2_vdir_doc_settings_2222222 + # - name: "SCORED | 5.2 | PATCH | Ensure Options for the Web Root Directory Are Restricted | Set options to None or Multiviews if options does not exist" + # replace: + # path: "{{ apache_cis_server_root_dir }}" + # replace: ' Options.*' + # replace: ' Options {{ 
apache_cis_webrootdir_options }}' + # after: '^' + # before: '^' + when: + - apache_cis_5_2 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.2 + - notimplimented + +# ----------------------------- +# ----------------------------- +# Control 5.3/5.4requires dynamic changes to all elements. Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- +- name: "SCORED | 5.3 | PATCH | Ensure Options for Other Directories Are Minimized" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_5_3 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.3 + - notimplimented + +- name: "SCORED | 5.4 | PATCH | Ensure Default HTML Content Is Removed" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_5_4 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.4 + - notimplimented + +- name: "SCORED | 5.5 | PATCH | Ensure the Default CGI Content printenv Script Is Removed" + block: + - name: "SCORED | 5.5 | AUDIT | Ensure the Default CGI Content printenv Script Is Removed | Get CGI folder" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | grep -i "cgi-bin/" | grep -v "#" | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_ubuntu_5_5_cgi_bin_dir + + - name: "SCORED | 5.5 | PATCH | Ensure the Default CGI Content printenv Script Is Removed | Remove printenv" + file: + path: "{{ item }}/printenv" + state: absent + with_items: + - "{{ apache_ubuntu_5_5_cgi_bin_dir.stdout_lines }}" + when: apache_ubuntu_5_5_cgi_bin_dir.stdout != "" + when: + - apache_cis_5_5 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.5 + +- name: "SCORED | 5.6 | PATCH | Ensure the Default CGI Content test-cgi Script Is Removed" + block: + - name: "SCORED | 5.6 | AUDIT | Ensure the Default CGI Content test-cgi Script Is Removed | Get CGI folder" + shell: cat {{ apache2_cis_server_root_dir }}/conf/httpd.conf | grep -i "cgi-bin/" | grep -v "#" | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_ubuntu_5_6_cgi_bin_dir + + - name: "SCORED | 5.6 | PATCH | Ensure the Default CGI Content test-cgi Script Is Removed | Remove test-cgi folder" + file: + path: "{{ item }}/test-cgi" + state: absent + with_items: + - "{{ apache_ubuntu_5_6_cgi_bin_dir.stdout_lines }}" + when: apache_ubuntu_5_6_cgi_bin_dir.stdout != "" + when: + - apache_cis_5_6 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.6 + +# ----------------------------- +# ----------------------------- +# Control 5.7requires dynamic changes to all elements. 
Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- +- name: "SCORED | 5.7 | PATCH | Ensure HTTP Request Methods Are Restricted" + block: + - name: "SCORED | 5.7 | PATCH | Ensure HTTP Request Methods Are Restricted" + shell: 'cat /etc/apache2/apache2.conf | sed -n "//,/<\/Directory>/p"' + changed_when: false + failed_when: false + register: apache_ubuntu_5_7_vdir_doc_settings + when: + - apache_cis_5_7 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.7 + - notimplimented + +- name: "SCORED | 5.8 | PATCH | Ensure the HTTP TRACE Method Is Disabled" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'TraceEnable.*' + line: 'TraceEnable Off' + notify: restart apache2 + when: + - apache_cis_5_8 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.8 + +- name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed" + block: + - name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed | Enable re-write module" + apache2_module: + name: rewrite + state: present + force: yes + notify: restart apache2 + + - name: "SCORED | 5.9 | AUDIT | Ensure Old HTTP Protocol Versions Are Disallowed | Check for .htaccess file" + stat: + path: "{{ apache2_cis_doc_root_dir }}/html/.htaccess" + register: apache_ubuntu_5_9_htaccess_state + + - name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed | Create .htaccess file" + blockinfile: + path: "{{ apache2_cis_doc_root_dir }}/html/.htaccess" + marker: "# {mark} document root settings ANSIBLE MANAGED BLOCK" + create: yes + block: | + + Options Indexes FollowSymLinks + AllowOverride None + Require all granted + + notify: restart apache2 + when: not apache_ubuntu_5_9_htaccess_state.stat.exists + + - name: "SCORED | 5.9 | PATCH | Ensure Old HTTP Protocol Versions Are Disallowed | Set re-write settings" + lineinfile: + path: "{{ apache2_cis_doc_root_dir }}/html/.htaccess" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + notify: restart apache2 + with_items: + - { regexp: 'RewriteEngine .*', line: 'RewriteEngine On' } + - { regexp: 'RewriteOptions .*', line: 'RewriteOptions Inherit'} + - { regexp: 'RewriteCond .*', line: 'RewriteCond %{THE_REQUEST} !HTTP/1\.1$' } + - { regexp: 'RewriteRule .*', line: 'RewriteRule .* - [F]' } + when: + - apache_cis_5_9 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.9 + +- name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted" + block: + - name: "SCORED | 5.10 | AUDIT | Ensure Access to .ht* Files Is Restricted | Does FilesMatch exist" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | grep -ozP '([\S\s]*?)<\/FilesMatch>' + changed_when: false + failed_when: false + register: apache_ubuntu_5_10_files_match_param + + - name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted | Replace Require all setting" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'Require.*' + replace: 'Require all denied' + after: '' + before: '' + notify: restart apache2 + when: '"Require" in apache_ubuntu_5_10_files_match_param.stdout' + + - name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted | Add Require setting if missing" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + line: | + + Require all denied + notify: restart apache2 + when: '"Require" not in 
apache_ubuntu_5_10_files_match_param.stdout and apache_ubuntu_5_10_files_match_param.stdout != ""' + + - name: "SCORED | 5.10 | PATCH | Ensure Access to .ht* Files Is Restricted | Add FilesMatch settings" + blockinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + marker: "# {mark} FilesMatch ht Setting ANSIBLE MANAGED BLOCK" + block: | + + Require all denied + + notify: restart apache2 + when: apache_ubuntu_5_10_files_match_param.stdout == "" + when: + - apache_cis_5_10 + - apache_cis_section5 + tags: + - level1 + - level2 + - scored + - patch + - rule_5.10 + +- name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted" + block: + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Gather file extensions" + shell: find {{ apache2_cis_doc_root_dir }} -type f -name '*.*' | awk -F. '{print $NF }' | sort -u + changed_when: false + failed_when: false + register: apache_ubuntu_5_11_file_extentions + + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Find FilesMatch for all files" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | grep -ozP '([\S\s]*?)<\/FilesMatch>' + changed_when: false + failed_when: false + register: apache_ubuntu_5_11_files_match_1 + + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Find FilesMatch for specified files" + shell: cat {{ apache2_cis_server_root_dir }}/apache2.conf | grep -ozP '([\S\s]*?)<\/FilesMatch>' + changed_when: false + failed_when: false + register: apache_ubuntu_5_11_files_match_2 + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Replace Require in FilesMatch for all files" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: 'Require.*' + replace: 'Require all denied' + after: '' + before: '' + notify: restart apache2 + when: '"Require" in apache_ubuntu_5_11_files_match_1.stdout' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add Require in FilesMatch for all files" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + line: | + + Require all denied + notify: restart apache2 + when: '"Require" not in apache_ubuntu_5_11_files_match_1.stdout and apache_ubuntu_5_11_files_match_1.stdout != ""' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add FilesMatch for all files if missing" + blockinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + marker: "# {mark} FilesMatch All Settings ANSIBLE MANAGED BLOCK" + block: | + # Block all files by default, unless specifically allowed. 
+ + Require all denied + + notify: restart apache2 + when: apache_ubuntu_5_11_files_match_1.stdout == "" + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Set allowed file types" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + replace: '' + notify: restart apache2 + when: '"' + before: '' + notify: restart apache2 + when: '"Require" in apache_ubuntu_5_11_files_match_2.stdout' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add Require for allowed file types" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '' + line: | + + Require all granted + notify: restart apache2 + when: '"Require" not in apache_ubuntu_5_11_files_match_2.stdout and apache_ubuntu_5_11_files_match_2.stdout != ""' + + - name: "SCORED | 5.11 | PATCH | Ensure Access to Inappropriate File Extensions Is Restricted | Add FilesMatch for allowed files if missing" + blockinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + marker: "# {mark} FilesMatch File Type Settings ANSIBLE MANAGED BLOCK" + block: | + # Allow files with specifically approved file extensions + # Such as (css, htm; html; js; pdf; txt; xml; xsl; ...), + # images (gif; ico; jpeg; jpg; png; ...), multimedia + + Require all granted + + notify: restart apache2 + when: apache_ubuntu_5_11_files_match_2.stdout == "" + + - name: "SCORED | 5.11 | AUDIT | Ensure Access to Inappropriate File Extensions Is Restricted | Display file extensions" + debug: + msg: + - "Alert!! Below are the file extensions in use. Please review to make sure they are all approved" + - "{{ apache_ubuntu_5_11_file_extentions.stdout_lines }}" + when: + - apache_cis_5_11 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.11 + +- name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed" + block: + - name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed | Enable re-write module" + apache2_module: + name: rewrite + state: present + force: yes + notify: restart apache2 + + - name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed | Check for htaccess file" + stat: + path: "{{ apache2_cis_doc_root_dir }}/html/.htaccess" + register: apache_ubuntu_5_12_htaccess_state + - debug: var=apache_ubuntu_5_12_htaccess_state + + - name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed | Create .htaccess file if needed" + blockinfile: + path: "{{ apache2_cis_doc_root_dir }}/html/.htaccess" + marker: "# {mark} document root settings ANSIBLE MANAGED BLOCK" + create: yes + block: | + + Options Indexes FollowSymLinks + AllowOverride None + Require all granted + + notify: restart apache2 + when: not apache_ubuntu_5_12_htaccess_state.stat.exists + + - name: "SCORED | 5.12 | PATCH | Ensure IP Address Based Requests Are Disallowed | Set re-write settings" + lineinfile: + path: "{{ apache2_cis_doc_root_dir }}/html/.htaccess" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + notify: restart apache2 + with_items: + - { regexp: 'RewriteEngine .*', line: 'RewriteEngine On'} + - { regexp: 'RewriteCond %{HTTP_HOST}.*', line: 'RewriteCond %{HTTP_HOST} !^{{ apache_cis_toplevel_svr }} [NC]' } + - { regexp: 'RewriteCond %{REQUEST_URI}.*', line: 'RewriteCond %{REQUEST_URI} !^/error [NC]' } + - { regexp: 'RewriteRule \^\.\(\.\*\).*', line: 'RewriteRule ^.(.*) - [L,F]' } + when: + - apache_cis_5_12 + - apache_cis_section5 + tags: + - 
level2 + - scored + - patch + - rule_5.12 + +- name: "SCORED | 5.13 | PATCH | Ensure the IP Addresses for Listening for Requests Are Specified" + block: + - name: "SCORED | 5.13 | PATCH | Ensure the IP Addresses for Listening for Requests Are Specified | Remove Listen" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/ports.conf" + regexp: '^Listen.*' + state: absent + notify: restart apache2 + + - name: "SCORED | 5.13 | PATCH | Ensure the IP Addresses for Listening for Requests Are Specified" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/ports.conf" + line: 'Listen {{ item }}' + insertafter: '# prevent Apache from glomming onto all bound IP addresses' + with_items: + - "{{ apache_cis_listen_ip_port }}" + notify: restart apache2 + when: + - apache_cis_5_13 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.13 + +- name: "SCORED | 5.14 | PATCH | Ensure Browser Framing Is Restricted" + block: + - name: "SCORED | 5.14 | PATCH | Ensure Browser Framing Is Restricted | Enable headers module" + apache2_module: + name: headers + state: present + force: yes + notify: restart apache2 + + - name: "SCORED | 5.14 | PATCH | Ensure Browser Framing Is Restricted | Set header settings" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/conf-enabled/security.conf" + regexp: '^Header.*' + line: 'Header always append X-Frame-Options SAMEORIGIN' + insertafter: '# Supplemental configuration' + notify: restart apache2 + when: + - apache_cis_5_14 + - apache_cis_section5 + tags: + - level2 + - scored + - patch + - rule_5.14 + +# Section 6 Fixes +- name: | + "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly" + "SCORED | 6.2 | PATCH | Ensure a Syslog Facility Is Configured for Error Logging" + block: + - name: "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Add LogLevel setting" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^LogLevel' + line: 'LogLevel {{ apache_cis_loglevel.all_mods }} core:{{ apache_cis_loglevel.core_mod }}' + notify: restart apache2 + when: + - apache_cis_6_1 + + - name: "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Add ErrorLog path" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^ErrorLog' + line: 'ErrorLog "{{ apache_cis_errorlog_path }}/error.log"' + notify: restart apache2 + when: + - apache_cis_6_1 + - not apache_cis_6_2 + + - name: "SCORED | 6.2 | PATCH | Ensure a Syslog Facility Is Configured for Error Logging | Add ErrorLog path set to system:local1" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^ErrorLog' + line: 'ErrorLog "syslog:{{ apache_cis_errorlog_facility }}"' + notify: restart apache2 + when: + - apache_cis_6_2 + - not apache_cis_6_1 + + - name: | + "SCORED | 6.1 | PATCH | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Add combined ErrorLog" + "SCORED | 6.2 | PATCH | Ensure a Syslog Facility Is Configured for Error Logging | Add combined ErrorLog" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^ErrorLog.*' + line: 'ErrorLog "{{ apache_cis_errorlog_path }} syslog:{{ apache_cis_errorlog_facility }}"' + notify: restart apache2 + when: + - apache_cis_6_1 + - apache_cis_6_2 + + - name: "SCORED | 6.1 | AUDIT | Ensure the Error Log Filename and Severity Level Are Configured Correctly | Alert about virtual Directories" + 
debug: + msg: + - "Caution!! If you are using virutal directories please add the ErrorLog directive." + - "Each responsible individual or organization needs to access their own web logs and and needs the" + - "skills/training/tools for monitoring the logs." + when: + - apache_cis_6_1 or apache_cis_6_2 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.1 + - rule_6.2 + +# ----------------------------- +# ----------------------------- +# Control 6.2 combined with control 6.1 +# ----------------------------- +# ----------------------------- + +- name: "SCORED | 6.3 | PATCH | Ensure the Server Access Log Is Configured Correctly" + block: + - name: "SCORED | 6.3 | PATCH | Ensure the Server Access Log Is Configured Correctly | Remove LogFormat" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^LogFormat .*' + state: absent + notify: restart apache2 + + - name: "SCORED | 6.3 | PATCH | Ensure the Server Access Log Is Configured Correctly | Add/Modify LogFormat and CustomLog" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + notify: restart apache2 + with_items: + - { regexp: '^LogFormat "', line: 'LogFormat {{ apache_cis_log_format }} combined' } + - { regexp: '^CustomLog .*', line: 'CustomLog "{{ apache_cis_custom_log }}/access.log" combined' } + + - name: "SCORED | 6.3 | AUDIT | Ensure the Server Access Log Is Configured Correctly | Notify about need for vhost logging" + debug: + msg: + - "Caution!! If you are using virtual directories please add the LogFormat and and CustomLog directives" + - "if you have different people responsible for each web site. Each responsible individual or organization" + - "needs access to their own web logs as well as the skills/training/tools for monitoring the logs" + when: + - apache_cis_6_3 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.3 + +- name: "SCORED | 6.4 | PATCH | Ensure Log Storage and Rotation Is Configured Correctly" + block: + - name: "SCORED | 6.4 | PATCH | Ensure Log Storage and Rotation Is Configured Correctly | Edit logrotate.d/httpd" + lineinfile: + path: /etc/logrotate.d/apache2 + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: ' missingok', line: ' missingok', insertafter: '^/var/log/apache2/' } + - { regexp: ' notifempty', line: ' notifempty', insertafter: '^/var/log/apache2/' } + - { regexp: ' sharedscripts', line: ' sharedscripts', insertafter: '^/var/log/apache2/' } + - { regexp: ' postrotate', line: ' postrotate', insertafter: '^/var/log/apache2/' } + - { regexp: ' /bin', line: " /bin/kill -HUP 'cat /var/run/apache2/apache2.pid 2>/dev/null' 2> /dev/null || true", insertafter: ' postrotate' } + + - name: "SCORED | 6.4 | PATCH | Ensure Log Storage and Rotation Is Configured Correctly | Edit logrotate.conf" + lineinfile: + path: /etc/logrotate.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^# rotate log files ', line: '# rotate log files weekly', insertafter: '^# see' } + - { regexp: '^daily|weekly|monthly|yearly', line: 'weekly', insertafter: '^# rotate log files' } + - { regexp: '^# keep ([^\s]+) ([^\s]+) worth of backlogs', line: '# keep 13 weeks worth of backlogs', insertafter: '^# see'} + - { regexp: '^rotate', line: 'rotate 13', insertafter: '^# keep'} + when: + - apache_cis_6_4 + - 
apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.4 + +- name: "SCORED | 6.5 | PATCH | Ensure Applicable Patches Are Applied" + apt: + name: "{{ apache_cis_extra_packages }}" + state: latest + when: + - apache_cis_6_5 + - apache_cis_section6 + tags: + - level1 + - level2 + - scored + - patch + - rule_6.5 + +- name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled" + block: + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | Install security module" + apt: + name: libapache2-mod-security2 + state: present + + - name: "SCORED | 6.6 | PATCH | Ensure ModSecurity Is Installed and Enabled | Enable security module" + apache2_module: + name: security2 + state: present + force: yes + when: + - apache_cis_6_6 + - apache_cis_section6 + tags: + - level2 + - scored + - patch + - rule_6.6 + +- name: "SCORED | 6.7 | PATCH | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled" + block: + - name: "SCORED | 6.7 | PATCH | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Install OWASP Core Rule Set" + apt: + name: modsecurity-crs + state: present + when: + - apache_cis_owasp_automate + + - name: "SCORED | 6.7 | AUDIT | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Create config folder if it doesn't exist" + file: + name: /etc/modsecurity/crs/ + state: directory + when: + - apache_cis_owasp_automate + + - name: "SCORED | 6.7 | AUDIT | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Apply Configuration" + template: + src: crs-setup.conf.j2 + dest: "/etc/modsecurity/crs/crs-setup.conf" + when: + - apache_cis_owasp_automate + + - name: "SCORED | 6.7 | AUDIT | Ensure the OWASP ModSecurity Core Rule Set Is Installed and Enabled | Message out when not automated" + debug: + msg: + - "ALERT!!!! To conform to CIS standards you need OWASP installed and enabled" + - "Please go through the process of setup" + when: not apache_cis_owasp_automate + when: + - apache_cis_6_7 + - apache_cis_section6 + tags: + - level2 + - scored + - audit + - rule_6.7 + +# Section 7 Fixes +- name: "SCORED | 7.1 | PATCH | Ensure mod_ssl and/or mod_nss Is Installed" + block: + - name: "SCORED | 7.1 | PATCH | Ensure mod_ssl and/or mod_nss Is Installed | Enable mod_ssl" + apache2_module: + name: ssl + state: present + force: yes + when: + - apache_cis_7_1 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.1 + +# ----------------------------- +# ----------------------------- +# This control has 13 steps in the remediation. 
I think this is the simplest way to handle it +# ----------------------------- +# ----------------------------- +- name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed" + block: + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Generate private key" + openssl_privatekey: + path: "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.key" + cipher: aes128 + size: 2048 + passphrase: "{{ apache_cis_privatekey_passphrase }}" + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Convert to clear text" + command: openssl rsa -in /etc/ssl/certs/{{ apache_cis_hostname_cert }}.key -out /etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear -passin pass:{{ apache_cis_privatekey_passphrase }} + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Create CSR folder" + file: + path: "{{ apache_cis_csr_folder }}" + state: directory + mode: '0755' + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Create CSR" + openssl_csr: + path: "{{ apache_cis_csr_folder }}/{{ apache_cis_hostname_cert}}.csr" + privatekey_path: /etc/ssl/certs/{{ apache_cis_hostname_cert }}.key + privatekey_passphrase: "{{ apache_cis_privatekey_passphrase }}" + country_name: "{{ apache_req_distinguished_name_settings.countryName_default }}" + state_or_province_name: "{{ apache_req_distinguished_name_settings.stateOrProvinceName_default }}" + locality_name: "{{ apache_req_distinguished_name_settings.localityName_default }}" + organization_name: "{{ apache_req_distinguished_name_settings.organizationName_default }}" + organizational_unit_name: "{{ apache_req_distinguished_name_settings.organizationalUnitName_default }}" + common_name: "{{ apache_req_distinguished_name_settings.commonName_default }}" + email_address: "{{ apache_req_distinguished_name_settings.email_address }}" + subject_alt_name: "{{ apache_cis_alt_names | list }}" + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Send CSR to Certificate Authority" + openssl_certificate: + path: "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.crt" + privatekey_path: "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.key" + privatekey_passphrase: "{{ apache_cis_privatekey_passphrase }}" + csr_path: "{{ apache_cis_csr_folder }}/{{ apache_cis_hostname_cert }}.csr" + provider: selfsigned + mode: '0444' + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Modify ssl.conf, self signed" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/sites-available/default-ssl.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ SSLCertificateFile', line: ' SSLCertificateFile /etc/ssl/certs/{{ apache_cis_hostname_cert }}.crt' } + - { regexp: '^ SSLCertificateKeyFile', line: ' SSLCertificateKeyFile /etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear' } + notify: restart apache2 + when: not apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Copy custom CA's over" + copy: + src: "{{ item.src }}" + dest: "{{ item.dest }}" + mode: '0444' + with_items: + - { src: "custom_cert/custom_cert_file.crt", dest: "/etc/ssl/certs/custom_cert_file.crt" } + - { src: "custom_cert/custom_cert_key.key", dest: "/etc/ssl/certs/custom_certkey.key" } + 
when: apache_cis_custom_cert + + - name: "SCORED | 7.2 | PATCH | Ensure a Valid Trusted Certificate Is Installed | Modify ssl.conf, custom cert" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/sites-available/default-ssl.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ SSLCertificateFile', line: '/ssl/certs/custom_cert_file.crt' } + - { regexp: '^ SSLCertificateKeyFile', line: ' SSLCertificateKeyFile /etc/ssl/certs/custom_cert_key.key' } + notify: restart apache2 + when: apache_cis_custom_cert + when: + - apache_cis_7_2 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.2 + +- name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected" + block: + - name: "SCORED | 7.3 | AUDIT | Ensure the Server's Private Key Is Protected | Find config files with SSLCertificateFile parameter" + shell: "grep -r SSLCertificateFile {{ apache2_cis_server_root_dir }}/* | grep -v Binary | cut -f1 -d: | sort --unique" + changed_when: false + failed_when: false + register: apache_ubuntu_7_3_sslcertificatefile_location + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Enforce SSLCertificateKeyFile is configured (selfsigned)" + lineinfile: + path: "{{ item }}" + regexp: '^SSLCertificateKeyFile' + line: 'SSLCertificateKeyFile /etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear' + with_items: + - "{{ apache_ubuntu_7_3_sslcertificatefile_location.stdout_lines }}" + notify: restart apache2 + when: not apache_cis_custom_cert + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Enforce SSLCertificateKeyFile is configured (custom cert)" + lineinfile: + path: "{{ item }}" + regexp: '^SSLCertificateKeyFile' + line: 'SSLCertificateKeyFile /etc/ssl/certs/custom_cert_key.key' + with_items: + - "{{ apache_ubuntu_7_3_sslcertificatefile_location.stdout_lines }}" + notify: restart apache2 + when: apache_cis_custom_cert + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Set permissions on SSLCertificateKeyFile (selfsigned)" + file: + path: "{{ item }}" + owner: root + group: root + mode: '0400' + with_items: + - "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.key" + - "/etc/ssl/certs/{{ apache_cis_hostname_cert }}.clear" + when: not apache_cis_custom_cert + + - name: "SCORED | 7.3 | PATCH | Ensure the Server's Private Key Is Protected | Set permissions on SSLCertificateKeyFile (custom cert)" + file: + path: /etc/ssl/certs/custom_cert_key.key + owner: root + group: root + mode: '0400' + when: apache_cis_custom_cert + when: + - apache_cis_7_3 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.3 + +- name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are Disabled" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled" + block: + - name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are Disabled | Set TLSv1.2 and TLSv1.3" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled | Set TLSv1.2 and TLSv1.3" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/mods-available/ssl.conf" + regexp: ' #SSLProtocol| SSLProtocol' + line: ' SSLProtocol TLSv1.2 TLSv1.3' + backrefs: yes + insertbefore: '^' + notify: restart apache2 + when: + - apache_cis_tls_1_2_available + + - name: | + "SCORED | 7.4 | PATCH | Ensure Weak SSL Protocols Are Disabled | Set TLSv1" + "SCORED | 7.10 | PATCH | Ensure the TLSv1.0 and TLSv1.1 Protocols are Disabled | Set 
TLSv1" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/mods-available/ssl.conf" + regexp: ' #SSLProtocol| SSLProtocol' + line: ' SSLProtocol TLSv1' + backrefs: yes + insertbefore: '^' + notify: restart apache2 + when: not apache_cis_tls_1_2_available + when: + - apache_cis_7_4 or + apache_cis_7_10 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.4 + - rule_7.10 + +# ----------------------------- +# ----------------------------- +# Since the 7.5 and 7.8 controls are virtually the same, the apache_cis_sslciphersuite_settings variable controls whether weak and/or medium strength ciphers are disabled +# ----------------------------- +# ----------------------------- +- name: | + "SCORED | 7.5 | PATCH | Ensure Weak SSL/TLS Ciphers Are Disabled" + "SCORED | 7.8 | PATCH | Ensure Medium Strength SSL/TLS Ciphers Are Disabled" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/mods-available/ssl.conf" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + backrefs: yes + insertbefore: '^' + with_items: + - { regexp: ' #SSLHonorCipherOrder| SSLHonorCipherOrder', line: ' SSLHonorCipherOrder on' } + - { regexp: ' #SSLCipherSuite| SSLCipherSuite', line: ' SSLCipherSuite {{ apache_cis_sslciphersuite_settings }}' } + notify: restart apache2 + when: + - apache_cis_7_5 or + apache_cis_7_8 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.5 + - rule_7.8 + +# ----------------------------- +# ----------------------------- +# Control 7.8 combined with control 7.5 +# ----------------------------- +# ----------------------------- + +- name: "SCORED | 7.9 | PATCH | Ensure All Web Content is Accessed via HTTPS" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/sites-available/000-default.conf" + regexp: ' Redirect' + line: ' Redirect permanent / {{ apache_cis_tls_redirect }}' + # backrefs: yes + insertbefore: '^' + notify: restart apache2 + when: + - apache_cis_7_9 + - apache_cis_section7 + tags: + - level1 + - level2 + - scored + - patch + - rule_7.9 + +# ----------------------------- +# ----------------------------- +# Control 7.10 combined with control 7.4 +# ----------------------------- + +# ----------------------------- +# ----------------------------- +# The three tasks below require dynamic changes to the relevant configuration elements. 
Need to figure out best lineinfile way to handle this +# ----------------------------- +# ----------------------------- +- name: "SCORED | 7.11 | PATCH | Ensure OCSP Stapling Is Enabled" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_7_11 + - apache_cis_section7 + tags: + - level2 + - scored + - patch + - rule_7.11 + - notimplimented + +- name: "SCORED | 7.12 | PATCH | Ensure HTTP Strict Transport Security Is Enabled" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_7_12 + - apache_cis_section7 + tags: + - level2 + - scored + - patch + - rule_7.12 + - notimplimented + +- name: "SCORED | 7.13 | PATCH | Ensure Only Cipher Suites That Provide Forward Secrecy Are Enabled" + command: /bin/true + changed_when: false + failed_when: false + when: + - apache_cis_7_13 + - apache_cis_section7 + tags: + - level2 + - scored + - patch + - rule_7.13 + - notimplimented + +# Section 8 Fixes +- name: "SCORED | 8.1 | PATCH | Ensure ServerTokens is Set to 'Prod' or 'ProductOnly'" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/conf-available/security.conf" + regexp: '^ServerTokens' + line: 'ServerTokens {{ apache_cis_servertokens }}' + when: + - apache_cis_8_1 + - apache_cis_section8 + tags: + - level1 + - level2 + - scored + - patch + - rule_8.1 + +- name: "SCORED | 8.2 | PATCH | Ensure ServerSignature Is Not Enabled" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/conf-available/security.conf" + regexp: '^ServerSignature' + line: 'ServerSignature Off' + when: + - apache_cis_8_2 + - apache_cis_section8 + tags: + - level1 + - level2 + - scored + - patch + - rule_8.2 + +- name: "SCORED | 8.3 | PATCH | Ensure All Default Apache Content Is Removed" + replace: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^Include conf/extra/httpd-autoindex.conf' + replace: '#Include conf/extra/httpd-autoindex.conf' + when: + - apache_cis_8_3 + - apache_cis_section8 + tags: + - level2 + - scored + - patch + - rule_8.3 + +- name: "SCORED | 8.4 | PATCH | Ensure ETag Response Header Fields Do Not Include Inodes" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/conf-enabled/security.conf" + regexp: 'FileETag' + state: absent + when: + - apache_cis_8_4 + - apache_cis_section8 + tags: + - level2 + - scored + - patch + - rule_8.4 + +# Section 9 Fixes +- name: "SCORED | 9.1 | PATCH | Ensure the TimeOut Is Set to 10 or Less" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# Timeout|^Timeout' + line: 'Timeout {{ apache_cis_timeout }}' + when: + - apache_cis_9_1 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.1 + +- name: "SCORED | 9.2 | PATCH | Ensure KeepAlive Is Enabled" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# KeepAlive |^KeepAlive ' + line: 'KeepAlive On' + when: + - apache_cis_9_2 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.2 + +- name: "SCORED | 9.3 | PATCH | Ensure MaxKeepAliveRequests is Set to a Value of 100 or Greater" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# MaxKeepAliveRequests|^MaxKeepAliveRequests' + line: 'MaxKeepAliveRequests {{ apache_cis_maxkeepaliverequests }}' + when: + - apache_cis_9_3 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.3 + +- name: "SCORED | 9.4 | PATCH | Ensure KeepAliveTimeout is Set to a Value of 15 or Less" + lineinfile: + path: "{{ 
apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# KeepAliveTimeout|^KeepAliveTimeout' + line: 'KeepAliveTimeout {{ apache_cis_keepalivetimeout }}' + when: + - apache_cis_9_4 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.4 + +# ----------------------------- +# ----------------------------- +# Control 9.5 and 9.6 are hard to set individually so they are combined to be a single line. +# ----------------------------- +# ----------------------------- +- name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less" + block: + - name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Enable Request Timeout module" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less | Enable Request Timeout module" + apache2_module: + name: reqtimeout + state: present + force: yes + + - name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Remove existing RequestReadTimeout if needed" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less| Remove existing RequestReadTimeout if needed" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/mods-available/reqtimeout.conf" + regexp: 'RequestReadTimeout' + # backrefs: yes + state: absent + + - name: | + "SCORED | 9.5 | PATCH | Ensure the Timeout Limits for Request Headers is Set to 40 or Less | Add reqtimeout_module, set body, and header timeout" + "SCORED | 9.6 | PATCH | Ensure Timeout Limits for the Request Body is Set to 20 or Less| Add reqtimeout_module, set body, and header timeout" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/mods-available/reqtimeout.conf" + regexp: '^ RequestReadTimeout' + line: ' RequestReadTimeout header={{ apache_cis_reqread_timeout }},MinRate=500 body={{ apache_cis_reqread_body }},MinRate=500' + insertafter: '^' + when: + - apache_cis_9_5 or + apache_cis_9_6 + - apache_cis_section9 + tags: + - level1 + - level2 + - scored + - patch + - rule_9.5 + - rule_9.6 + +# ----------------------------- +# ----------------------------- +# Control 9.6 combined with control 9.5 +# ----------------------------- +# ----------------------------- + +# Section 10 Fixes +- name: "SCORED | 10.1 | PATCH | Ensure the LimitRequestLine directive is Set to 512 or less" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# LimitRequestline|^LimitRequestline' + line: 'LimitRequestline {{ apache_cis_limitrequestline }}' + when: + - apache_cis_10_1 + - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.1 + +- name: "SCORED | 10.2 | PATCH | Ensure the LimitRequestFields Directive is Set to 100 or Less" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# LimitRequestFields|^LimitRequestFields' + line: 'LimitRequestFields {{ apache_cis_limitrequestfields }}' + when: + - apache_cis_10_2 + - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.2 + +- name: "SCORED | 10.3 | PATCH | Ensure the LimitRequestFieldsize Directive is Set to 1024 or Less" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# LimitRequestFieldsize|^LimitRequestFieldsize' + line: 'LimitRequestFieldsize {{ apache_cis_limitrequestfieldsize }}' + when: + - apache_cis_10_3 + - apache_cis_section10 + tags: + - 
level2 + - scored + - patch + - rule_10.3 + +- name: "SCORED | 10.4 | PATCH | Ensure the LimitRequestBody Directive is Set to 102400 or Less" + lineinfile: + path: "{{ apache2_cis_server_root_dir }}/apache2.conf" + regexp: '^# LimitRequestBody|^LimitRequestBody' + line: 'LimitRequestBody {{ apache_cis_limitrequestbody }}' + when: + - apache_cis_10_4 + - apache_cis_section10 + tags: + - level2 + - scored + - patch + - rule_10.4 + +# Section 11 Fixes +- name: "SCORED | 11.1 | PATCH | Ensure SELinux Is Enabled in Enforcing Mode" + selinux: + policy: default + state: enforcing + when: + - apache_cis_11_1 + - apache_cis_section11 + - apache2_cis_selinux + tags: + - level2 + - scored + - patch + - rule_11.1 + +- name: "SCORED | 11.2 | PATCH | Ensure Apache Processes Run in the httpd_t Confined Context" + sefcontext: + target: "{{ item }}" + setype: httpd_exec_t + ftype: f + with_items: + - /usr/sbin/httpd + - /usr/sbin/httpd.worker + - /usr/sbin/httpd.event + - /usr/sbin/apachectl + when: + - apache_cis_11_2 + - apache_cis_section11 + - apache2_cis_selinux + tags: + - level2 + - scored + - patch + - rule_11.2 + +- name: "SCORED | 11.3 | PATCH | Ensure the httpd_t Type is Not in Permissive Mode" + selinux_permissive: + name: httpd_t + permissive: false + when: + - apache_cis_11_3 + - apache_cis_section11 + - apache2_cis_selinux + tags: + - level2 + - scored + - patch + - rule_11.3 + +- name: "NOTSCORED | 11.4 | PATCH | Ensure Only the Necessary SELinux Booleans are Enabled" + seboolean: + name: httpd_enable_cgi + state: no + persistent: yes + when: + - apache_cis_11_4 + - apache_cis_section11 + - apache2_cis_selinux + tags: + - level2 + - notscored + - patch + - rule_11.4 + +# Section 12 Fixes +- name: "SCORED | 12.1 | PATCH | Ensure the AppArmor Framework Is Enabled" + block: + - name: "SCORED | 12.1 | PATCH | Ensure the AppArmor Framework Is Enabled | Install AppArmor and module" + apt: + name: ['apparmor', 'libapache2-mod-apparmor', 'apparmor-utils'] + state: present + + - name: "SCORED | 12.1 | PATCH | Ensure the AppArmor Framework Is Enabled | Enable apparmor" + service: + name: apparmor + enabled: yes + state: started + when: + - apache_cis_12_1 + - apache_cis_section12 + - not apache2_cis_selinux + tags: + - level2 + - scored + - patch + - rule_12.1 + +- name: "NOTSCORED | 12.2 | PATCH | Ensure the Apache AppArmor Profile Is Configured Properly" + block: + - name: "NOTSCORED | 12.2 | PATCH | Ensure the Apache AppArmor Profile Is Configured Properly | Stop apache service" + service: + name: apache2 + state: stopped + + - name: "NOTSCORED | 12.2 | PATCH | Ensure the Apache AppArmor Profile Is Configured Properly | Create mostly empty apache2 profile" + command: aa-autodep apache2 + changed_when: "'Writing' in apache_ubuntu_12_2_autodep_state.stdout" + register: apache_ubuntu_12_2_autodep_state + + - name: "NOTSCORED | 12.2 | PATCH | Ensure the Apache AppArmor Profile Is Configured Properly | Set apache2 profile to complain mode" + command: aa-complain apache2 + changed_when: "'Setting' in apache_ubuntu_12_2_complain_state.stdout" + register: apache_ubuntu_12_2_complain_state + + - name: "NOTSCORED | 12.2 | PATCH | Ensure the Apache AppArmor Profile Is Configured Properly | Start apache service" + service: + name: apache2 + state: started + + - name: "NOTSCORED | 12.2 | PATCH | Ensure the Apache AppArmor Profile Is Configured Properly | Set enforce for apache2" + command: aa-enforce /usr/sbin/apache2 + changed_when: "'Setting' in apache_ubuntu_12_2_enforcing_state.stdout" + register: 
apache_ubuntu_12_2_enforcing_state + + - name: "NOTSCORED | 12.2 | AUDIT | Ensure the Apache AppArmor Profile Is Configured Properly | Testing instructions" + debug: + msg: + - "Warning!!!! Thoroughly test the web application attempting to exercise all intended functionality so that" + - "AppArmor will generate necessary logs of all resources accessed. After those logs are reviewed run the" + - "following command 'aa-logprof' to update the profile based on the logs" + - "You will also want to edit the profile to remove any inappropriate content and adding appropriate access rules." + - "Once the profile is update run this command 'apparmor_parser -r /etc/apparmor.d/usr.sbin.apache2' to reload the" + - "updated profile" + when: + - apache_cis_12_2 + - apache_cis_section12 + - not apache2_cis_selinux + tags: + - level2 + - notscored + - patch + - rule_12.2 + +- name: "SCORED | 12.3 | PATCH | Ensure Apache AppArmor Profile is in Enforce Mode" + block: + - name: "SCORED | 12.3 | PATCH | Ensure Apache AppArmor Profile is in Enforce Mode" + command: aa-enforce /usr/sbin/apache2 + changed_when: "'Setting' in apache_ubuntu_12_3_enforcing_state.stdout" + register: apache_ubuntu_12_3_enforcing_state + + - name: "SCORED | 12.3 | PATCH | Ensure Apache AppArmor Profile is in Enforce Mode | Restart apache2" + service: + name: apache2 + state: restarted + when: + - apache_cis_12_3 + - apache_cis_section12 + tags: + - level2 + - scored + - patch + - rule_12.3 \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/main.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/main.yml new file mode 100644 index 0000000..4dabd30 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/main.yml @@ -0,0 +1,27 @@ +--- +- name: Gather OS Type + setup: + gather_subset: distribution,!all,!min + when: + - ansible_distribution is not defined + tags: + - always + +- name: Include Prelim Tasks + import_tasks: prelim.yml + tags: + - prelim_tasks + +- name: RedHat Fixes + import_tasks: cis_apache_redhat_fix.yml + when: + - ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS' + tags: + - RedHat + +- name: Ubuntu Fixes + import_tasks: cis_apache_ubuntu_fix.yml + when: + - ansible_distribution_file_variety == "Debian" + tags: + - Ubuntu \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/prelim.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/prelim.yml new file mode 100644 index 0000000..0c62037 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/tasks/prelim.yml @@ -0,0 +1,111 @@ +--- +- name: Install Apache + block: + - name: Install httpd RHEL7 based + yum: + name: httpd + state: present + when: ansible_facts.distribution_file_variety == "RedHat" and ansible_facts.distribution_major_version == "7" + + - name: Install httpd RHEL8 based + dnf: + name: httpd + state: present + when: ansible_facts.distribution_file_variety == "RedHat" and ansible_facts.distribution_major_version == "8" + + - name: Install Apache2 + apt: + name: apache2 + state: present + when: ansible_distribution_file_variety == "Debian" + + - name: Start httpd + service: + name: httpd + state: started + when: ansible_facts.distribution_file_variety == "RedHat" + + - name: Start Apache2 + service: + name: apache2 + state: started + when: ansible_distribution_file_variety == "Debian" + when: automate_install + +- name: Add additional RHEL repo + command: subscription-manager repos --enable rhel-7-server-optional-rpms + when: ansible_distribution == "RedHat" and 
ansible_facts.distribution_major_version == "7" + +- name: Install needed packages + block: + - name: Install RHEL 8 based packages + dnf: + name: ['mod_session', 'mod_ssl', 'apr-util-openssl', 'python3-lxml'] + state: present + when: ansible_facts.distribution_file_variety == "RedHat" and ansible_facts.distribution_major_version == "8" + + - name: Install RHEL 7 based packages + yum: + name: ['mod_session', 'mod_ssl', 'apr-util-openssl', 'python-lxml, pyOpenSSL, python2-cryptography,libselinux-python,policycoreutils-python'] + state: present + when: ansible_facts.distribution_file_variety == "RedHat" and ansible_facts.distribution_major_version == "7" + + - name: Install Ubuntu based packages + apt: + name: ['python-lxml', 'auditd', 'policycoreutils', 'selinux-utils', 'selinux-basics'] + state: present + when: ansible_facts.distribution_file_variety == "Debian" and ansible_facts.distribution_major_version == "20" + +- name: Get ServerRoot folder + block: + - name: Get ServerRoot folder RHEL + shell: grep 'ServerRoot "' /etc/httpd/conf/httpd.conf | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_cis_server_root_dir_gather + when: ansible_facts.distribution_file_variety == "RedHat" + + - name: Get ServerRoot folder Ubuntu + shell: find / -name "apache2.conf" | sed 's|\(.*\)/.*|\1|' + changed_when: false + failed_when: false + register: apache2_cis_server_root_dir_gather + when: ansible_facts.distribution_file_variety == "Debian" + - debug: var=apache2_cis_server_root_dir_gather + +- name: Get DocumentRoot folder + block: + - name: Get DocumentRoot folder RHEL + shell: cat /etc/httpd/conf/httpd.conf | grep "DocumentRoot " | cut -f2 -d'"' + changed_when: false + failed_when: false + register: apache_cis_doc_root_dir_gather + when: ansible_facts.distribution_file_variety == "RedHat" + + - name: Get DocumentRoot folder + shell: grep "DocumentRoot" /etc/apache2/sites-available/000-default.conf | sed 's/^[ \t]*//;s/[ \t]*$//' | sed 's|\(.*\)/.*|\1|' | cut -f2 -d" " + changed_when: false + failed_when: false + register: apache2_cis_doc_root_dir_gather + when: ansible_facts.distribution_file_variety == "Debian" + +- name: Install policycoreutils-python-utils + block: + - name: Install policycoreutils-python-utils RHEL 8 + dnf: + name: policycoreutils-python-utils + state: present + when: ansible_facts.distribution_file_variety == "RedHat" and ansible_facts.distribution_major_version == "8" + + - name: Install policycoreutils-python-utils RHEL 7 and lower + yum: + name: policycoreutils-python-utils + state: present + when: ansible_facts.distribution_file_variety == "RedHat" and ansible_facts.distribution_major_version == "8" + when: + - apache_cis_11_2 + - apache_cis_section11 + +# - name: Install PyOpenSSL +# apt: +# name: \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/templates/crs-setup.conf.j2 b/Linux/ansible-lockdown/APACHE-2.4-CIS/templates/crs-setup.conf.j2 new file mode 100644 index 0000000..194d3a3 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/templates/crs-setup.conf.j2 @@ -0,0 +1,772 @@ +# ------------------------------------------------------------------------ +# OWASP ModSecurity Core Rule Set ver.3.0.0 +# Copyright (c) 2006-2016 Trustwave and contributors. All rights reserved. +# +# The OWASP ModSecurity Core Rule Set is distributed under +# Apache Software License (ASL) version 2 +# Please see the enclosed LICENSE file for full details. 
+# ------------------------------------------------------------------------ + + +# +# -- [[ Introduction ]] -------------------------------------------------------- +# +# The OWASP ModSecurity Core Rule Set (CRS) is a set of generic attack +# detection rules that provide a base level of protection for any web +# application. They are written for the open source, cross-platform +# ModSecurity Web Application Firewall. +# +# See also: +# https://modsecurity.org/crs/ +# https://github.com/SpiderLabs/owasp-modsecurity-crs +# https://www.owasp.org/index.php/Category:OWASP_ModSecurity_Core_Rule_Set_Project +# + + +# +# -- [[ System Requirements ]] ------------------------------------------------- +# +# CRS requires ModSecurity version 2.8.0 or above. +# We recommend to always use the newest ModSecurity version. +# +# The configuration directives/settings in this file are used to control +# the OWASP ModSecurity CRS. These settings do **NOT** configure the main +# ModSecurity settings (modsecurity.conf) such as SecRuleEngine, +# SecRequestBodyAccess, SecAuditEngine, SecDebugLog, and XML processing. +# +# The CRS assumes that modsecurity.conf has been loaded. If you don't have this +# file, you can get it from: +# https://github.com/SpiderLabs/ModSecurity/blob/master/modsecurity.conf-recommended +# +# The order of file inclusion in your webserver configuration should always be: +# 1. modsecurity.conf +# 2. crs-setup.conf (this file) +# 3. rules/*.conf (the CRS rule files) +# +# Please refer to the INSTALL file for detailed installation instructions. +# + + +# +# -- [[ Mode of Operation: Anomaly Scoring vs. Self-Contained ]] --------------- +# +# The CRS can run in two modes: +# +# -- [[ Anomaly Scoring Mode (default) ]] -- +# In CRS3, anomaly mode is the default and recommended mode, since it gives the +# most accurate log information and offers the most flexibility in setting your +# blocking policies. It is also called "collaborative detection mode". +# In this mode, each matching rule increases an 'anomaly score'. +# At the conclusion of the inbound rules, and again at the conclusion of the +# outbound rules, the anomaly score is checked, and the blocking evaluation +# rules apply a disruptive action, by default returning an error 403. +# +# -- [[ Self-Contained Mode ]] -- +# In this mode, rules apply an action instantly. This was the CRS2 default. +# It can lower resource usage, at the cost of less flexibility in blocking policy +# and less informative audit logs (only the first detected threat is logged). +# Rules inherit the disruptive action that you specify (i.e. deny, drop, etc). +# The first rule that matches will execute this action. In most cases this will +# cause evaluation to stop after the first rule has matched, similar to how many +# IDSs function. +# +# -- [[ Alert Logging Control ]] -- +# In the mode configuration, you must also adjust the desired logging options. +# There are three common options for dealing with logging. By default CRS enables +# logging to the webserver error log (or Event viewer) plus detailed logging to +# the ModSecurity audit log (configured under SecAuditLog in modsecurity.conf). +# +# - To log to both error log and ModSecurity audit log file, use: "log,auditlog" +# - To log *only* to the ModSecurity audit log file, use: "nolog,auditlog" +# - To log *only* to the error log file, use: "log,noauditlog" +# +# Examples for the various modes follow. +# You must leave one of the following options enabled. 
+# Note that you must specify the same line for phase:1 and phase:2. +# + +# Default: Anomaly Scoring mode, log to error log, log to ModSecurity audit log +# - By default, offending requests are blocked with an error 403 response. +# - To change the disruptive action, see RESPONSE-999-EXCEPTIONS.conf.example +# and review section 'Changing the Disruptive Action for Anomaly Mode'. +# - In Apache, you can use ErrorDocument to show a friendly error page or +# perform a redirect: https://httpd.apache.org/docs/2.4/custom-error.html +# +SecDefaultAction "phase:1,log,auditlog,pass" +SecDefaultAction "phase:2,log,auditlog,pass" + +# Example: Anomaly Scoring mode, log only to ModSecurity audit log +# - By default, offending requests are blocked with an error 403 response. +# - To change the disruptive action, see RESPONSE-999-EXCEPTIONS.conf.example +# and review section 'Changing the Disruptive Action for Anomaly Mode'. +# - In Apache, you can use ErrorDocument to show a friendly error page or +# perform a redirect: https://httpd.apache.org/docs/2.4/custom-error.html +# +# SecDefaultAction "phase:1,nolog,auditlog,pass" +# SecDefaultAction "phase:2,nolog,auditlog,pass" + +# Example: Self-contained mode, return error 403 on blocking +# - In this configuration the default disruptive action becomes 'deny'. After a +# rule triggers, it will stop processing the request and return an error 403. +# - You can also use a different error status, such as 404, 406, et cetera. +# - In Apache, you can use ErrorDocument to show a friendly error page or +# perform a redirect: https://httpd.apache.org/docs/2.4/custom-error.html +# +# SecDefaultAction "phase:1,log,auditlog,deny,status:403" +# SecDefaultAction "phase:2,log,auditlog,deny,status:403" + +# Example: Self-contained mode, redirect back to homepage on blocking +# - In this configuration the 'tag' action includes the Host header data in the +# log. This helps to identify which virtual host triggered the rule (if any). +# - Note that this might cause redirect loops in some situations; for example +# if a Cookie or User-Agent header is blocked, it will also be blocked when +# the client subsequently tries to access the homepage. You can also redirect +# to another custom URL. +# SecDefaultAction "phase:1,log,auditlog,redirect:'http://%{request_headers.host}/',tag:'Host: %{request_headers.host}'" +# SecDefaultAction "phase:2,log,auditlog,redirect:'http://%{request_headers.host}/',tag:'Host: %{request_headers.host}'" + + +# +# -- [[ Paranoia Level Initialization ]] --------------------------------------- +# +# The Paranoia Level (PL) setting allows you to choose the desired level +# of rule checks. +# +# With each paranoia level increase, the CRS enables additional rules +# giving you a higher level of security. However, higher paranoia levels +# also increase the possibility of blocking some legitimate traffic due to +# false alarms (also named false positives or FPs). If you use higher +# paranoia levels, it is likely that you will need to add some exclusion +# rules for certain requests and applications receiving complex input. +# +# - A paranoia level of 1 is default. In this level, most core rules +# are enabled. PL1 is advised for beginners, installations +# covering many different sites and applications, and for setups +# with standard security requirements. +# At PL1 you should face FPs rarely. If you encounter FPs, please +# open an issue on the CRS GitHub site and don't forget to attach your +# complete Audit Log record for the request with the issue. 
+# - Paranoia level 2 includes many extra rules, for instance enabling +# many regexp-based SQL and XSS injection protections, and adding +# extra keywords checked for code injections. PL2 is advised +# for moderate to experienced users desiring more complete coverage +# and for installations with elevated security requirements. +# PL2 comes with some FPs which you need to handle. +# - Paranoia level 3 enables more rules and keyword lists, and tweaks +# limits on special characters used. PL3 is aimed at users experienced +# at the handling of FPs and at installations with a high security +# requirement. +# - Paranoia level 4 further restricts special characters. +# The highest level is advised for experienced users protecting +# installations with very high security requirements. Running PL4 will +# likely produce a very high number of FPs which have to be +# treated before the site can go productive. +# +# Rules in paranoia level 2 or higher will log their PL to the audit log; +# example: [tag "paranoia-level/2"]. This allows you to deduct from the +# audit log how the WAF behavior is affected by paranoia level. +# +# Uncomment this rule to change the default: +# +#SecAction \ +# "id:900000,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.paranoia_level=1" + + +# +# -- [[ Anomaly Mode Severity Levels ]] ---------------------------------------- +# +# Each rule in the CRS has an associated severity level. +# These are the default scoring points for each severity level. +# These settings will be used to increment the anomaly score if a rule matches. +# You may adjust these points to your liking, but this is usually not needed. +# +# - CRITICAL severity: Anomaly Score of 5. +# Mostly generated by the application attack rules (93x and 94x files). +# - ERROR severity: Anomaly Score of 4. +# Generated mostly from outbound leakage rules (95x files). +# - WARNING severity: Anomaly Score of 3. +# Generated mostly by malicious client rules (91x files). +# - NOTICE severity: Anomaly Score of 2. +# Generated mostly by the protocol rules (92x files). +# +# In anomaly mode, these scores are cumulative. +# So it's possible for a request to hit multiple rules. +# +# (Note: In this file, we use 'phase:1' to set CRS configuration variables. +# In general, 'phase:request' is used. However, we want to make absolutely sure +# that all configuration variables are set before the CRS rules are processed.) +# +#SecAction \ +# "id:900100,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.critical_anomaly_score=5,\ +# setvar:tx.error_anomaly_score=4,\ +# setvar:tx.warning_anomaly_score=3,\ +# setvar:tx.notice_anomaly_score=2" + + +# +# -- [[ Anomaly Mode Blocking Threshold Levels ]] ------------------------------ +# +# Here, you can specify at which cumulative anomaly score an inbound request, +# or outbound response, gets blocked. +# +# Most detected inbound threats will give a critical score of 5. +# Smaller violations, like violations of protocol/standards, carry lower scores. +# +# [ At default value ] +# If you keep the blocking thresholds at the defaults, the CRS will work +# similarly to previous CRS versions: a single critical rule match will cause +# the request to be blocked and logged. 
+# +# [ Using higher values ] +# If you want to make the CRS less sensitive, you can increase the blocking +# thresholds, for instance to 7 (which would require multiple rule matches +# before blocking) or 10 (which would require at least two critical alerts - or +# a combination of many lesser alerts), or even higher. However, increasing the +# thresholds might cause some attacks to bypass the CRS rules or your policies. +# +# [ New deployment strategy: Starting high and decreasing ] +# It is a common practice to start a fresh CRS installation with elevated +# anomaly scoring thresholds (>100) and then lower the limits as your +# confidence in the setup grows. You may also look into the Sampling +# Percentage section below for a different strategy to ease into a new +# CRS installation. +# +# [ Anomaly Threshold / Paranoia Level Quadrant ] +# +# High Anomaly Limit | High Anomaly Limit +# Low Paranoia Level | High Paranoia Level +# -> Fresh Site | -> Experimental Site +# ------------------------------------------------------ +# Low Anomaly Limit | Low Anomaly Limit +# Low Paranoia Level | High Paranoia Level +# -> Standard Site | -> High Security Site +# +# Uncomment this rule to change the defaults: +# +#SecAction \ +# "id:900110,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.inbound_anomaly_score_threshold=5,\ +# setvar:tx.outbound_anomaly_score_threshold=4" + +# +# -- [[ Application Specific Rule Exclusions ]] ---------------------------------------- +# +# Some well-known applications may undertake actions that appear to be +# malicious. This includes actions such as allowing HTML or Javascript within +# parameters. In such cases the CRS aims to prevent false positives by allowing +# administrators to enable prebuilt, application specific exclusions on an +# application by application basis. +# These application specific exclusions are distinct from the rules that would +# be placed in the REQUEST-900-EXCLUSION-RULES-BEFORE-CRS configuration file as +# they are prebuilt for specific applications. The 'REQUEST-900' file is +# designed for users to add their own custom exclusions. Note, using these +# application specific exclusions may loosen restrictions of the CRS, +# especially if used with an application they weren't designed for. As a result +# they should be applied with care. +# To use this functionality you must specify a supported application. To do so +# uncomment rule 900130. In addition to uncommenting the rule you will need to +# specify which application(s) you'd like to enable exclusions for. Only a +# (very) limited set of applications are currently supported, please use the +# filenames prefixed with 'REQUEST-903' to guide you in your selection. 
+# Such filenames use the following convention: +# REQUEST-903.9XXX-{APPNAME}-EXCLUSIONS-RULES.conf +# +# It is recommended if you run multiple web applications on your site to limit +# the effects of the exclusion to only the path where the excluded webapp +# resides using a rule similar to the following example: +# SecRule REQUEST_URI "@beginsWith /wordpress/" setvar:crs_exclusions_wordpress=1 + +# +# Modify and uncomment this rule to select which application: +# +#SecAction \ +# "id:900130,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.crs_exclusions_drupal=1,\ +# setvar:tx.crs_exclusions_wordpress=1" + +# +# -- [[ HTTP Policy Settings ]] ------------------------------------------------ +# +# This section defines your policies for the HTTP protocol, such as: +# - allowed HTTP versions, HTTP methods, allowed request Content-Types +# - forbidden file extensions (e.g. .bak, .sql) and request headers (e.g. Proxy) +# +# These variables are used in the following rule files: +# - REQUEST-911-METHOD-ENFORCEMENT.conf +# - REQUEST-912-DOS-PROTECTION.conf +# - REQUEST-920-PROTOCOL-ENFORCEMENT.conf + +# HTTP methods that a client is allowed to use. +# Default: GET HEAD POST OPTIONS +# Example: for RESTful APIs, add the following methods: PUT PATCH DELETE +# Example: for WebDAV, add the following methods: CHECKOUT COPY DELETE LOCK +# MERGE MKACTIVITY MKCOL MOVE PROPFIND PROPPATCH PUT UNLOCK +# Uncomment this rule to change the default. +#SecAction \ +# "id:900200,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.allowed_methods=GET HEAD POST OPTIONS'" + +# Content-Types that a client is allowed to send in a request. +# Default: application/x-www-form-urlencoded|multipart/form-data|text/xml|application/xml|application/x-amf|application/json|text/plain +# Uncomment this rule to change the default. +#SecAction \ +# "id:900220,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.allowed_request_content_type=application/x-www-form-urlencoded|multipart/form-data|text/xml|application/xml|application/x-amf|application/json|text/plain'" + +# Allowed HTTP versions. +# Default: HTTP/1.0 HTTP/1.1 HTTP/2 HTTP/2.0 +# Example for legacy clients: HTTP/0.9 HTTP/1.0 HTTP/1.1 HTTP/2 HTTP/2.0 +# Note that some web server versions use 'HTTP/2', some 'HTTP/2.0', so +# we include both version strings by default. +# Uncomment this rule to change the default. +#SecAction \ +# "id:900230,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.allowed_http_versions=HTTP/1.0 HTTP/1.1 HTTP/2 HTTP/2.0'" + +# Forbidden file extensions. +# Guards against unintended exposure of development/configuration files. +# Default: .asa/ .asax/ .ascx/ .axd/ .backup/ .bak/ .bat/ .cdx/ .cer/ .cfg/ .cmd/ .com/ .config/ .conf/ .cs/ .csproj/ .csr/ .dat/ .db/ .dbf/ .dll/ .dos/ .htr/ .htw/ .ida/ .idc/ .idq/ .inc/ .ini/ .key/ .licx/ .lnk/ .log/ .mdb/ .old/ .pass/ .pdb/ .pol/ .printer/ .pwd/ .resources/ .resx/ .sql/ .sys/ .vb/ .vbs/ .vbproj/ .vsdisco/ .webinfo/ .xsd/ .xsx/ +# Example: .bak/ .config/ .conf/ .db/ .ini/ .log/ .old/ .pass/ .pdb/ .sql/ +# Uncomment this rule to change the default. 
+#SecAction \ +# "id:900240,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.restricted_extensions=.asa/ .asax/ .ascx/ .axd/ .backup/ .bak/ .bat/ .cdx/ .cer/ .cfg/ .cmd/ .com/ .config/ .conf/ .cs/ .csproj/ .csr/ .dat/ .db/ .dbf/ .dll/ .dos/ .htr/ .htw/ .ida/ .idc/ .idq/ .inc/ .ini/ .key/ .licx/ .lnk/ .log/ .mdb/ .old/ .pass/ .pdb/ .pol/ .printer/ .pwd/ .resources/ .resx/ .sql/ .sys/ .vb/ .vbs/ .vbproj/ .vsdisco/ .webinfo/ .xsd/ .xsx/'" + +# Forbidden request headers. +# Header names should be lowercase, enclosed by /slashes/ as delimiters. +# Blocking Proxy header prevents 'httpoxy' vulnerability: https://httpoxy.org +# Default: /proxy/ /lock-token/ /content-range/ /translate/ /if/ +# Uncomment this rule to change the default. +#SecAction \ +# "id:900250,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.restricted_headers=/proxy/ /lock-token/ /content-range/ /translate/ /if/'" + +# File extensions considered static files. +# Extensions include the dot, lowercase, enclosed by /slashes/ as delimiters. +# Used in DoS protection rule. See section "Anti-Automation / DoS Protection". +# Default: /.jpg/ /.jpeg/ /.png/ /.gif/ /.js/ /.css/ /.ico/ /.svg/ /.webp/ +# Uncomment this rule to change the default. +#SecAction \ +# "id:900260,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.static_extensions=/.jpg/ /.jpeg/ /.png/ /.gif/ /.js/ /.css/ /.ico/ /.svg/ /.webp/'" + + +# +# -- [[ HTTP Argument/Upload Limits ]] ----------------------------------------- +# +# Here you can define optional limits on HTTP get/post parameters and uploads. +# This can help to prevent application specific DoS attacks. +# +# These values are checked in REQUEST-920-PROTOCOL-ENFORCEMENT.conf. +# Beware of blocking legitimate traffic when enabling these limits. +# + +# Block request if number of arguments is too high +# Default: unlimited +# Example: 255 +# Uncomment this rule to set a limit. +#SecAction \ +# "id:900300,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.max_num_args=255" + +# Block request if the length of any argument name is too high +# Default: unlimited +# Example: 100 +# Uncomment this rule to set a limit. +#SecAction \ +# "id:900310,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.arg_name_length=100" + +# Block request if the length of any argument value is too high +# Default: unlimited +# Example: 400 +# Uncomment this rule to set a limit. +#SecAction \ +# "id:900320,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.arg_length=400" + +# Block request if the total length of all combined arguments is too high +# Default: unlimited +# Example: 64000 +# Uncomment this rule to set a limit. +#SecAction \ +# "id:900330,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.total_arg_length=64000" + +# Block request if the file size of any individual uploaded file is too high +# Default: unlimited +# Example: 1048576 +# Uncomment this rule to set a limit. +#SecAction \ +# "id:900340,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.max_file_size=1048576" + +# Block request if the total size of all combined uploaded files is too high +# Default: unlimited +# Example: 1048576 +# Uncomment this rule to set a limit. 
+#SecAction \ +# "id:900350,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.combined_file_sizes=1048576" + + +# +# -- [[ Easing In / Sampling Percentage ]] ------------------------------------- +# +# Adding the Core Rule Set to an existing productive site can lead to false +# positives, unexpected performance issues and other undesired side effects. +# +# It can be beneficial to test the water first by enabling the CRS for a +# limited number of requests only and then, when you have solved the issues (if +# any) and you have confidence in the setup, to raise the ratio of requests +# being sent into the ruleset. +# +# Adjust the percentage of requests that are funnelled into the Core Rules by +# setting TX.sampling_percentage below. The default is 100, meaning that every +# request gets checked by the CRS. The selection of requests, which are going +# to be checked, is based on a pseudo random number generated by ModSecurity. +# +# If a request is allowed to pass without being checked by the CRS, there is no +# entry in the audit log (for performance reasons), but an error log entry is +# written. If you want to disable the error log entry, then issue the +# following directive somewhere after the inclusion of the CRS +# (E.g., RESPONSE-999-EXCEPTIONS.conf). +# +# SecRuleUpdateActionById 901150 "nolog" +# +# ATTENTION: If this TX.sampling_percentage is below 100, then some of the +# requests will bypass the Core Rules completely and you lose the ability to +# protect your service with ModSecurity. +# +# Uncomment this rule to enable this feature: +# +#SecAction "id:900400,\ +# phase:1,\ +# pass,\ +# nolog,\ +# setvar:tx.sampling_percentage=100" + + +# +# -- [[ Project Honey Pot HTTP Blacklist ]] ------------------------------------ +# +# Optionally, you can check the client IP address against the Project Honey Pot +# HTTPBL (dnsbl.httpbl.org). In order to use this, you need to register to get a +# free API key. Set it here with SecHttpBlKey. +# +# Project Honeypot returns multiple different malicious IP types. +# You may specify which you want to block by enabling or disabling them below. +# +# Ref: https://www.projecthoneypot.org/httpbl.php +# Ref: https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual#wiki-SecHttpBlKey +# +# Uncomment these rules to use this feature: +# +#SecHttpBlKey XXXXXXXXXXXXXXXXX +#SecAction "id:900500,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.block_search_ip=1,\ +# setvar:tx.block_suspicious_ip=1,\ +# setvar:tx.block_harvester_ip=1,\ +# setvar:tx.block_spammer_ip=1" + + +# +# -- [[ GeoIP Database ]] ------------------------------------------------------ +# +# There are some rulesets that inspect geolocation data of the client IP address +# (geoLookup). The CRS uses geoLookup to implement optional country blocking. +# +# To use geolocation, we make use of the MaxMind GeoIP database. +# This database is not included with the CRS and must be downloaded. +# You should also update the database regularly, for instance every month. +# The CRS contains a tool to download it to util/geo-location/GeoIP.dat: +# util/upgrade.py --geoip +# +# This product includes GeoLite data created by MaxMind, available from: +# http://www.maxmind.com. 
+# +# Ref: http://blog.spiderlabs.com/2010/10/detecting-malice-with-modsecurity-geolocation-data.html +# Ref: http://blog.spiderlabs.com/2010/11/detecting-malice-with-modsecurity-ip-forensics.html +# +# Uncomment this rule to use this feature: +# +#SecGeoLookupDB util/geo-location/GeoIP.dat + + +# +# -=[ Block Countries ]=- +# +# Rules in the IP Reputation file can check the client against a list of high +# risk country codes. These countries have to be defined in the variable +# tx.high_risk_country_codes via their ISO 3166 two-letter country code: +# https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2#Officially_assigned_code_elements +# +# If you are sure that you are not getting any legitimate requests from a given +# country, then you can disable all access from that country via this variable. +# The rule performing the test has the rule id 910100. +# +# This rule requires SecGeoLookupDB to be enabled and the GeoIP database to be +# downloaded (see the section "GeoIP Database" above.) +# +# By default, the list is empty. A list used by some sites was the following: +# setvar:'tx.high_risk_country_codes=UA ID YU LT EG RO BG TR RU PK MY CN'" +# +# Uncomment this rule to use this feature: +# +#SecAction \ +# "id:900600,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.high_risk_country_codes='" + + +# +# -- [[ Anti-Automation / DoS Protection ]] ------------------------------------ +# +# Optional DoS protection against clients making requests too quickly. +# +# When a client is making more than 100 requests (excluding static files) within +# 60 seconds, this is considered a 'burst'. After two bursts, the client is +# blocked for 600 seconds. +# +# Requests to static files are not counted towards DoS; they are listed in the +# 'tx.static_extensions' setting, which you can change in this file (see +# section "HTTP Policy Settings"). +# +# For a detailed description, see rule file REQUEST-912-DOS-PROTECTION.conf. +# +# Uncomment this rule to use this feature: +# +#SecAction \ +# "id:900700,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:'tx.dos_burst_time_slice=60',\ +# setvar:'tx.dos_counter_threshold=100',\ +# setvar:'tx.dos_block_timeout=600'" + + +# +# -- [[ Check UTF-8 encoding ]] ------------------------------------------------ +# +# The CRS can optionally check request contents for invalid UTF-8 encoding. +# We only want to apply this check if UTF-8 encoding is actually used by the +# site; otherwise it will result in false positives. +# +# Uncomment this rule to use this feature: +# +#SecAction \ +# "id:900950,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.crs_validate_utf8_encoding=1" + + +# +# -- [[ Blocking Based on IP Reputation ]] ------------------------------------ +# +# Blocking based on reputation is permanent in the CRS. Unlike other rules, +# which look at the indvidual request, the blocking of IPs is based on +# a persistent record in the IP collection, which remains active for a +# certain amount of time. +# +# There are two ways an individual client can become flagged for blocking: +# - External information (RBL, GeoIP, etc.) +# - Internal information (Core Rules) +# +# The record in the IP collection carries a flag, which tags requests from +# individual clients with a flag named IP.reput_block_flag. +# But the flag alone is not enough to have a client blocked. There is also +# a global switch named tx.do_reput_block. This is off by default. 
If you set +# it to 1 (=On), requests from clients with the IP.reput_block_flag will +# be blocked for a certain duration. +# +# Variables +# ip.reput_block_flag Blocking flag for the IP collection record +# ip.reput_block_reason Reason (= rule message) that caused to blocking flag +# tx.do_reput_block Switch deciding if we really block based on flag +# tx.reput_block_duration Setting to define the duration of a block +# +# It may be important to know, that all the other core rules are skipped for +# requests, when it is clear that they carry the blocking flag in question. +# +# Uncomment this rule to use this feature: +# +#SecAction \ +# "id:900960,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.do_reput_block=1" +# +# Uncomment this rule to change the blocking time: +# Default: 300 (5 minutes) +# +#SecAction \ +# "id:900970,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# setvar:tx.reput_block_duration=300" + + +# +# -- [[ Collection timeout ]] -------------------------------------------------- +# +# Set the SecCollectionTimeout directive from the ModSecurity default (1 hour) +# to a lower setting which is appropriate to most sites. +# This increases performance by cleaning out stale collection (block) entries. +# +# This value should be greater than or equal to: +# tx.reput_block_duration (see section "Blocking Based on IP Reputation") and +# tx.dos_block_timeout (see section "Anti-Automation / DoS Protection"). +# +# Ref: https://github.com/SpiderLabs/ModSecurity/wiki/Reference-Manual#wiki-SecCollectionTimeout + +# Please keep this directive uncommented. +# Default: 600 (10 minutes) +SecCollectionTimeout 600 + + +# +# -- [[ Debug Mode ]] ---------------------------------------------------------- +# +# To enable rule development and debugging, CRS has an optional debug mode +# that does not block a request, but instead sends detection information +# back to the HTTP client. +# +# This functionality is currently only supported with the Apache web server. +# The Apache mod_headers module is required. +# +# In debug mode, the webserver inserts "X-WAF-Events" / "X-WAF-Score" +# response headers whenever a debug client makes a request. Example: +# +# # curl -v 'http://192.168.1.100/?foo=../etc/passwd' +# X-WAF-Events: TX:930110-OWASP_CRS/WEB_ATTACK/DIR_TRAVERSAL-REQUEST_URI, +# TX:930120-OWASP_CRS/WEB_ATTACK/FILE_INJECTION-ARGS:foo, +# TX:932160-OWASP_CRS/WEB_ATTACK/RCE-ARGS:foo +# X-WAF-Score: Total=15; sqli=0; xss=0; rfi=0; lfi=10; rce=5; php=0; http=0; ses=0 +# +# To enable debug mode, include the RESPONSE-981-DEBUG.conf file. +# This file resides in a separate folder, as it is not compatible with +# nginx and IIS. +# +# You must specify the source IP address/network where you will be running the +# tests from. The source IP will BYPASS all CRS blocking, and will be sent the +# response headers as specified above. Be careful to only list your private +# IP addresses/networks here. +# +# Tip: for regression testing of CRS or your own ModSecurity rules, you may +# be interested in using the OWASP CRS regression testing suite instead. +# View the file util/regression-tests/README for more information. 
+# +# Uncomment these rules, filling in your CRS path and the source IP address, +# to enable debug mode: +# +#Include /path/to/crs/util/debug/RESPONSE-981-DEBUG.conf +#SecRule REMOTE_ADDR "@ipMatch 192.168.1.100" \ +# "id:900980,\ +# phase:1,\ +# nolog,\ +# pass,\ +# t:none,\ +# ctl:ruleEngine=DetectionOnly,\ +# setvar:tx.crs_debug_mode=1" + + +# +# -- [[ End of setup ]] -------------------------------------------------------- +# +# The CRS checks the tx.crs_setup_version variable to ensure that the setup +# has been loaded. If you are not planning to use this setup template, +# you must manually set the tx.crs_setup_version variable before including +# the CRS rules/* files. +# +# The variable is a numerical representation of the CRS version number. +# E.g., v3.0.0 is represented as 300. +# +SecAction \ + "id:900990,\ + phase:1,\ + nolog,\ + pass,\ + t:none,\ + setvar:tx.crs_setup_version=300" diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/tests/inventory b/Linux/ansible-lockdown/APACHE-2.4-CIS/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/tests/test.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/tests/test.yml new file mode 100644 index 0000000..0c6cb4a --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - initial_apache \ No newline at end of file diff --git a/Linux/ansible-lockdown/APACHE-2.4-CIS/vars/main.yml b/Linux/ansible-lockdown/APACHE-2.4-CIS/vars/main.yml new file mode 100644 index 0000000..d9321f5 --- /dev/null +++ b/Linux/ansible-lockdown/APACHE-2.4-CIS/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for initial_apache \ No newline at end of file diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/.ansible-lint b/Linux/ansible-lockdown/POSTGRES-12-CIS/.ansible-lint new file mode 100644 index 0000000..f2a7e7c --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/.ansible-lint @@ -0,0 +1,11 @@ +parseable: true +quiet: true +skip_list: + - '204' + - '305' + - '303' + - '403' + - '306' + - '602' +use_default_rules: true +verbosity: 0 diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/.travis.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/.travis.yml @@ -0,0 +1,29 @@ +--- +language: python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/CONTRIBUTING.rst b/Linux/ansible-lockdown/POSTGRES-12-CIS/CONTRIBUTING.rst new file mode 100644 index 0000000..23ce2fb --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/CONTRIBUTING.rst @@ -0,0 +1,67 @@ +Contributing to MindPoint Group Projects +======================================== + +Rules +----- +1) All commits must be GPG signed (details in Signing section) +2) All commits must have Signed-off-by 
(Signed-off-by: Joan Doe) in the commit message (details in Signing section)
+3) All work is done in your own branch
+4) All pull requests go into the devel branch. There are automated checks for signed commits, signoff in the commit message, and functional testing.
+5) Be open and nice to each other
+
+Workflow
+--------
+- Your work is done in your own individual branch. Make sure to sign off and GPG-sign all commits you intend to merge
+- All community Pull Requests go into the devel branch. There are automated checks for GPG signatures, sign-off in commit messages, and functional tests before a pull request is approved. If your pull request comes from outside of our repo, it will go into a staging branch, since our CI/CD testing needs information that only exists in our repo.
+- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release
+
+Signing your contribution
+-------------------------
+
+We've chosen to use the Developer's Certificate of Origin (DCO) method
+that is employed by the Linux Kernel Project, which provides a simple
+way to contribute to MindPoint Group projects.
+
+The process is to certify the below DCO 1.1 text
+::
+
+    Developer's Certificate of Origin 1.1
+
+    By making a contribution to this project, I certify that:
+
+    (a) The contribution was created in whole or in part by me and I
+        have the right to submit it under the open source license
+        indicated in the file; or
+
+    (b) The contribution is based upon previous work that, to the best
+        of my knowledge, is covered under an appropriate open source
+        license and I have the right under that license to submit that
+        work with modifications, whether created in whole or in part
+        by me, under the same open source license (unless I am
+        permitted to submit under a different license), as indicated
+        in the file; or
+
+    (c) The contribution was provided directly to me by some other
+        person who certified (a), (b) or (c) and I have not modified
+        it.
+
+    (d) I understand and agree that this project and the contribution
+        are public and that a record of the contribution (including all
+        personal information I submit with it, including my sign-off) is
+        maintained indefinitely and may be redistributed consistent with
+        this project or the open source license(s) involved.
+::
+
+Then, when it comes time to submit a contribution, include the
+following text in your contribution commit message:
+
+::
+
+    Signed-off-by: Joan Doe
+
+::
+
+
+This message can be entered manually, or if you have configured git
+with the correct `user.name` and `user.email`, you can use the `-s`
+option to `git commit` to automatically include the signoff message.
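+
+As a minimal illustration (the name, email, and commit message below are
+example values only), a commit that is both GPG signed and signed off can
+be created with the standard `-S` and `-s` options of `git commit`:
+
+::
+
+    git config user.name "Joan Doe"
+    git config user.email "joan.doe@example.com"
+    git commit -S -s -m "Add remediation for a benchmark rule"
+
+The resulting commit message ends with a `Signed-off-by:` line containing
+your configured name and email, which is what the automated checks look for.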
diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/LICENSE b/Linux/ansible-lockdown/POSTGRES-12-CIS/LICENSE
new file mode 100644
index 0000000..0d0b836
--- /dev/null
+++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Ansible Lockdown
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/README.md b/Linux/ansible-lockdown/POSTGRES-12-CIS/README.md
new file mode 100644
index 0000000..f71abc0
--- /dev/null
+++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/README.md
@@ -0,0 +1,62 @@
+PostgreSQL 12 CIS
+=========
+![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/POSTGRES-12-cis/CommunityToDevel?label=Devel%20Build%20Status&style=plastic)
+![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/POSTGRES-12-cis/DevelToMain?label=Main%20Build%20Status&style=plastic)
+![Release](https://img.shields.io/github/v/release/ansible-lockdown/POSTGRES-12-CIS?style=plastic)
+
+Configure a PostgreSQL 12 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) compliant
+
+Based on [CIS PostgreSQL 12 Benchmark v1.0.0 - November 19, 2019](https://www.cisecurity.org/cis-benchmarks/)
+
+Caution(s)
+-------
+This role **will make changes to the system** which may have unintended consequences. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted.
+
+This role was developed against a clean install of the Operating System and the PostgreSQL 12 database engine. If you are implementing it on an existing system, please review this role for any site-specific changes that are needed.
+
+To use the release version, please point to the main branch.
+
+Documentation
+-------------
+[Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown)
+[Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise)
+[Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration)
+[Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise)
+[Wiki](https://github.com/ansible-lockdown/POSTGRES-12-CIS/wiki)
+[Repo GitHub Page](https://ansible-lockdown.github.io/POSTGRES-12-CIS/)
+
+Requirements
+------------
+**General:**
+- Basic knowledge of Ansible; below are some links to the Ansible documentation to help you get started if you are unfamiliar with Ansible
+  - [Main Ansible documentation page](https://docs.ansible.com)
+  - [Ansible Getting Started](https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html)
+  - [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html)
+  - [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html)
+- A functioning Ansible and/or Tower installation, configured and running. This includes all of the base Ansible/Tower configuration, needed packages installed, and infrastructure set up.
+- Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consequences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file or the [Main Variables Wiki Page](https://github.com/ansible-lockdown/POSTGRES-12-cis/wiki/Main-Variables).
+
+**Technical Dependencies:**
+- Running Ansible/Tower setup (this role is tested against Ansible version 2.9.1 and newer)
+- Python3 Ansible run environment
+
+Role Variables
+--------------
+
+This role is designed so that the end user should not have to edit the tasks themselves. All customizing should be done via the defaults/main.yml file or with extra vars within the project, job, workflow, etc. These variables can be found [here](https://github.com/ansible-lockdown/POSTGRES-12-CIS/wiki/Main-Variables) in the Main Variables Wiki page. All variables are listed there along with descriptions. A minimal example invocation with variable overrides is shown below, after the Branches list.
+
+Branches
+-------
+**devel** - This is the default branch and the working development branch. Community pull requests will pull into this branch
+**main** - This is the release branch
+**reports** - This is a protected branch for our scoring reports, no code should ever go here
+**all other branches** - Individual community member branches
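+
+Example Usage
+-------
+As a minimal, illustrative invocation (the inventory file and the variable
+overrides below are only examples, not requirements), the bundled `site.yml`
+can be run against your own inventory and customized with extra vars:
+
+```sh
+# Apply the role to the hosts in ./inventory, overriding two of the
+# defaults/main.yml variables documented in the Main Variables wiki page
+ansible-playbook -i inventory site.yml \
+  -e "pgs12cis_log_destination=syslog" \
+  -e "pgs12cis_umask_value=077"
+```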
+ +Community Contribution +---------------------- + +We encourage you (the community) to contribute to this role. Please read the rules below. +- Your work is done in your own individual branch. Make sure to Signed-off and GPG sign all commits you intend to merge. +- All community Pull Requests are pulled into the devel branch +- Pull Requests into devel will confirm your commits have a GPG signature, Signed-off, and a functional test before being approved +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/defaults/main.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/defaults/main.yml new file mode 100644 index 0000000..45e1ca4 --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/defaults/main.yml @@ -0,0 +1,284 @@ +--- +# Adjusts role to run in a non-privileged container +pgs12cis_system_is_container: false + +# pgs12cis_install_postgresql this is a toggle to have this role do a generic install of PostgreSQL +# it is recommended that you perform the install and setup on your own and then run this role +pgs12cis_install_postgresql: false + +# Enable/Disable full sections toggle +# Set to true will run that section, set to false will skip section +pgs12cis_section1: true +pgs12cis_section2: true +pgs12cis_section3: true +pgs12cis_section4: true +pgs12cis_section5: true +pgs12cis_section6: true +pgs12cis_section7: true +pgs12cis_section8: true + +# Section 1 +# Installation and Patches +pgs12cis_rule_1_1: true +pgs12cis_rule_1_2: true +pgs12cis_rule_1_3: true +pgs12cis_rule_1_4: true +pgs12cis_rule_1_5: true + +# Section 2 +# Directory and File Permissions +pgs12cis_rule_2_1: true +pgs12cis_rule_2_2: true + +# Section 3 +# Logging Monitoring and Auditing +pgs12cis_rule_3_1_2: true +pgs12cis_rule_3_1_3: true +pgs12cis_rule_3_1_4: true +pgs12cis_rule_3_1_5: true +pgs12cis_rule_3_1_6: true +pgs12cis_rule_3_1_7: true +pgs12cis_rule_3_1_8: true +pgs12cis_rule_3_1_9: true +pgs12cis_rule_3_1_10: true +pgs12cis_rule_3_1_11: true +pgs12cis_rule_3_1_12: true +pgs12cis_rule_3_1_13: true +pgs12cis_rule_3_1_14: true +pgs12cis_rule_3_1_15: true +pgs12cis_rule_3_1_16: true +pgs12cis_rule_3_1_17: true +pgs12cis_rule_3_1_18: true +pgs12cis_rule_3_1_19: true +pgs12cis_rule_3_1_20: true +pgs12cis_rule_3_1_21: true +pgs12cis_rule_3_1_22: true +pgs12cis_rule_3_1_23: true +pgs12cis_rule_3_1_24: true +pgs12cis_rule_3_2: true + +# Section 4 +# User Access and Authorization +pgs12cis_rule_4_1: true +pgs12cis_rule_4_2: true +pgs12cis_rule_4_3: true +pgs12cis_rule_4_4: true +pgs12cis_rule_4_5: true +pgs12cis_rule_4_6: true +pgs12cis_rule_4_7: true +pgs12cis_rule_4_8: true + +# Section 5 +# Connection and Login +pgs12cis_rule_5_1: true +pgs12cis_rule_5_2: true + +# Section 6 +# PostgreSQL Settings +pgs12cis_rule_6_1: true +pgs12cis_rule_6_2: true +pgs12cis_rule_6_3: true +pgs12cis_rule_6_4: true +pgs12cis_rule_6_5: true +pgs12cis_rule_6_6: true +pgs12cis_rule_6_7: true +pgs12cis_rule_6_8: true +pgs12cis_rule_6_9: true + +# Section 7 +# Replication +pgs12cis_rule_7_1: true +pgs12cis_rule_7_2: true +pgs12cis_rule_7_3: true +pgs12cis_rule_7_4: true + +# Section 8 +# Special Configuration Considerations +pgs12cis_rule_8_1: true +pgs12cis_rule_8_2: true +pgs12cis_rule_8_3: true +pgs12cis_rule_8_4: true + +# Role vars - Global +# pgs12cis_pgs12_usr is the configured postgre user +pgs12cis_pgs12_usr: postgres +# pgs12cis_postgres_user_pw is the password for the 
postgres user. This is only used in the prelim task to install PostgreSQL 12 +# DO NOT USE PASSWORDS IN PLAIN TEXT +# If you are using this role to initially install PostgreSQL 12 it is recommended to use a pw tool like vault +pgs12cis_postgres_user_pw: letmein + +# pgs12cis_pgs12_usr_home_dir is the home directory of the pgs12cis_pgs12_usr user directory +pgs12cis_pgs12_usr_home_dir: "{{ pgs12cis_pgs12_usr_home_dir_get.stdout }}" + +# pgs12cis_config_file_get is the location of the postgresql.config file +# This is gathered from a prelim task +pgs12cis_config_file: "{{ pgs12cis_config_file_get.stdout }}" +# pgs12cis_hba_config_file is the location of the pg_hba.conf file +# This is gathered from a prelim task +pgs12cis_hba_config_file: "{{ pgs12cis_hba_config_file_get.stdout }}" + +# Vars for enabling FIPS 140-2, control(s) 6.7 +pgs12cis_boot_part: "{{ pgs12cis_prelim_boot_part.stdout }}" +pgs12cis_machine_uses_uefi: "{{ pgs12cis_prelim_sys_firmware_efi.stat.exists }}" +pgs12cis_grub_cfg_path: "{{ pgs12cis_machine_uses_uefi | ternary('/boot/efi/EFI/' ~ (ansible_distribution | lower) ~ '/grub.cfg', '/boot/grub2/grub.cfg') }}" + +# pgs12cis_postgresql_service is the name of the PostgreSQL service. +# The name postgresql-12 is a general default for RHEL based systems and posgresql for Ubuntu based systems +# but this allows for customization of service name if needed. There is a prelim task to set this automatically to the default name +pgs12cis_postgresql_service: "{{ pgs12cis_postgresql_service_auto }}" + +# Role vars - Sections +# Section 1 +# 1.5 +# pgs12cis_db_data_dir_ub is the data directory for the database cluster. This is for Ubuntu only +# If you have this role installing PostgreSQL this will be used for the initdb actions in that configuration +pgs12cis_db_data_dir_ub: /usr/local/pgsql/data + +# Section 2 +# 2.1 +# pgs12cis_umask_value is the value for the PostgreSQL users umask settings +# The value must be 077 or more restrictive +pgs12cis_umask_value: "077" + +# Section 3 +# 3.1.2 +# pgs12cis_log_destination is the log destination configured for PostgreSQL +# PostgreSQL supports several methods for logging server messages, including stderr, csvlog and syslog +# One of these destinations should be et for server log output +pgs12cis_log_destination: syslog + +# 3.1.4 +# pgs12cis_log_directory is the directory logs are written to +# The default value is log, which is relative to the clusters data directory. +# To use absolute paths start the value with a /, /var/log/pg_log for example +pgs12cis_log_directory: log + +# 3.1.5 +# pgs12cis_log_filename is the file name pattern for log files +# The value is treated as a strftime pattern +pgs12cis_log_filename: postgresql-%a.log + +# 3.1.6 +# pgs12cis_log_file_mode is the permissions set to the log files when created +# The permissions should be set to allow only the necessary access to authorized personnel. +# In most cases 600 is the best setting, but 640 is another common one as well +pgs12cis_log_file_mode: 600 + +# 3.1.8 +# pgs12cis_log_file_age is the duration to keep a log file +# Please set this to an acceptable value +pgs12cis_log_file_age: 1d + +# 3.1.9 +# pgs12cis_log_rotation_size is the max size of an individual log file. Once this size is reached the automatic log file rotation will occur +# To conform to CIS standards this value needs to be larger than zero +pgs12cis_log_rotation_size: 1GB + +# 3.1.10 +# pgs12cis_syslog_facility is the syslog "facility" to be used when logging to syslog is enabled. 
+# Possible values are LOCAL0, LOCAL1, through LOCAL7 +pgs12cis_syslog_facility: LOCAL1 + +# 3.1.11 +# pgs12cis_syslog_ident is the program name used to identify PostgreSQL messages in rsyslog logs +pgs12cis_syslog_ident: postgres + +# 3.1.12 +# pgs12cis_log_min_messages specifies the warning levels that are written to the server log. +# Options are DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, INFO, NOTICE, WARNING, ERROR, LOG, FATAL, PANIC +# The list above is in order from most chatty (DEBUG5) to practically mute (PANIC). The level WARNING is considered best practice unless indicated otherwise. +pgs12cis_log_min_messages: WARNING + +# 3.1.13 +# pgs12cis_log_min_error_statement is the value the rate at which all SQL statements generating errors will be logged at +# Options are DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, INFO, NOTICE, WARNING, ERROR, LOG, FATAL, PANIC +# The list above are in order from most chatty (DEBUG5) to practically mute (PANIC). The level ERROR is considered best practice unless indicated otherwise. +pgs12cis_log_min_error_statement: ERROR + +# 3.1.20 +# pgs12cis_log_error_verbosity sets the verbosity (amount of detail) of logged messages +# Options are TERSE, DEFAULT, and VERBOSE. +pgs12cis_log_error_verbosity: DEFAULT + +# 3.1.21 +# pgs12cis_log_hostname enables/disables logging of connecting host names in logs in addition to the IP +# Enabling this could cause non-negligible performance impacts. It is suggested to leave off, but have given the option to enable if needed +pgs12cis_log_hostname: off + +# 3.1.22 +# pgs12cis_log_line_prefix is the printf style string that is prefixed to each log line +# for this var you must use the single ticks before and after the values +# Example pgs12cis_log_line_prefix: '%m [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h ' +pgs12cis_log_line_prefix: '%m [%p]: [%l-1] db=%d,user=%u,app=%a,client=%h ' + +# 3.1.23 +# pgs12cis_log_statement specifies the types of SQL statements that are logged +# The values are none, ddl, mod, and all. Value none logs no SQL statements and all logs all SQL statements +# To conform to CIS standards this must NOT be set to none +pgs12cis_log_statement: ddl + +# 3.1.24 +# pgs12cis_log_timezone is the timezone to use in timestamps within log messages +# This value needs to be set to GMT, UTC, or the timezone defined by your organization's logging policy +pgs12cis_log_timezone: GMT + +# 3.2 +# pgs12cis_shared_preload_libraries are the shared pre-load libraries +# The value for this variable must list pgaudit, but can have more items and will be comma separated list with no space between items +# Example for multiple items pgs12cis_shared_preload_libraries: pgaudit,somethingelse +pgs12cis_shared_preload_libraries: pgaudit +# pgs12cis_pgaudit_log_types are the pgaudit log types that will be logged +# The options are READ, WRITE, FUNCTION, ROLE, DDL, and MISC. You can also have multiple types listed and separated by a comma with no spaces +# Example pgs12cis_pgaudit_log_types: ddl,write +pgs12cis_pgaudit_log_types: ddl,write + +# Section 4 +# 4.2 +# pgs12cis_allowed_superusers is the list of authorized super users. 
These are users that should have SUPERUSER, CREATEROLE, CREATEDB, REPLICATION, and/or BYPASSRLS roles +# Please add additional users in list form starting with a dash, as the first user currently is +pgs12cis_allowed_superusers: + - "{{ pgs12cis_pgs12_usr }}" + +# Section 5 +# 5.2 +# pgs12cis_encrypt_method is the encryption method used for allowing ssl communication with the pgs12cis_pgs12_usr configured user +# md5, scram-sha-256, gss, sspi, pam, ldap, radius, and cert are allowed. Trust, password, and ident methods are NOT allowed +pgs12cis_encrypt_method: scram-sha-256 + +# Section 6 +# 6.8 +# pgs12cis_create_selfsigned_cert turns on a prelim task to generate a self-signed certificate +# The prelim task uses these parameters (which matches defaults 'openssl req -new -x509 -days 365 -nodes -text -out {{pgs12cis_data_folder }}/server.crt -keyout {{ pgs12cis_data_folder }}/server.key -subj "/CN={{ ansible_nodename }}"' +# If you have your own certificates (self-signed is not best) set this value to false and use the variables below to specify those certificates +pgs12cis_create_selfsigned_cert: true +# pgs12cis_ssl_ciphers are the ciphers to use with ssl. Default config value HIGH:MEDIUM:+3DES:!aNULL +pgs12cis_ssl_ciphers: HIGH:MEDIUM:+3DES:!aNULL +# pgs12cis_ssl_cert_file is the certificate file to use with ssl. Default config value (file used) server.crt +pgs12cis_ssl_cert_file: server.crt +# pgs12cis_ssl_key_file the key file. Default config value (key file used) server.key +pgs12cis_ssl_key_file: server.key +# pgs12cis_password_encryption the encryption method used. Default config value +pgs12cis_password_encryption: scram-sha-256 + +# Section 7 +# 7.1 +# pgs12cis_replication_user a user to be given replication rights +# Disable this task if you already have your users configured as desired +pgs12cis_replication_user: replication_user +# pgs12cis_replication_user_pw is the pw for the created replication user +# DO NOT USE PLAIN TEXT PASSWORDS +# It is recommended to use a vaulted pw here, the default pw here is just for testing/example purposes +pgs12cis_replication_user_pw: test1 +# pgs12cis_replication_user_enc_method is the encryption method for the replication user in the pg_hba.conf file +pgs12cis_replication_user_enc_method: md5 + +# 7.2 +# pgs12cis_primary_db_server is the primary DB server that a standby server will use +# pgs12cis_primary_db_server can be either the IP or hostname of the primary db server +pgs12cis_primary_db_server: test +# pgs12cis_is_standby_server is the toggle to set the host as standby server. 
The control 7.2 is to only be run on a standby server +pgs12cis_is_standby_server: true + +# 7.3 +# pgs12cis_archive_command is the value for the archive_command feature in postgresql.conf +pgs12cis_archive_command: rsync -e ssh -a %p postgres@remotehost:/var/lib/pgsql/WAL/%f \ No newline at end of file diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/handlers/main.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/handlers/main.yml new file mode 100644 index 0000000..1982dfc --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/handlers/main.yml @@ -0,0 +1,18 @@ +--- +- name: restart postgresql + service: + name: "{{ pgs12cis_postgresql_service }}" + state: restarted + +- name: rebuild initramfs + command: dracut -f + +- name: make grub2 config + command: /usr/sbin/grub2-mkconfig --output={{ pgs12cis_grub_cfg_path }} + when: + - not pgs12cis_system_is_container + +- name: reboot system + shell: sleep 3; reboot + async: 15 + poll: 0 \ No newline at end of file diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/meta/main.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/meta/main.yml new file mode 100644 index 0000000..d02ea8e --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/meta/main.yml @@ -0,0 +1,26 @@ +galaxy_info: + author: "Sam Doran, Josh Springer, Daniel Shepherd, Bas Meijeri, James Cassell, Mike Renfro, DFed, George Nalen" + description: "Apply the CIS PostgreSQL 12 benchmarks" + company: "MindPoint Group" + license: MIT + min_ansible_version: 2.8.0 + + platforms: + - name: EL + versions: + - 7 + - 8 + - name: Ubuntu + versions: + - 16 + - 18 + - 20 + + galaxy_tags: + - system + - security + - CIS + - hardening + +dependencies: [] + \ No newline at end of file diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/site.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/site.yml new file mode 100644 index 0000000..d266373 --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/site.yml @@ -0,0 +1,6 @@ +--- +- hosts: all + become: true + + roles: + - role: "{{ playbook_dir }}" diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_redhat_fixes.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_redhat_fixes.yml new file mode 100644 index 0000000..740233b --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_redhat_fixes.yml @@ -0,0 +1,1329 @@ +--- +# Section 1 +# Installation and Patches +- name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories" + block: + - name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories | Get repo list RHEL8" + shell: dnf repolist all | grep enabled + changed_when: false + failed_when: false + register: pgs12cis_1_1_enabled_repos_rh8 + when: ansible_distribution_major_version == "8" + + - name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories | Get repo list RHEL7" + shell: yum repolist all | grep enabled + changed_when: false + failed_when: false + register: pgs12cis_1_1_enabled_repos_rh7 + when: ansible_distribution_major_version == "7" + + - name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories | Show repo list RHEL8" + debug: + msg: + - "Alert! Below are the enabled repo's. 
Please review to confirm these are authorized repositories" + - "{{ pgs12cis_1_1_enabled_repos_rh8.stdout_lines }}" + when: ansible_distribution_major_version == "8" + + - name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories | Show repo list RHEL8" + debug: + msg: + - "Alert! Below are the enabled repo's. Please review to confirm these are authorized repositories" + - "{{ pgs12cis_1_1_enabled_repos_rh7.stdout_lines }}" + when: ansible_distribution_major_version == "7" + when: + - pgs12cis_rule_1_1 + - pgs12cis_section1 + tags: + - level1-postgresql + - notscored + - audit + - rhel + - rule_1.1 + +- name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages" + block: + - name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages | Get installed packages and repo RHEL8" + shell: dnf info $(rpm -qa|grep postgres) | egrep '^Name|^Version|^From' + changed_when: false + failed_when: false + register: pgs12cis_1_2_installed_pckg_rpm_rh8 + when: ansible_distribution_major_version == "8" + + - name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages | Get installed packages and repo RHEL7" + shell: yum info $(rpm -qa|grep postgres) | egrep '^Name|^Version|^From' + changed_when: false + failed_when: false + register: pgs12cis_1_2_installed_pckg_rpm_rh7 + when: ansible_distribution_major_version == "7" + + - name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages | Show installed packages and repo RHEL8" + debug: + msg: + - "Alert! Below are the installed postgres packages and where they came from" + - "Please review and if the expected binary packages are not installed or did not come from an appropriate repo, this is a fail" + - "{{ pgs12cis_1_2_installed_pckg_rpm_rh8.stdout_lines }}" + when: ansible_distribution_major_version == "8" + + - name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages | Show installed packages and repo RHEL7" + debug: + msg: + - "Alert! Below are the installed postgres packages and where they came from" + - "Please review and if the expected binary packages are not installed or did not come from an appropriate repo, this is a fail" + - "{{ pgs12cis_1_2_installed_pckg_rpm_rh7.stdout_lines }}" + when: ansible_distribution_major_version == "7" + when: + - pgs12cis_rule_1_2 + - pgs12cis_section1 + tags: + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_1.2 + +- name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages" + block: + - name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages | Get installed packages and repo RHEL8" + shell: dnf info $(rpm -qa|grep postgres) | egrep '^Name|^Version|^From' + changed_when: false + failed_when: false + register: pgs12cis_1_3_installed_pckg_rpm_rh8 + when: ansible_distribution_major_version == "8" + + - name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages | Get installed packages and repo RHEL7" + shell: yum info $(rpm -qa|grep postgres) | egrep '^Name|^Version|^From' + changed_when: false + failed_when: false + register: pgs12cis_1_3_installed_pckg_rpm_rh7 + when: ansible_distribution_major_version == "7" + + - name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages | Show installed packages and repo RHEL8" + debug: + msg: + - "Alert! 
Below are the installed postgres packages and where they came from" + - "Please review and if the expected community packages are not installed or did not come from the PGDG repo, this is a fail" + - "{{ pgs12cis_1_3_installed_pckg_rpm_rh8.stdout_lines }}" + when: ansible_distribution_major_version == "8" + + - name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages | Show installed packages and repo RHEL7" + debug: + msg: + - "Alert! Below are the installed postgres packages and where they came from" + - "Please review and if the expected community packages are not installed or did not come from an PGDG repo, this is a fail" + - "{{ pgs12cis_1_3_installed_pckg_rpm_rh7.stdout_lines }}" + when: ansible_distribution_major_version == "7" + when: + - pgs12cis_rule_1_3 + - pgs12cis_section1 + tags: + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_1.3 + +- name: "SCORED | 1.4 | PATCH | Ensure systemd Service Files Are Enabled" + service: + name: "{{ pgs12cis_postgresql_service }}" + enabled: true + when: + - pgs12cis_rule_1_4 + - pgs12cis_section1 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_1.4 + +- name: "SCORED | 1.5 | PATCH | Ensure Data Cluster Initialized Successfully" + shell: 'PGSETUP_INITDB_OPTIONS="-k" /usr/pgsql-12/bin/postgresql-12-setup initdb' + changed_when: '"OK" in pgs12cis_1_5_db_initialize.stdout' + failed_when: false + register: pgs12cis_1_5_db_initialize + when: + - pgs12cis_rule_1_5 + - pgs12cis_section1 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_1.5 + +# Section 2 +# Directory and File Permissions +- name: "SCORED | 2.1 | PATCH | Ensure the file permissions mask is correct" + block: + - name: "SCORED | 2.1 | AUDIT | Ensure the file permissions mask is correct | Get user environment file (.bashrc, .bash_profile, or .profile}" + # shell: ls -a /var/lib/pgsql/ | grep -e '.bashrc\|.bash_profile\|.profile' + shell: ls -a "{{ pgs12cis_pgs12_usr_home_dir }}" | grep -e '.bashrc\|.bash_profile\|.profile' + changed_when: false + failed_when: false + register: pgs12cis_2_1_env_file + + - name: "SCORED | 2.1 | PATCH | Ensure the file permissions mask is correct | Set umask" + lineinfile: + path: "{{ pgs12cis_pgs12_usr_home_dir }}/{{ pgs12cis_2_1_env_file.stdout }}" + regexp: '^umask|^UMASK' + line: "umask 0{{ pgs12cis_umask_value }}" + when: + - pgs12cis_rule_2_1 + - pgs12cis_section2 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_2.1 + +- name: "SCORED | 2.2 | PATCH | Ensure the PostgreSQL pg_wheel group membership is correct" + block: + - name: "SCORED | 2.2 | PATCH | Ensure the PostgreSQL pg_wheel group membership is correct | Create pg_wheel group" + group: + name: pg_wheel + state: present + + - name: "SCORED | 2.2 | PATCH | Ensure the PostgreSQL pg_wheel group membership is correct" + user: + name: "{{ pgs12cis_pgs12_usr }}" + groups: pg_wheel + when: + - pgs12cis_rule_2_2 + - pgs12cis_section2 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_2.2 + +# Section 3 +# # Logging Monitoring and Auditing +- name: "SCORED | 3.1.2 | PATCH | Ensure the log destinations are set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_destination =|^#log_destination =' + line: "log_destination = '{{ pgs12cis_log_destination }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_2 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.2 
+ +- name: "SCORED | 3.1.3 | PATCH | Ensure the logging collector is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^logging_collector =|^#logging_collector =' + line: "logging_collector = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_3 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.3 + +- name: "SCORED | 3.1.4 | PATCH | Ensure the log file destination directory is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_directory =|^#log_directory =' + line: "log_directory = '{{ pgs12cis_log_directory }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_4 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.4 + +- name: "SCORED | 3.1.5 | PATCH | Ensure the filename pattern for log files is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_filename =|^#log_filename =' + line: "log_filename = '{{ pgs12cis_log_filename }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_5 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.4 + +- name: "SCORED | 3.1.6 | PATCH | Ensure the log file permissions are set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_truncate_on_rotation =|^#log_truncate_on_rotation =' + line: "log_truncate_on_rotation = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_6 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.6 + +- name: "SCORED | 3.1.7 | PATCH | Ensure 'log_truncate_on_rotation' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_file_mode =|^#log_file_mode =' + line: "log_file_mode = 0{{ pgs12cis_log_file_mode }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_7 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.7 + +- name: "SCORED | 3.1.8 | PATCH | Ensure the maximum log file lifetime is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_rotation_age =|^#log_rotation_age =' + line: "log_rotation_age = {{ pgs12cis_log_file_age }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_8 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.8 + +- name: "SCORED | 3.1.9 | PATCH | Ensure the maximum log file size is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_rotation_size =|^#log_rotation_size =' + line: "log_rotation_size = {{ pgs12cis_log_rotation_size }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_9 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.9 + +- name: "SCORED | 3.1.10 | PATCH | Ensure the correct syslog facility is selected" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^syslog_facility =|^#syslog_facility =' + line: "syslog_facility = '{{ pgs12cis_syslog_facility }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_10 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.10 + +- name: "SCORED | 3.1.11 | PATCH | Ensure the program name for PostgreSQL syslog messages is 
correct" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^syslog_ident =|^#syslog_ident =' + line: "syslog_ident = '{{ pgs12cis_syslog_ident }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_11 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.11 + +- name: "NOTSCORED | 3.1.12 | PATCH | Ensure the correct messages are written to the server log" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_min_messages =|^#log_min_messages =' + line: "log_min_messages = {{ pgs12cis_log_min_messages }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_12 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - rhel + - rule_3.1.12 + +- name: "NOTSCORED | 3.1.13 | PATCH | Ensure the correct SQL statements generating errors are recorded" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_min_error_statement =|^#log_min_error_statement =' + line: "log_min_error_statement = {{ pgs12cis_log_min_error_statement }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_13 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - rhel + - rule_3.1.13 + +- name: "SCORED | 3.1.14 | PATCH | Ensure 'debug_print_parse' is disabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_print_parse =|^#debug_print_parse =' + line: "debug_print_parse = off" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_14 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.14 + +- name: "SCORED | 3.1.15 | PATCH | Ensure 'debug_print_rewritten' is disabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_print_rewritten =|^#debug_print_rewritten =' + line: "debug_print_rewritten = off" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_15 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.15 + +- name: "SCORED | 3.1.16 | PATCH | Ensure 'debug_print_plan' is disabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_print_plan =|^#debug_print_plan =' + line: "debug_print_plan = off" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_16 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.16 + +- name: "SCORED | 3.1.17 | PATCH | Ensure 'debug_pretty_print' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_pretty_print =|^#debug_pretty_print =' + line: "debug_pretty_print = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_17 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.17 + +- name: "SCORED | 3.1.18 | PATCH | Ensure 'log_connections' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_connections =|^#log_connections =' + line: "log_connections = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_18 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.18 + +- name: "SCORED | 3.1.19 | PATCH | Ensure 'log_disconnections' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_disconnections =|^#log_disconnections =' + line: "log_disconnections = 
on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_19 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.19 + +- name: "SCORED | 3.1.20 | PATCH | Ensure 'log_error_verbosity' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_error_verbosity =|^#log_error_verbosity =' + line: "log_error_verbosity = {{ pgs12cis_log_error_verbosity }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_20 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.20 + +- name: "SCORED | 3.1.21 | PATCH | Ensure 'log_hostname' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_hostname =|^#log_hostname =' + line: "log_hostname = {{ pgs12cis_log_hostname }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_21 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.21 + +- name: "NOTSCORED | 3.1.22 | PATCH | Ensure 'log_line_prefix' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_line_prefix =|^#log_line_prefix =' + line: "log_line_prefix = '{{ pgs12cis_log_line_prefix }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_22 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - rhel + - rule_3.1.22 + +- name: "SCORED | 3.1.23 | PATCH | Ensure 'log_statement' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_statement =|^#log_statement =' + line: "log_statement = '{{ pgs12cis_log_statement }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_23 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.23 + +- name: "SCORED | 3.1.24 | PATCH | Ensure 'log_timezone' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_timezone =|^#log_timezone =' + line: "log_timezone = '{{ pgs12cis_log_timezone }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_24 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.1.24 + +- name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled" + block: + - name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled | Install pgAudit RHEL8" + dnf: + name: pgaudit14_12 + state: present + when: ansible_distribution_major_version == "8" + + - name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled | Install pgAudit RHEL7" + yum: + name: pgaudit14_12 + state: present + when: ansible_distribution_major_version == "7" + + - name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled | Set pgAudit settings in postgresql.conf" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + notify: restart postgresql + with_items: + - { regexp: '^shared_preload_libraries =|^#shared_preload_libraries =', line: "shared_preload_libraries = '{{ pgs12cis_shared_preload_libraries }}'", insertafter: EOF } + - { regexp: '^# for this example we are logging the ddl and write operations', line: '# for this example we are logging the ddl and write operations', insertafter: EOF } + - { 
regexp: '^pgaudit.log=', line: "pgaudit.log='{{ pgs12cis_pgaudit_log_types }}'", insertafter: '# for this example we are logging the ddl and write operations'} + when: + - pgs12cis_rule_3_2 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_3.2 + +# Section 4 +# User Access and Authorization +- name: "SCORED | 4.1 | PATCH | Ensure sudo is configured correctly" + block: + - name: "SCORED | 4.1 | PATCH | Ensure sudo is configured correctly | Configure pg_wheel to sudoers" + lineinfile: + path: /etc/sudoers.d/postgres + regexp: '^%pg_wheel ALL=' + line: '%pg_wheel ALL= /bin/su - {{ pgs12cis_pgs12_usr }}' + create: yes + + - name: "SCORED | 4.1 | PATCH | Ensure sudo is configured correctly | Set file permissions" + file: + path: /etc/sudoers.d/postgres + mode: 0600 + when: + - pgs12cis_rule_4_1 + - pgs12cis_section4 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_4.1 + +- name: "SCORED | 4.2 | PATCH | Ensure excessive administrative privileges are revoked" + block: + - name: "SCORED | 4.2 | AUDIT | Ensure excessive administrative privileges are revoked | Get list of PostgreSQL users" + shell: psql -t -c "\du" | awk '{print $1}' | sed -r '/^\s*$/d' + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_4_2_all_postgresql_users + + - set_fact: + pgs12cis_disallowed_superusers: "{{ pgs12cis_4_2_all_postgresql_users.stdout_lines | difference(pgs12cis_allowed_superusers) }}" + + - name: "SCORED | 4.2 | PATCH | Ensure excessive administrative privileges are revoked | Remove excessive privileges from users" + command: psql -c 'alter user {{ item }} with NOSUPERUSER NOCREATEROLE NOCREATEDB NOREPLICATION NOBYPASSRLS ;' + become_user: "{{ pgs12cis_pgs12_usr }}" + with_items: + - "{{ pgs12cis_disallowed_superusers }}" + when: + - pgs12cis_rule_4_2 + - pgs12cis_section4 + tags: + - level1-postgresql + - scored + - patch + - rhel + - rule_4.2 + +- name: "SCORED | 4.3 | AUDIT | Ensure excessive function privileges are revoked" + block: + - name: "SCORED | 4.3 | AUDIT | Ensure excessive function privileges are revoked" + command: psql -c "SELECT nspname, proname, proargtypes, prosecdef, rolname, proconfig FROM pg_proc p JOIN pg_namespace n ON p.pronamespace = n.oid JOIN pg_authid a ON a.oid = p.proowner WHERE prosecdef OR NOT proconfig IS NULL;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_4_3_security_definer + + - name: "SCORED | 4.3 | AUDIT | Ensure excessive function privileges are revoked" + debug: + msg: + - "Caution! Please review the list below and confirm all results should have Security Definer status" + - "When possible revoke Security Definer, Security Invoker is another option that is valid to use" + - "{{ pgs12cis_4_3_security_definer.stdout_lines }}" + when: + - pgs12cis_rule_4_3 + - pgs12cis_section4 + tags: + - level1-postgresqlonlinux + - scored + - audit + - rhel + - rule_4.3 + +- name: "SCORED | 4.4 | AUDIT | Ensure excessive DML privileges are revoked" + debug: + msg: + - "Alert! 
Please review all users and databases to determine no users have excessive DML privileges"
+ - "If a user has excessive DML privileges that user can change or delete information without proper authorization and this is a finding"
+ when:
+ - pgs12cis_rule_4_4
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - rhel
+ - rule_4.4
+
+- name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions"
+ block:
+ - name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions | Get pg_permission status"
+ shell: psql -c "select t.schemaname, t.tablename, u.usename, has_table_privilege(u.usename, t.tablename, 'select') as select, has_table_privilege(u.usename, t.tablename, 'insert') as insert, has_table_privilege(u.usename, t.tablename, 'update') as update, has_table_privilege(u.usename, t.tablename, 'delete') as delete from pg_tables t, pg_user u where t.schemaname not in ('information_schema','pg_catalog');" | grep rows | tr -d '()'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_5_pg_permission_status
+
+ - name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions | Alert on not available"
+ debug:
+ msg:
+ - "Alert! You do not have the pg_permission module installed and this is a finding"
+ - "Please review and install the pg_permission module"
+ when: "'0 rows' in pgs12cis_4_5_pg_permission_status.stdout"
+
+ - name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions | Alert on available"
+ debug:
+ msg: "Good News! You have the module available for use. Please make sure it is configured correctly"
+ when: "'0 rows' not in pgs12cis_4_5_pg_permission_status.stdout"
+ when:
+ - pgs12cis_rule_4_5
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - rhel
+ - rule_4.5
+
+- name: "NOTSCORED | 4.6 | AUDIT | Ensure Row Level Security (RLS) is configured correctly"
+ block:
+ - name: "NOTSCORED | 4.6 | AUDIT | Ensure Row Level Security (RLS) is configured correctly | Get databases using RLS"
+ command: psql -c 'SELECT oid, relname, relrowsecurity FROM pg_class WHERE relrowsecurity IS TRUE;'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_6_rls_databases
+
+ - name: "NOTSCORED | 4.6 | AUDIT | Ensure Row Level Security (RLS) is configured correctly | Message out"
+ debug:
+ msg:
+ - "Alert! You need to confirm RLS is configured correctly"
+ - "Below are the tables that are using RLS"
+ - "{{ pgs12cis_4_6_rls_databases.stdout_lines }}"
+ when:
+ - pgs12cis_rule_4_6
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - rhel
+ - rule_4.6
+
+- name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed"
+ block:
+ - name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed | Get set_user status"
+ shell: psql -c "select * from pg_available_extensions where name = 'set_user';" | grep rows | tr -d '()'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_7_set_user_status
+
+ - name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed | Alert on not available"
+ debug:
+ msg:
+ - "Alert!
You do not have the set_user extension installed and this is a finding"
+ - "Please review and install the set_user extension then configure as needed"
+ when: "'0 rows' in pgs12cis_4_7_set_user_status.stdout"
+
+ - name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed | Alert on available"
+ debug:
+ msg: "Good News! You have the module available for use. Please make sure it is configured correctly"
+ when: "'0 rows' not in pgs12cis_4_7_set_user_status.stdout"
+ when:
+ - pgs12cis_rule_4_7
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - rhel
+ - rule_4.7
+
+- name: "NOTSCORED | 4.8 | AUDIT | Make use of default roles"
+ block:
+ - name: "NOTSCORED | 4.8 | AUDIT | Make use of default roles | Get superusers"
+ command: psql -c 'select rolname from pg_roles where rolsuper is true;'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_8_superusers
+
+ - name: "NOTSCORED | 4.8 | AUDIT | Make use of default roles | Message out"
+ debug:
+ msg:
+ - "Alert! Below are the superusers, please review and add any needed users to the superusers or remove any users that should not have superuser status"
+ - "{{ pgs12cis_4_8_superusers.stdout_lines }}"
+ when:
+ - pgs12cis_rule_4_8
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - rhel
+ - rule_4.8
+
+# Section 5
+# Connection and Login
+- name: 'NOTSCORED | 5.1 | PATCH | Ensure login via "local" UNIX Domain Socket is configured correctly'
+ debug:
+ msg:
+ - "Alert! Please ensure login via the local UNIX domain socket is configured correctly"
+ when:
+ - pgs12cis_rule_5_1
+ - pgs12cis_section5
+ tags:
+ - level1-postgresqlonlinux
+ - scored
+ - audit
+ - rhel
+ - rule_5.1
+
+- name: 'SCORED | 5.2 | PATCH | Ensure login via "host" TCP/IP Socket is configured correctly'
+ block:
+ - name: 'SCORED | 5.2 | PATCH | Ensure login via "host" TCP/IP Socket is configured correctly | Set listen_addresses'
+ lineinfile:
+ path: "{{ pgs12cis_config_file }}"
+ regexp: '^listen_addresses =|^#listen_addresses ='
+ line: "listen_addresses = '*'"
+ notify: restart postgresql
+
+ - name: 'SCORED | 5.2 | PATCH | Ensure login via "host" TCP/IP Socket is configured correctly | Set allow encrypted and deny unencrypted'
+ lineinfile:
+ path: "{{ pgs12cis_hba_config_file }}"
+ regexp: "{{ item.regexp }}"
+ line: "{{ item.line }}"
+ insertafter: "{{ item.insertafter }}"
+ notify: restart postgresql
+ with_items:
+ - { regexp: '^this is just a place holder value', line: '# The two rules below were inserted via CIS automation', insertafter: '^#.*TYPE.*DATABASE.*USER.*ADDRESS.*METHOD' }
+ - { regexp: '^hostnossl.*all.*{{ pgs12cis_pgs12_usr }}', line: 'hostnossl all {{ pgs12cis_pgs12_usr }} 0.0.0.0/0 reject', insertafter: '# The two rules below were inserted via CIS automation' }
+ - { regexp: '^hostssl.*all.*{{ pgs12cis_pgs12_usr }}', line: "hostssl all {{ pgs12cis_pgs12_usr }} 0.0.0.0/0 {{ pgs12cis_encrypt_method }}", insertafter: '# The two rules below were inserted via CIS automation' }
+ when:
+ - pgs12cis_rule_5_2
+ - pgs12cis_section5
+ tags:
+ - level1-postgresqlonlinux
+ - scored
+ - audit
+ - rhel
+ - rule_5.2
+
+# Section 6
+# PostgreSQL Settings
+- name: "NOTSCORED | 6.1 | AUDIT | Ensure 'Attack Vectors' Runtime Parameters are Configured"
+ debug:
+ msg: "ALERT! Please review all configuration settings. Configure PostgreSQL logging to record all modifications and changes to the RDBMS."
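+ # NOTE: 6.1 is review-only; there is no single setting to enforce here. One
+ # possible way to list non-default runtime parameters (an illustrative query,
+ # not mandated by the benchmark text) is:
+ #   psql -c "SELECT name, setting, source FROM pg_settings WHERE source <> 'default';"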
+ when:
+ - pgs12cis_rule_6_1
+ - pgs12cis_section6
+ tags:
+ - level1-postgresql
+ - level1-postgresqlonlinux
+ - notscored
+ - audit
+ - rhel
+ - rule_6.1
+
+- name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly"
+ block:
+ - name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly | Get backend runtime parameters"
+ command: psql -c "SELECT name, setting FROM pg_settings WHERE context IN ('backend','superuser-backend') ORDER BY 1;"
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_6_2_backend_runtime_param
+
+ - name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly | Get process output"
+ shell: ps aux | grep -E '[p]ost' | grep -- '-[D]'
+ changed_when: false
+ failed_when: false
+ register: pgs12cis_6_2_process_output
+
+ - name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly | Message out parameters"
+ debug:
+ msg:
+ - "Alert! Please review this output and compare with a previous OK output looking for any changes"
+ - "Also review the postgresql.conf and postgresql.auto.conf and compare them with previously archived files looking for changes"
+ - "{{ pgs12cis_6_2_backend_runtime_param.stdout_lines }}"
+ - ""
+ - "Please review the process output below and look for parameters that were used at server startup"
+ - "{{ pgs12cis_6_2_process_output.stdout_lines }}"
+ when:
+ - pgs12cis_rule_6_2
+ - pgs12cis_section6
+ tags:
+ - level1-postgresql
+ - level1-postgresqlonlinux
+ - scored
+ - audit
+ - rhel
+ - rule_6.2
+
+- name: "NOTSCORED | 6.3 | AUDIT | Ensure 'Postmaster' Runtime Parameters are Configured"
+ block:
+ - name: "NOTSCORED | 6.3 | AUDIT | Ensure 'Postmaster' Runtime Parameters are Configured | Get Postmaster runtime parameters"
+ command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'postmaster' ORDER BY 1;"
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_6_3_postmaster_runtime_param
+
+ - name: "NOTSCORED | 6.3 | AUDIT | Ensure 'Postmaster' Runtime Parameters are Configured | Get process output"
+ shell: ps aux | grep -E 'postgres' | grep -- '-[D]'
+ changed_when: false
+ failed_when: false
+ register: pgs12cis_6_3_process_output
+
+ - name: "NOTSCORED | 6.3 | AUDIT | Ensure 'Postmaster' Runtime Parameters are Configured | Message out parameters"
+ debug:
+ msg:
+ - "Alert!
Please review this output and compare with a previous OK output looking for any changes" + - "Also review the postgresql.conf and postgresql.auto.conf and compare them with previously archived files looking for changes" + - "{{ pgs12cis_6_3_postmaster_runtime_param.stdout_lines }}" + - "" + - "Please review the process output below and look for parameters that were used at server startup" + - "{{ pgs12cis_6_3_process_output.stdout_lines }}" + when: + - pgs12cis_rule_6_3 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_6.3 + +- name: "NOTSCORED | 6.4 | AUDIT | Ensure 'SIGHUP' Runtime Parameters are Configured" + block: + - name: "NOTSCORED | 6.4 | AUDIT | Ensure 'SIGHUP' Runtime Parameters are Configured | Get sighup settings" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'sighup' ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_4_sighup_settings + + - name: "NOTSCORED | 6.4 | AUDIT | Ensure 'SIGHUP' Runtime Parameters are Configured | Message out settings" + debug: + msg: + - "Alert! Below are the current sighup settings. Please revert all values in the PostgreSQL configuration files" + - "and invoke the server to reload the configuration files." + - "{{ pgs12cis_6_4_sighup_settings.stdout_lines }}" + when: + - pgs12cis_rule_6_4 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_6.4 + +- name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured" + block: + - name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured | Get superuser settings" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'superuser' ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_5_superuser_settings + + - name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured | Get process output" + shell: ps aux | grep -E 'postgres' | grep -- '-[D]' + changed_when: false + failed_when: false + register: pgs12cis_6_5_process_output + + - name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured | Message out settings" + debug: + msg: + - "Alert! Please review this output and compare with a previous OK output looking for any changes" + - "Also review the postgresql.conf and postgresql.auto.conf and compare them with previously archived files looking for changes" + - "{{ pgs12cis_6_5_superuser_settings.stdout_lines }}" + - "" + - "Please review the process output below and look for parameters that were used at server startup" + - "{{ pgs12cis_6_5_process_output.stdout_lines }}" + when: + - pgs12cis_rule_6_5 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_6.5 + +- name: "NOTSCORED | 6.6 | AUDIT | Ensure 'User' Runtime Parameters are Configured" + block: + - name: "NOTSCORED | 6.6 | AUDIT | Ensure 'User' Runtime Parameters are Configured | Get user settings" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'user' ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_6_user_settings + + - name: "NOTSCORED | 6.6 | AUDIT | Ensure 'User' Runtime Parameters are Configured | Message out settings" + debug: + msg: + - "Alert! 
In the matter of a user session, the login sessions must be validated that it is not executing undesired parameter changes." + - "In the matter of attributes that have been changed in entities, they must be manually reverted to its default value(s)." + - "Below are the user settings" + - "{{ pgs12cis_6_6_user_settings.stdout_lines }}" + when: + - pgs12cis_rule_6_6 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_6.6 + +- name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used" + block: + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Check for FIPS capability" + command: openssl version + changed_when: false + failed_when: false + register: pgs12cis_6_7_fips_capability + + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Setup FIPS Enabled RHEL7" + include_tasks: rhel7_fips.yml + when: + - ansible_distribution_major_version == "7" + - "'fips' in pgs12cis_6_7_fips_capability.stdout|lower" + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Check FIPS status" + command: fips-mode-setup --check + changed_when: false + failed_when: false + register: pgs12cis_6_7_fips_check + when: ansible_distribution_major_version == "8" + + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Set FIPS enabled RHEL8" + command: fips-mode-setup --enable + notify: reboot system + when: + - "'FIPS' in pgs12cis_6_7_fips_capability.stdout" + - "'disabled' in pgs12cis_6_7_fips_check.stdout" + - ansible_distribution_major_version == "8" + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Alert that system is not FIPS capable" + debug: + msg: + - "Alert! 
The system does not support FIPS 140-2" + - "This is a finding and your system will not conform to control 6.7 of the PostgreSQL CIS" + when: "'fips' not in pgs12cis_6_7_fips_capability.stdout|lower" + when: + - pgs12cis_rule_6_7 + - pgs12cis_section6 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_6.7 + +- name: "SCORED | 6.8 | PATCH | Ensure SSL is enabled and configured correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + notify: restart postgresql + with_items: + - { regexp: '^ssl =|^#ssl =', line: 'ssl = on', insertafter: '# - SSL -' } + - { regexp: '^ssl_ciphers =| ssl_ciphers =', line: "ssl_ciphers = '{{ pgs12cis_ssl_ciphers }}'", insertafter: 'ssl = on' } + - { regexp: '^ssl_cert_file =|^#ssl_cert_file =', line: "ssl_cert_file = '{{ pgs12cis_ssl_cert_file }}'", insertafter: 'ssl = on' } + - { regexp: '^ssl_key_file =|ssl_key_file =', line: "ssl_key_file = '{{ pgs12cis_ssl_key_file }}'", insertafter: 'ssl = on' } + - { regexp: '^password_encryption =|^#password_encryption =', line: 'password_encryption = {{ pgs12cis_password_encryption }}', insertafter: 'ssl = on' } + when: + - pgs12cis_rule_6_8 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_6.8 + +- name: "NOTSCORED | 6.9 | PATCH | Ensure the pgcrypto extension is installed and configured correctly" + command: psql -c "CREATE EXTENSION pgcrypto;" + changed_when: "'CREATE' in pgs12cis_6_9_pgcrypto_status.stdout" + failed_when: false + register: pgs12cis_6_9_pgcrypto_status + become_user: "{{ pgs12cis_pgs12_usr }}" + when: + - pgs12cis_rule_6_9 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - rhel + - rule_6.9 + +# Section 7 +# Replication +- name: "NOTSCORED | 7.1 | | PATCH | Ensure a replication-only user is created and used for streaming replication" + block: + - name: "NOTSCORED | 7.1 | | AUDIT | Ensure a replication-only user is created and used for streaming replication | Create replication user" + shell: psql -t -c "select rolname from pg_roles where rolreplication is true;" | sed -r '/^\s*$/d' | awk '{$1=$1};1' + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_7_1_replication_users + + - name: "NOTSCORED | 7.1 | | PATCH | Ensure a replication-only user is created and used for streaming replication | Create replication user" + command: psql -c "create user {{ pgs12cis_replication_user }} REPLICATION encrypted password '{{ pgs12cis_replication_user_pw }}';" + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + when: "pgs12cis_replication_user not in pgs12cis_7_1_replication_users.stdout" + + - name: "NOTSCORED | 7.1 | | PATCH | Ensure a replication-only user is created and used for streaming replication | Create replication user pg_hba.conf entry" + lineinfile: + path: "{{ pgs12cis_hba_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^this is just a place holder value', line: '# The rule below was inserted via CIS automation', insertafter: '^# TYPE DATABASE USER'} + - { regexp: '^hostssl replication {{ pgs12cis_replication_user }}', line: "hostssl replication {{ pgs12cis_replication_user }} 0.0.0.0/0 {{ pgs12cis_replication_user_enc_method }}", insertafter: '^# The rule below was inserted via CIS automation' 
} + notify: restart postgresql + when: "pgs12cis_replication_user not in pgs12cis_7_1_replication_users.stdout" + when: + - pgs12cis_rule_7_1 + - pgs12cis_section7 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - rhel + - rule_7.1 + +- name: "NOTSCORED | 7.2 | AUDIT | Ensure base backups are configured and functional" + debug: + msg: + - "Alert! To conform to CIS standards make sure to use base backups" + when: + - pgs12cis_rule_7_2 + - pgs12cis_section7 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_7.2 + +- name: "SCORED | 7.3 | PATCH | Ensure WAL archiving is configured and functional" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + notify: restart postgresql + with_items: + - { regexp: '^archive_mode =|^#archive_mode =', line: 'archive_mode = on' } + - { regexp: '^archive_command =|^#archive_command =', line: "archive_command = '{{ pgs12cis_archive_command }}'" } + when: + - pgs12cis_rule_7_3 + - pgs12cis_section7 + tags: + - level1-postgresqlonlinux + - scored + - patch + - rhel + - rule_7.3 + +- name: "NOTSCORED | 7.4 | AUDIT | Ensure streaming replication parameters are configured correctly" + debug: + msg: + - "Alert! Please ensure streaming replication parameters are configured" + when: + - pgs12cis_rule_7_4 + - pgs12cis_section7 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_7.4 + +# Section 8 +# Special Configuration Considerations +- name: "NOTSCORED | 8.1 | AUDIT | Ensure PostgreSQL configuration files are outside the data cluster" + block: + - name: "NOTSCORED | 8.1 | AUDIT | Ensure PostgreSQL configuration files are outside the data cluster | Get file list" + command: psql -c "select name, setting from pg_settings where name ~ '.*_file$';" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_8_1_files_used + + - name: "NOTSCORED | 8.1 | AUDIT | Ensure PostgreSQL configuration files are outside the data cluster | Display file listings" + debug: + msg: + - "Alert! Please review the settings and determine appropriate locations and file permissions for the configuration files based on organization's security policies" + - "{{ pgs12cis_8_1_files_used.stdout_lines }}" + when: + - pgs12cis_rule_8_1 + - pgs12cis_section8 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_8.1 + +- name: "NOTSCORED | 8.2 | AUDIT | Ensure PostgreSQL subdirectory locations are outside the data cluster" + block: + - name: "NOTSCORED | 8.2 | AUDIT | Ensure PostgreSQL subdirectory locations are outside the data cluster | Get directory settings" + command: psql -c "select name, setting from pg_settings where (name ~ '_directory$' or name ~ '_tablespace');" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_8_2_directory_settings + + - name: "NOTSCORED | 8.2 | AUDIT | Ensure PostgreSQL subdirectory locations are outside the data cluster | Display settings" + debug: + msg: + - "Alert! Please inspect the file and directory permissions for all returned values. Only superusers and authorized users" + - "should have access control rights for htese files and directories." 
+ - "{{ pgs12cis_8_2_directory_settings.stdout_lines }}" + when: + - pgs12cis_rule_8_2 + - pgs12cis_section8 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_8.2 + +- name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured" + block: + - name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured | Check for pgBackRest" + command: which pgbackrest + changed_when: false + failed_when: false + register: pgs12cis_pgbackrest_status + + - name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured | Missing pgBackRest" + debug: + msg: + - "Alert! You do not have pgBackRest installed. Please install and configure pgBackRest" + when: "'no pgbackrest' in pgs12cis_pgbackrest_status.stdout" + + - name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured | pgBackRest installed" + debug: + msg: + - "Good News! You have pgBackRest installed. Make sure it is configured appropriatly for things like stanza name, backup location, retention policy, logging, etc" + when: "'no pgbackrest' not in pgs12cis_pgbackrest_status.stdout" + when: + - pgs12cis_rule_8_3 + - pgs12cis_section8 + tags: + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_8.3 + +- name: "NOTSCORED | 8.4 | AUDIT | Ensure miscellaneous configuration settings are correct" + block: + - name: "NOTSCORED | 8.4 | AUDIT | Ensure miscellaneous configuration settings are correct | Get settings" + command: psql -c "select name, setting from pg_settings where name in ('external_pid_file', 'unix_socket_directories','shared_preload_libraries','dynamic_library_path',' local_preload_libraries','session_preload_libraries');" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_8_4_settings + + - name: "NOTSCORED | 8.4 | AUDIT | Ensure miscellaneous configuration settings are correct | Display settings" + debug: + msg: + - "Alert! Please review the settings to confirm permissions are based on organization's policies" + - "{{ pgs12cis_8_4_settings.stdout_lines }}" + when: + - pgs12cis_rule_8_4 + - pgs12cis_section8 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - rhel + - rule_8.4 diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_ubuntu_fixes.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_ubuntu_fixes.yml new file mode 100644 index 0000000..286ad91 --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/cis_pgs12_ubuntu_fixes.yml @@ -0,0 +1,1213 @@ +--- +# Section 1 +# Installation and Patches +- name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories" + block: + - name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories | Get repo list Ubuntu" + command: apt-cache policy + changed_when: false + failed_when: false + register: pgs12cis_1_1_enabled_repos_ub + + - name: "NOTSCORED | 1.1 | AUDIT | Ensure packages are obtained from authorized repositories | Show repo list Ubuntu" + debug: + msg: + - "Alert! Below are teh enabled repo's. 
Please review to confirm these are authorized repositories"
+ - "{{ pgs12cis_1_1_enabled_repos_ub.stdout_lines }}"
+ when:
+ - pgs12cis_rule_1_1
+ - pgs12cis_section1
+ tags:
+ - level1-postgresql
+ - notscored
+ - audit
+ - ubuntu
+ - rule_1.1
+
+- name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages"
+ block:
+ - name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages | Get installed packages and repo Ubuntu"
+ shell: apt list --installed | grep postgres
+ changed_when: false
+ failed_when: false
+ register: pgs12cis_1_2_installed_pckg_ub
+
+ - name: "NOTSCORED | 1.2 | AUDIT | Ensure Installation of Binary Packages | Show installed packages"
+ debug:
+ msg:
+ - "Alert! Below are the installed postgres packages"
+ - "Please review and if the expected packages did not come from an appropriate repo, this is a fail"
+ - "{{ pgs12cis_1_2_installed_pckg_ub.stdout_lines }}"
+ when:
+ - pgs12cis_rule_1_2
+ - pgs12cis_section1
+ tags:
+ - level1-postgresqlonlinux
+ - notscored
+ - audit
+ - ubuntu
+ - rule_1.2
+
+- name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages"
+ block:
+ - name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages | Get installed packages and repo Ubuntu"
+ shell: apt list --installed | grep postgres
+ changed_when: false
+ failed_when: false
+ register: pgs12cis_1_3_installed_pckg_ub
+
+ - name: "NOTSCORED | 1.3 | AUDIT | Ensure Installation of Community Packages | Show installed packages"
+ debug:
+ msg:
+ - "Alert! Below are the installed postgres packages"
+ - "Please review and if the expected community packages are not installed or did not come from the PGDG repo, this is a fail"
+ - "{{ pgs12cis_1_3_installed_pckg_ub.stdout_lines }}"
+ when:
+ - pgs12cis_rule_1_3
+ - pgs12cis_section1
+ tags:
+ - level1-postgresqlonlinux
+ - notscored
+ - audit
+ - ubuntu
+ - rule_1.3
+
+- name: "SCORED | 1.4 | PATCH | Ensure systemd Service Files Are Enabled"
+ service:
+ name: "{{ pgs12cis_postgresql_service }}"
+ enabled: true
+ when:
+ - pgs12cis_rule_1_4
+ - pgs12cis_section1
+ tags:
+ - level1-postgresqlonlinux
+ - scored
+ - patch
+ - ubuntu
+ - rule_1.4
+
+- name: "SCORED | 1.5 | PATCH | Ensure Data Cluster Initialized Successfully"
+ command: "/usr/lib/postgresql/12/bin/initdb -D {{ pgs12cis_db_data_dir_ub }}"
+ changed_when: '"Success" in pgs12cis_1_5_db_initialize.stdout'
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_1_5_db_initialize
+ when:
+ - pgs12cis_rule_1_5
+ - pgs12cis_section1
+ tags:
+ - level1-postgresqlonlinux
+ - scored
+ - patch
+ - ubuntu
+ - rule_1.5
+
+# Section 2
+# Directory and File Permissions
+- name: "SCORED | 2.1 | PATCH | Ensure the file permissions mask is correct"
+ block:
+ - name: "SCORED | 2.1 | AUDIT | Ensure the file permissions mask is correct | Get user environment file (.bashrc, .bash_profile, or .profile)"
+ shell: ls -a "{{ pgs12cis_pgs12_usr_home_dir }}" | grep -e '.bashrc\|.bash_profile\|.profile'
+ changed_when: false
+ failed_when: false
+ register: pgs12cis_2_1_env_file
+
+ - name: "SCORED | 2.1 | PATCH | Ensure the file permissions mask is correct | Set umask"
+ lineinfile:
+ path: "{{ pgs12cis_pgs12_usr_home_dir }}/{{ pgs12cis_2_1_env_file.stdout }}"
+ regexp: '^umask|^UMASK'
+ line: "umask 0{{ pgs12cis_umask_value }}"
+ when: pgs12cis_2_1_env_file.stdout != ""
+
+ - name: "SCORED | 2.1 | PATCH | Ensure the file permissions mask is correct | Set umask if file doesn't exist"
+ lineinfile:
+ path: "{{
pgs12cis_pgs12_usr_home_dir }}/.bashrc" + line: "umask 0{{ pgs12cis_umask_value }}" + create: yes + when: pgs12cis_2_1_env_file.stdout == "" + when: + - pgs12cis_rule_2_1 + - pgs12cis_section2 + tags: + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_2.1 + +# this control makes it so you can't get run commands in postgres user +- name: "SCORED | 2.2 | PATCH | Ensure the PostgreSQL pg_wheel group membership is correct" + block: + - name: "SCORED | 2.2 | PATCH | Ensure the PostgreSQL pg_wheel group membership is correct | Create pg_wheel group" + group: + name: pg_wheel + state: present + + - name: "SCORED | 2.2 | PATCH | Ensure the PostgreSQL pg_wheel group membership is correct" + user: + name: "{{ pgs12cis_pgs12_usr }}" + groups: pg_wheel + append: yes + when: + - pgs12cis_rule_2_2 + - pgs12cis_section2 + tags: + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_2.2 + +# Section 3 +# # Logging Monitoring and Auditing +- name: "SCORED | 3.1.2 | PATCH | Ensure the log destinations are set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_destination =|^#log_destination =' + line: "log_destination = '{{ pgs12cis_log_destination }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_2 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.2 + +- name: "SCORED | 3.1.3 | PATCH | Ensure the logging collector is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^logging_collector =|^#logging_collector =' + line: "logging_collector = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_3 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.3 + +- name: "SCORED | 3.1.4 | PATCH | Ensure the log file destination directory is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_directory =|^#log_directory =' + line: "log_directory = '{{ pgs12cis_log_directory }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_4 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.4 + +- name: "SCORED | 3.1.5 | PATCH | Ensure the filename pattern for log files is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_filename =|^#log_filename =' + line: "log_filename = '{{ pgs12cis_log_filename }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_5 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.4 + +- name: "SCORED | 3.1.6 | PATCH | Ensure the log file permissions are set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_truncate_on_rotation =|^#log_truncate_on_rotation =' + line: "log_truncate_on_rotation = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_6 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.6 + +- name: "SCORED | 3.1.7 | PATCH | Ensure 'log_truncate_on_rotation' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_file_mode =|^#log_file_mode =' + line: "log_file_mode = 0{{ pgs12cis_log_file_mode }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_7 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.7 + +- 
name: "SCORED | 3.1.8 | PATCH | Ensure the maximum log file lifetime is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_rotation_age =|^#log_rotation_age =' + line: "log_rotation_age = {{ pgs12cis_log_file_age }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_8 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.8 + +- name: "SCORED | 3.1.9 | PATCH | Ensure the maximum log file size is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_rotation_size =|^#log_rotation_size =' + line: "log_rotation_size = {{ pgs12cis_log_rotation_size }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_9 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.9 + +- name: "SCORED | 3.1.10 | PATCH | Ensure the correct syslog facility is selected" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^syslog_facility =|^#syslog_facility =' + line: "syslog_facility = '{{ pgs12cis_syslog_facility }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_10 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.10 + +- name: "SCORED | 3.1.11 | PATCH | Ensure the program name for PostgreSQL syslog messages is correct" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^syslog_ident =|^#syslog_ident =' + line: "syslog_ident = '{{ pgs12cis_syslog_ident }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_11 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.11 + +- name: "NOTSCORED | 3.1.12 | PATCH | Ensure the correct messages are written to the server log" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_min_messages =|^#log_min_messages =' + line: "log_min_messages = {{ pgs12cis_log_min_messages }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_12 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - ubuntu + - rule_3.1.12 + +- name: "NOTSCORED | 3.1.13 | PATCH | Ensure the correct SQL statements generating errors are recorded" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_min_error_statement =|^#log_min_error_statement =' + line: "log_min_error_statement = {{ pgs12cis_log_min_error_statement }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_13 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - ubuntu + - rule_3.1.13 + +- name: "SCORED | 3.1.14 | PATCH | Ensure 'debug_print_parse' is disabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_print_parse =|^#debug_print_parse =' + line: "debug_print_parse = off" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_14 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.14 + +- name: "SCORED | 3.1.15 | PATCH | Ensure 'debug_print_rewritten' is disabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_print_rewritten =|^#debug_print_rewritten =' + line: "debug_print_rewritten = off" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_15 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - 
rule_3.1.15 + +- name: "SCORED | 3.1.16 | PATCH | Ensure 'debug_print_plan' is disabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_print_plan =|^#debug_print_plan =' + line: "debug_print_plan = off" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_16 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.16 + +- name: "SCORED | 3.1.17 | PATCH | Ensure 'debug_pretty_print' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^debug_pretty_print =|^#debug_pretty_print =' + line: "debug_pretty_print = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_17 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.17 + +- name: "SCORED | 3.1.18 | PATCH | Ensure 'log_connections' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_connections =|^#log_connections =' + line: "log_connections = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_18 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.18 + +- name: "SCORED | 3.1.19 | PATCH | Ensure 'log_disconnections' is enabled" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_disconnections =|^#log_disconnections =' + line: "log_disconnections = on" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_19 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.19 + +- name: "SCORED | 3.1.20 | PATCH | Ensure 'log_error_verbosity' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_error_verbosity =|^#log_error_verbosity =' + line: "log_error_verbosity = {{ pgs12cis_log_error_verbosity }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_20 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.20 + +- name: "SCORED | 3.1.21 | PATCH | Ensure 'log_hostname' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_hostname =|^#log_hostname =' + line: "log_hostname = {{ pgs12cis_log_hostname }}" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_21 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.21 + +- name: "NOTSCORED | 3.1.22 | PATCH | Ensure 'log_line_prefix' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_line_prefix =|^#log_line_prefix =' + line: "log_line_prefix = '{{ pgs12cis_log_line_prefix }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_22 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - ubuntu + - rule_3.1.22 + +- name: "SCORED | 3.1.23 | PATCH | Ensure 'log_statement' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_statement =|^#log_statement =' + line: "log_statement = '{{ pgs12cis_log_statement }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_23 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.23 + +- name: "SCORED | 3.1.24 | PATCH | Ensure 'log_timezone' is set correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^log_timezone =|^#log_timezone =' + 
line: "log_timezone = '{{ pgs12cis_log_timezone }}'" + notify: restart postgresql + when: + - pgs12cis_rule_3_1_24 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.1.24 + +- name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled" + block: + - name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled | Install pgAudit Ubuntu" + apt: + name: postgresql-12-pgaudit + state: present + + - name: "SCORED | 3.2 | PATCH | Ensure the PostgreSQL Audit Extension (pgAudit) is enabled | Set pgAudit settings in postgresql.conf" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + notify: restart postgresql + with_items: + - { regexp: '^shared_preload_libraries =|^#shared_preload_libraries =', line: "shared_preload_libraries = '{{ pgs12cis_shared_preload_libraries }}'", insertafter: EOF } + - { regexp: '^# for this example we are logging the ddl and write operations', line: '# for this example we are logging the ddl and write operations', insertafter: EOF } + - { regexp: '^pgaudit.log=', line: "pgaudit.log='{{ pgs12cis_pgaudit_log_types }}'", insertafter: '# for this example we are logging the ddl and write operations'} + when: + - pgs12cis_rule_3_2 + - pgs12cis_section3 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_3.2 + +# Section 4 +# User Access and Authorization +- name: "SCORED | 4.1 | PATCH | Ensure sudo is configured correctly" + block: + - name: "SCORED | 4.1 | PATCH | Ensure sudo is configured correctly | Configure pg_wheel to sudoers" + lineinfile: + path: /etc/sudoers.d/postgres + regexp: '^%pg_wheel ALL=' + line: '%pg_wheel ALL= /bin/su - {{ pgs12cis_pgs12_usr }}' + create: yes + + - name: "SCORED | 4.1 | PATCH | Ensure sudo is configured correctly | Set file permissions" + file: + path: /etc/sudoers.d/postgres + mode: 0600 + when: + - pgs12cis_rule_4_1 + - pgs12cis_section4 + tags: + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_4.1 + +- name: "SCORED | 4.2 | PATCH | Ensure excessive administrative privileges are revoked" + block: + - name: "SCORED | 4.2 | AUDIT | Ensure excessive administrative privileges are revoked | Get list of PostgreSQL users" + shell: psql -t -c "\du" | awk '{print $1}' | sed -r '/^\s*$/d' + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_4_2_all_postgresql_users + + - name: "SCORED | 4.2 | PATCH | Ensure excessive administrative privileges are revoked | Get list of disallowed superusers" + set_fact: + pgs12cis_disallowed_superusers: "{{ pgs12cis_4_2_all_postgresql_users.stdout_lines | difference(pgs12cis_allowed_superusers) }}" + + - name: "SCORED | 4.2 | PATCH | Ensure excessive administrative privileges are revoked | Remove excessive privileges from users" + command: psql -c 'alter user {{ item }} with NOSUPERUSER NOCREATEROLE NOCREATEDB NOREPLICATION NOBYPASSRLS ;' + become_user: "{{ pgs12cis_pgs12_usr }}" + with_items: + - "{{ pgs12cis_disallowed_superusers }}" + when: + - pgs12cis_rule_4_2 + - pgs12cis_section4 + tags: + - level1-postgresql + - scored + - patch + - ubuntu + - rule_4.2 + +- name: "SCORED | 4.4 | AUDIT | Ensure excessive DML privileges are revoked" + debug: + msg: + - "Alert! 
Please review all users and databases to determine no users have excessive DML privileges"
+ - "If a user has excessive DML privileges that user can change or delete information without proper authorization and this is a finding"
+ when:
+ - pgs12cis_rule_4_4
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - ubuntu
+ - rule_4.4
+
+- name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions"
+ block:
+ - name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions | Get pg_permission status"
+ shell: psql -c "select t.schemaname, t.tablename, u.usename, has_table_privilege(u.usename, t.tablename, 'select') as select, has_table_privilege(u.usename, t.tablename, 'insert') as insert, has_table_privilege(u.usename, t.tablename, 'update') as update, has_table_privilege(u.usename, t.tablename, 'delete') as delete from pg_tables t, pg_user u where t.schemaname not in ('information_schema','pg_catalog');" | grep rows | tr -d '()'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_5_pg_permission_status
+
+ - name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions | Alert on not available"
+ debug:
+ msg:
+ - "Alert! You do not have the pg_permission module installed and this is a finding"
+ - "Please review and install the pg_permission module then configure as needed"
+ when: "'0 rows' in pgs12cis_4_5_pg_permission_status.stdout"
+
+ - name: "NOTSCORED | 4.5 | AUDIT | Use pg_permission extension to audit object permissions | Alert on available"
+ debug:
+ msg: "Good News! You have the module available for use. Please make sure it is configured correctly"
+ when: "'0 rows' not in pgs12cis_4_5_pg_permission_status.stdout"
+ when:
+ - pgs12cis_rule_4_5
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - ubuntu
+ - rule_4.5
+
+- name: "NOTSCORED | 4.6 | AUDIT | Ensure Row Level Security (RLS) is configured correctly"
+ block:
+ - name: "NOTSCORED | 4.6 | AUDIT | Ensure Row Level Security (RLS) is configured correctly | Get databases using RLS"
+ command: psql -c 'SELECT oid, relname, relrowsecurity FROM pg_class WHERE relrowsecurity IS TRUE;'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_6_rls_databases
+
+ - name: "NOTSCORED | 4.6 | AUDIT | Ensure Row Level Security (RLS) is configured correctly | Message out"
+ debug:
+ msg:
+ - "Alert! You need to confirm RLS is configured correctly"
+ - "Below are the tables that are using RLS"
+ - "{{ pgs12cis_4_6_rls_databases.stdout_lines }}"
+ when:
+ - pgs12cis_rule_4_6
+ - pgs12cis_section4
+ tags:
+ - level1-postgresql
+ - scored
+ - audit
+ - ubuntu
+ - rule_4.6
+
+- name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed"
+ block:
+ - name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed | Get set_user status"
+ shell: psql -c "select * from pg_available_extensions where name = 'set_user';" | grep rows | tr -d '()'
+ changed_when: false
+ failed_when: false
+ become_user: "{{ pgs12cis_pgs12_usr }}"
+ register: pgs12cis_4_7_set_user_status
+
+ - name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed | Alert on not available"
+ debug:
+ msg:
+ - "Alert!
You do not have the set_user extension installed and this is a finding" + - "Please review and install the set_user extension then configure as needed" + when: "'0 rows' in pgs12cis_4_7_set_user_status.stdout" + + - name: "NOTSCORED | 4.7 | AUDIT | Ensure the set_user extension is installed | Alert on available" + debug: + msg: "Good News! You have the module available for use. Please make sure it is configured correctly" + when: "'0 rows' not in pgs12cis_4_7_set_user_status.stdout" + when: + - pgs12cis_rule_4_7 + - pgs12cis_section4 + tags: + - level1-postgresql + - scored + - audit + - ubuntu + - rule_4.7 + +- name: "NOTSCORED | 4.8 | AUDIT | Make use of default roles" + block: + - name: "NOTSCORED | 4.8 | AUDIT | Make use of default roles | Get superusers" + command: psql -c 'select rolname from pg_roles where rolsuper is true;' + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_4_8_superusers + + - name: "NOTSCORED | 4.8 | AUDIT | Make use of default roles | Message out" + debug: + msg: + - "Alert! Below are the superusers, please review and add any needed users to the superusers or remove any users that should not have superuser status" + - "{{ pgs12cis_4_8_superusers.stdout_lines }}" + when: + - pgs12cis_rule_4_8 + - pgs12cis_section4 + tags: + - level1-postgresql + - scored + - audit + - ubuntu + - rule_4.8 + +# Section 5 +# Connection and Login +- name: 'NOTSCORED | 5.1 | PATCH | Ensure login via "local" UNIX Domain Socket is configured correctly' + debug: + msg: + - "Alert! Please ensure logins via the local UNIX Domain Socket are configured correctly" + when: + - pgs12cis_rule_5_1 + - pgs12cis_section5 + tags: + - level1-postgresqlonlinux + - scored + - audit + - ubuntu + - rule_5.1 + +- name: 'SCORED | 5.2 | PATCH | Ensure login via "host" TCP/IP Socket is configured correctly' + block: + - name: 'SCORED | 5.2 | PATCH | Ensure login via "host" TCP/IP Socket is configured correctly | Set listen_addresses' + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: '^listen_addresses =|^#listen_addresses =' + line: "listen_addresses = '*'" + notify: restart postgresql + + - name: 'SCORED | 5.2 | PATCH | Ensure login via "host" TCP/IP Socket is configured correctly | Set allow encrypted and deny unencrypted' + lineinfile: + path: "{{ pgs12cis_hba_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + notify: restart postgresql + with_items: + - { regexp: '^this is just a place holder value', line: '# The two rules below were inserted via CIS automation', insertafter: '^#.*TYPE.*DATABASE.*USER.*ADDRESS.*METHOD' } + - { regexp: '^hostnossl.*all.*{{ pgs12cis_pgs12_usr }}', line: 'hostnossl all {{ pgs12cis_pgs12_usr }} 0.0.0.0/0 reject', insertafter: '# The two rules below were inserted via CIS automation' } + - { regexp: '^hostssl.*all.*{{ pgs12cis_pgs12_usr }}', line: "hostssl all {{ pgs12cis_pgs12_usr }} 0.0.0.0/0 {{ pgs12cis_encrypt_method }}", insertafter: '# The two rules below were inserted via CIS automation' } + when: + - pgs12cis_rule_5_2 + - pgs12cis_section5 + tags: + - level1-postgresqlonlinux + - scored + - audit + - ubuntu + - rule_5.2 + +# Section 6 +# PostgreSQL Settings +- name: "NOTSCORED | 6.1 | AUDIT | Ensure 'Attack Vectors' Runtime Parameters are Configured" + debug: + msg: "ALERT! Please review all configuration settings. Configure PostgreSQL logging to record all modifications and changes to the RDBMS."
+ when: + - pgs12cis_rule_6_1 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_6.1 + +- name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly" + block: + - name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly | Get backend runtime parameters" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context IN ('backend','superuser-backend') ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_2_backend_runtime_param + + - name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly | Get process output" + shell: ps aux | grep -E '[p]ost' | grep -- '-[D]' + changed_when: false + failed_when: false + register: pgs12cis_6_2_process_output + + - name: "SCORED | 6.2 | AUDIT | Ensure 'backend' runtime parameters are configured correctly | Message out parameters" + debug: + msg: + - "ALert! Please review this output and compare with a previous OK output looking for any changes" + - "Also review the postgresql.conf and postgresql.auto.conf and compare them with previously archived files looking for changes" + - "{{ pgs12cis_6_2_backend_runtime_param.stdout_lines }}" + - "" + - "Please review the process output below and look for parameters that were used at server startup" + - "{{ pgs12cis_6_2_process_output.stdout_lines }}" + when: + - pgs12cis_rule_6_2 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - audit + - ubuntu + - rule_6.2 + +- name: "NOTSCORED | 6.4 | AUDIT | Ensure 'SIGHUP' Runtime Parameters are Configured" + block: + - name: "NOTSCORED | 6.4 | AUDIT | Ensure 'SIGHUP' Runtime Parameters are Configured | Get sighup settings" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'sighup' ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_4_sighup_settings + + - name: "NOTSCORED | 6.4 | AUDIT | Ensure 'SIGHUP' Runtime Parameters are Configured | Message out settings" + debug: + msg: + - "Alert! Below are the current sighup settings. Please revert all values in the PostgreSQL configuration files" + - "and invoke the server to reload the configuration files." + - "{{ pgs12cis_6_4_sighup_settings.stdout_lines }}" + when: + - pgs12cis_rule_6_4 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_6.4 + +- name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured" + block: + - name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured | Get superuser settings" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'superuser' ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_5_superuser_settings + + - name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured | Get process output" + shell: ps aux | grep -E 'postgres' | grep -- '-[D]' + changed_when: false + failed_when: false + register: pgs12cis_6_5_process_output + + - name: "NOTSCORED | 6.5 | AUDIT | Ensure 'Superuser' Runtime Parameters are Configured | Message out settings" + debug: + msg: + - "Alert! 
Please review this output and compare with a previous OK output looking for any changes" + - "Also review the postgresql.conf and postgresql.auto.conf and compare them with previously archived files looking for changes" + - "{{ pgs12cis_6_5_superuser_settings.stdout_lines }}" + - "" + - "Please review the process output below and look for parameters that were used at server startup" + - "{{ pgs12cis_6_5_process_output.stdout_lines }}" + when: + - pgs12cis_rule_6_5 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_6.5 + +- name: "NOTSCORED | 6.6 | AUDIT | Ensure 'User' Runtime Parameters are Configured" + block: + - name: "NOTSCORED | 6.6 | AUDIT | Ensure 'User' Runtime Parameters are Configured | Get user settings" + command: psql -c "SELECT name, setting FROM pg_settings WHERE context = 'user' ORDER BY 1;" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_6_6_user_settings + + - name: "NOTSCORED | 6.6 | AUDIT | Ensure 'User' Runtime Parameters are Configured | Message out settings" + debug: + msg: + - "Alert! In the matter of a user session, the login sessions must be validated that it is not executing undesired parameter changes." + - "In the matter of attributes that have been changed in entities, they must be manually reverted to its default value(s)." + - "Below are the user settings" + - "{{ pgs12cis_6_6_user_settings.stdout_lines }}" + when: + - pgs12cis_rule_6_6 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_6.6 + +- name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used" + block: + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Check for Fips being enabled" + command: cat /proc/sys/crypto/fips_enabled + changed_when: false + failed_when: false + register: pgs12cis_ubuntu_fips_enabled + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Alert that FIPS is not enabled" + debug: + msg: + - "Alert! The system does not have FIPS 140-2 enabled" + - "This is a finding and your system will not conform to control 6.7 of the PostgreSQL CIS" + - "Enabling FIPS 140-2 and installing related packages requires an Ubuntu Advantage accout" + - "Please follow the guide in this link to enable FIPS 140-2" + - "https://security-certs.docs.ubuntu.com/en/fips" + when: + - pgs12cis_ubuntu_fips_enabled.stdout == "" or + "'0' in pgs12cis_ubuntu_fips_enabled.stdout" + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Alert that FIPS is enabled" + debug: + msg: "Good News! 
FIPS 140-2 is enabled" + when: "'1' in pgs12cis_ubuntu_fips_enabled.stdout" + when: + - pgs12cis_rule_6_7 + - pgs12cis_section6 + tags: + - level1-postgresqlonlinux + - scored + - audit + - ubuntu + - rule_6.7 + +- name: "SCORED | 6.8 | PATCH | Ensure SSL is enabled and configured correctly" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + notify: restart postgresql + with_items: + - { regexp: '^ssl =|^#ssl =', line: 'ssl = on', insertafter: '# - SSL -' } + - { regexp: '^ssl_ciphers =| ssl_ciphers =', line: "ssl_ciphers = '{{ pgs12cis_ssl_ciphers }}'", insertafter: 'ssl = on' } + - { regexp: '^ssl_cert_file =|^#ssl_cert_file =', line: "ssl_cert_file = '{{ pgs12cis_ssl_cert_file }}'", insertafter: 'ssl = on' } + - { regexp: '^ssl_key_file =|ssl_key_file =', line: "ssl_key_file = '{{ pgs12cis_ssl_key_file }}'", insertafter: 'ssl = on' } + - { regexp: '^password_encryption =|^#password_encryption =', line: 'password_encryption = {{ pgs12cis_password_encryption }}', insertafter: 'ssl = on' } + when: + - pgs12cis_rule_6_8 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_6.8 + +- name: "NOTSCORED | 6.9 | PATCH | Ensure the pgcrypto extension is installed and configured correctly" + command: psql -c "CREATE EXTENSION pgcrypto;" + changed_when: "'CREATE' in pgs12cis_6_9_pgcrypto_status.stdout" + failed_when: false + register: pgs12cis_6_9_pgcrypto_status + become_user: "{{ pgs12cis_pgs12_usr }}" + when: + - pgs12cis_rule_6_9 + - pgs12cis_section6 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - ubuntu + - rule_6.9 + +# Section 7 +# Replication +- name: "NOTSCORED | 7.1 | | PATCH | Ensure a replication-only user is created and used for streaming replication" + block: + - name: "NOTSCORED | 7.1 | | AUDIT | Ensure a replication-only user is created and used for streaming replication | Create replication user" + shell: psql -t -c "select rolname from pg_roles where rolreplication is true;" | sed -r '/^\s*$/d' | awk '{$1=$1};1' + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_7_1_replication_users + + - name: "NOTSCORED | 7.1 | | PATCH | Ensure a replication-only user is created and used for streaming replication | Create replication user" + command: psql -c "create user {{ pgs12cis_replication_user }} REPLICATION encrypted password '{{ pgs12cis_replication_user_pw }}';" + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + when: "pgs12cis_replication_user not in pgs12cis_7_1_replication_users.stdout" + + - name: "NOTSCORED | 7.1 | | PATCH | Ensure a replication-only user is created and used for streaming replication | Create replication user pg_hba.conf entry" + lineinfile: + path: "{{ pgs12cis_hba_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^this is just a place holder value', line: '# The rule below was inserted via CIS automation', insertafter: '^# TYPE DATABASE USER'} + - { regexp: '^hostssl replication {{ pgs12cis_replication_user }}', line: "hostssl replication {{ pgs12cis_replication_user }} 0.0.0.0/0 {{ pgs12cis_replication_user_enc_method }}", insertafter: '^# The rule below was inserted via CIS automation' } + notify: restart postgresql + when: "pgs12cis_replication_user not in pgs12cis_7_1_replication_users.stdout" + 
when: + - pgs12cis_rule_7_1 + - pgs12cis_section7 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - patch + - ubuntu + - rule_7.1 + +- name: "NOTSCORED | 7.2 | AUDIT | Ensure base backups are configured and functional" + debug: + msg: + - "Alert! To conform to CIS standards make sure to use base backups" + when: + - pgs12cis_rule_7_2 + - pgs12cis_section7 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_7.2 + +- name: "SCORED | 7.3 | PATCH | Ensure WAL archiving is configured and functional" + lineinfile: + path: "{{ pgs12cis_config_file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + notify: restart postgresql + with_items: + - { regexp: '^archive_mode =|^#archive_mode =', line: 'archive_mode = on' } + - { regexp: '^archive_command =|^#archive_command =', line: "archive_command = '{{ pgs12cis_archive_command }}'" } + when: + - pgs12cis_rule_7_3 + - pgs12cis_section7 + tags: + - level1-postgresqlonlinux + - scored + - patch + - ubuntu + - rule_7.3 + +- name: "NOTSCORED | 7.4 | AUDIT | Ensure streaming replication parameters are configured correctly" + debug: + msg: + - "Alert! Please ensure streaming replication parameters are configured" + when: + - pgs12cis_rule_7_4 + - pgs12cis_section7 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_7.4 + +# Section 8 +# Special Configuration Considerations +- name: "NOTSCORED | 8.1 | AUDIT | Ensure PostgreSQL configuration files are outside the data cluster" + block: + - name: "NOTSCORED | 8.1 | AUDIT | Ensure PostgreSQL configuration files are outside the data cluster | Get file list" + command: psql -c "select name, setting from pg_settings where name ~ '.*_file$';" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_8_1_files_used + + - name: "NOTSCORED | 8.1 | AUDIT | Ensure PostgreSQL configuration files are outside the data cluster | Display file listings" + debug: + msg: + - "Alert! Please review the settings and determine appropriate locations and file permissions for the configuration files based on organization's security policies" + - "{{ pgs12cis_8_1_files_used.stdout_lines }}" + when: + - pgs12cis_rule_8_1 + - pgs12cis_section8 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_8.1 + +- name: "NOTSCORED | 8.2 | AUDIT | Ensure PostgreSQL subdirectory locations are outside the data cluster" + block: + - name: "NOTSCORED | 8.2 | AUDIT | Ensure PostgreSQL subdirectory locations are outside the data cluster | Get directory settings" + command: psql -c "select name, setting from pg_settings where (name ~ '_directory$' or name ~ '_tablespace');" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_8_2_directory_settings + + - name: "NOTSCORED | 8.2 | AUDIT | Ensure PostgreSQL subdirectory locations are outside the data cluster | Display settings" + debug: + msg: + - "Alert! Please inspect the file and directory permissions for all returned values. Only superusers and authorized users" + - "should have access control rights for htese files and directories." 
+ - "{{ pgs12cis_8_2_directory_settings.stdout_lines }}" + when: + - pgs12cis_rule_8_2 + - pgs12cis_section8 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_8.2 + +- name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured" + block: + - name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured | Check for pgBackRest" + command: which pgbackrest + changed_when: false + failed_when: false + register: pgs12cis_pgbackrest_status + + - name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured | Missing pgBackRest" + debug: + msg: + - "Alert! You do not have pgBackRest installed. Please install and configure pgBackRest" + when: "'no pgbackrest' in pgs12cis_pgbackrest_status.stdout" + + - name: "NOTSCORED | 8.3 | AUDIT | Ensure the backup and restore tool, 'pgBackRest', is installed and configured | pgBackRest installed" + debug: + msg: + - "Good News! You have pgBackRest installed. Make sure it is configured appropriatly for things like stanza name, backup location, retention policy, logging, etc" + when: "'no pgbackrest' not in pgs12cis_pgbackrest_status.stdout" + when: + - pgs12cis_rule_8_3 + - pgs12cis_section8 + tags: + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_8.3 + +- name: "NOTSCORED | 8.4 | AUDIT | Ensure miscellaneous configuration settings are correct" + block: + - name: "NOTSCORED | 8.4 | AUDIT | Ensure miscellaneous configuration settings are correct | Get settings" + command: psql -c "select name, setting from pg_settings where name in ('external_pid_file', 'unix_socket_directories','shared_preload_libraries','dynamic_library_path',' local_preload_libraries','session_preload_libraries');" + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_8_4_settings + + - name: "NOTSCORED | 8.4 | AUDIT | Ensure miscellaneous configuration settings are correct | Display settings" + debug: + msg: + - "Alert! 
Please review the settings to confirm permissions are based on organization's policies" + - "{{ pgs12cis_8_4_settings.stdout_lines }}" + when: + - pgs12cis_rule_8_4 + - pgs12cis_section8 + tags: + - level1-postgresql + - level1-postgresqlonlinux + - notscored + - audit + - ubuntu + - rule_8.4 diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/main.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/main.yml new file mode 100644 index 0000000..b6d1fbd --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- name: import preliminary tasks + import_tasks: prelim.yml + +- name: RedHat Fixes + import_tasks: cis_pgs12_redhat_fixes.yml + when: + - ansible_distribution_file_variety == 'RedHat' + - ansible_distribution_major_version == "7" or + ansible_distribution_major_version == "8" + tags: + - RedHat + - CentOS + +- name: Ubuntu Fixes + import_tasks: cis_pgs12_ubuntu_fixes.yml + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_major_version == "16" or + ansible_distribution_major_version == "18" or + ansible_distribution_major_version == "20" + tags: + - Ubuntu diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/postgresql12_install.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/postgresql12_install.yml new file mode 100644 index 0000000..4d2aaa4 --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/postgresql12_install.yml @@ -0,0 +1,78 @@ +--- +- name: "PRELIM | Install and Start PostgreSQL | RHEL/CentOS" + block: + - name: "PRELIM | Install and Start PostgreSQL | Add PostgreSQL Repo RHEL/CentOS 8" + dnf: + name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-8-x86_64/pgdg-redhat-repo-latest.noarch.rpm + state: present + when: ansible_distribution_major_version == "8" + + - name: "PRELIM | Install and Start PostgreSQL | Add PostgreSQL Repo RHEL/CentOS 7" + yum: + name: https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm + state: present + when: ansible_distribution_major_version == "7" + + - name: "PRELIM | Install and Start PostgreSQL | Disable built in PostgreSQL RHEL/CentOS 8" + shell: dnf -y module disable postgresql + register: prelim_disable_pgs_old_rh8 + changed_when: "'Disabling modules' in prelim_disable_pgs_old_rh8.stdout" + when: ansible_distribution_major_version == "8" + + - name: "PRELIM | Install and Start PostgreSQL | Install PostgreSQL 12 RHEL/CentOS 8" + dnf: + name: ['postgresql12', 'postgresql12-server'] + state: present + when: ansible_distribution_major_version == "8" + + - name: "PRELIM | Install and Start PostgreSQL | Install PostgreSQL 12 RHEL/CentOS 7" + yum: + name: ['postgresql12', 'postgresql12-server'] + state: present + when: ansible_distribution_major_version == "7" + + - name: "PRELIM | Install and Start PostgreSQL | Initialize PostgreSQL 12 RHEL/CentOS 7 and 8" + command: /usr/pgsql-12/bin/postgresql-12-setup initdb + failed_when: false + + - name: "PRELIM | PRELIM | Install and Start PostgreSQL | Start and enable service RHEL/CentOS 7 and 8" + service: + name: postgresql-12 + enabled: yes + state: started + + - name: "PRELIM | PRELIM | Install and Start PostgreSQL | Set postgres user pw" + user: + name: postgres + password: "{{ pgs12cis_postgres_user_pw }}" + when: ansible_distribution_file_variety == 'RedHat' + +- name: "PRELIM | Install and Start PostgreSQL | Ubuntu" + block: + - name: "PRELIM | Install and Start PostgreSQL | Install wget for future task | Ubuntu" + apt: + name: wget + state: present + + - 
name: "PRELIM | Install and Start PostgreSQL | Get PostgreSQL GPG key | Ubuntu" + shell: wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add - + + - name: "PRELIM | Install and Start PostgreSQL | Add PostgreSQL repo | Ubuntu" + shell: echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" |sudo tee /etc/apt/sources.list.d/pgdg.list + + - name: "PRELIM | Install and Start PostgreSQL | Install PostgreSQL 12 | Ubuntu" + apt: + name: ['postgresql-12','postgresql-client-12'] + state: present + update_cache: yes + + - name: "PRELIM | Install and Start PostgreSQL | Start and enable service | Ubuntu" + service: + name: postgresql + state: started + enabled: yes + + - name: "PRELIM | Install and Start PostgreSQL | Change postgres user default password | Ubuntu" + command: psql -c "alter user postgres with password 'StrongAdminP@ssw0rd'" + become_user: postgres + when: ansible_distribution == 'Ubuntu' diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/prelim.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/prelim.yml new file mode 100644 index 0000000..8184ac2 --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/prelim.yml @@ -0,0 +1,111 @@ +--- +- name: "PRELIM | Install and Start PostgreSQL" + include_tasks: postgresql12_install.yml + when: pgs12cis_install_postgresql + +- name: "PRELIM | Install ACL when needed" + apt: + name: acl + state: present + when: ansible_distribution == 'Ubuntu' + +- name: "PRELIM | Get postgres config file path" + shell: psql -U postgres -c 'SHOW config_file' | tail -n+3 | head -n 1 | cut -c2- + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_config_file_get + +- name: "PRELIM | Get postgres config file path" + shell: psql -U postgres -c 'SHOW hba_file' | tail -n+3 | head -n 1 | cut -c2- + changed_when: false + failed_when: false + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_hba_config_file_get + +- name: "PRELIM | Set variable for service name" + set_fact: + pgs12cis_postgresql_service_auto: "{{ (ansible_distribution == 'Ubuntu') | ternary('postgresql', 'postgresql-12') }}" + +- name: "PRELIM | 2.1 | Get postgresql user home folder" + shell: "grep {{ pgs12cis_pgs12_usr }} /etc/passwd | cut -f6 -d:" + changed_when: false + failed_when: false + register: pgs12cis_pgs12_usr_home_dir_get + when: + - pgs12cis_rule_2_1 + +- name: "PRELIM | Finding the PostgreSQL Configured Data Directory (PGDATA)" + block: + - name: "PRELIM | Finding the PostgreSQL Configured Data Directory (PGDATA) | Get path" + shell: psql -c 'show data_directory' | tail -n+3 | head -n 1 | cut -c2- + check_mode: false + changed_when: false + register: pgs12cis_pgdata_cmd + become: true + become_user: "{{ pgs12cis_pgs12_usr }}" + + - name: "PRELIM | Finding the PostgreSQL Configured Data Directory (PGDATA) | Make var easier to use" + set_fact: + pgs12cis_data_folder: "{{ pgs12cis_pgdata_cmd.stdout }}" + when: + - pgs12cis_rule_3_1_2 + +- name: "PRELIM | 6.7 | Check if /boot or /boot/efi reside on separate partitions" + shell: df --output=target /boot | tail -n 1 + changed_when: false + check_mode: false + register: pgs12cis_prelim_boot_part + when: + - pgs12cis_rule_6_7 + - pgs12cis_section6 + +- name: "PRELIM | 6.7 | Check whether machine is UEFI-based" + stat: + path: /sys/firmware/efi + register: pgs12cis_prelim_sys_firmware_efi + when: + - pgs12cis_rule_6_7 + - pgs12cis_section6 + +- name: "PRELIM | 6.8 | Generate Self-Signed Cert" + block: + - name: 
"PRELIM | 6.8 | Generate Self-Signed Cert | Check for existing cert" + find: + paths: "{{ pgs12cis_data_folder }}" + patterns: '*.crt,*.key' + become_user: "{{ pgs12cis_pgs12_usr }}" + register: pgs12cis_certs_status + + - name: "PRELIM | 6.8 | Generate Self-Signed Cert | Create self-signed certificates" + command: 'openssl req -new -x509 -days 365 -nodes -text -out {{ pgs12cis_data_folder }}/server.crt -keyout {{ pgs12cis_data_folder }}/server.key -subj "/CN={{ ansible_nodename }}"' + when: pgs12cis_certs_status.matched == 0 + + - name: "PRELIM | 6.8 | Generate Self-Signed Cert | Set permissions on key file" + file: + path: "{{ pgs12cis_data_folder }}/server.key" + mode: og-rwx + owner: "{{ pgs12cis_pgs12_usr }}" + group: "{{ pgs12cis_pgs12_usr }}" + when: pgs12cis_certs_status.matched == 0 + when: + - pgs12cis_rule_6_8 + - pgs12cis_section6 + - pgs12cis_create_selfsigned_cert + +- name: "PRELIM | 6.9 | Install PostgreSQL contrib package" + block: + - name: "PRELIM | 6.9 | Install PostgreSQL contrib package | RHEL 8" + dnf: + name: postgresql12-contrib + state: present + when: ansible_distribution_major_version == "8" + + - name: "PRELIM | 6.9 | Install PostgreSQL contrib package | RHEL 7" + yum: + name: postgresql12-contrib + state: present + when: ansible_distribution_major_version == "7" + when: + - pgs12cis_rule_6_9 + - pgs12cis_section6 diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/rhel7_fips.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/rhel7_fips.yml new file mode 100644 index 0000000..32e572a --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tasks/rhel7_fips.yml @@ -0,0 +1,108 @@ +--- +- name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | PostgreSQL must implement NIST FIPS 140-2 validated cryptographic modules to generate and validate cryptographic hashes (RHEL7)." + block: + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | PostgreSQL must implement NIST FIPS 140-2 validated cryptographic modules to generate and validate cryptographic hashes (RHEL7)." + yum: + name: dracut-fips + state: present + notify: rebuild initramfs + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used| Check if prelink package is installed" + command: rpm -q prelink + args: + warn: no + changed_when: no + failed_when: no + check_mode: no + register: pgs12CIS_6_7_prelink_installed + + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used| Disable prelinking." 
+ lineinfile: + dest: /etc/sysconfig/prelink + regexp: ^#?PRELINKING + line: PRELINKING=no + when: pgs12CIS_6_7_prelink_installed.rc == 0 + notify: undo existing prelinking + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Check for GRUB_CMDLINE_LINUX in /etc/default/grub" + command: grep -P '^\s*GRUB_CMDLINE_LINUX=".*"$' /etc/default/grub + check_mode: no + failed_when: no + changed_when: pgs12cis_6_7_default_grub_missing_audit.rc > 0 + register: pgs12cis_6_7_default_grub_missing_audit + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | parse sane GRUB_CMDLINE_LINUX from /proc/cmdline" + command: grep -oP ' ro \K.*?(?= ?LANG=)' /proc/cmdline + check_mode: no + changed_when: no + failed_when: pgs12cis_grub_cmdline_linux_audit.rc > 1 + when: pgs12cis_6_7_default_grub_missing_audit is changed + register: pgs12cis_grub_cmdline_linux_audit + + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Copy over a sane /etc/default/grub" + template: + src: etc_default_grub.j2 + dest: /etc/default/grub + owner: root + group: root + mode: 0644 + vars: + grub_cmdline_linux: "{{ pgs12cis_grub_cmdline_linux_audit.stdout }}" + when: pgs12cis_grub_cmdline_linux_audit is changed + + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | fips=1 must be in /etc/default/grub" + replace: + path: /etc/default/grub + regexp: "{{ pgs12cis_regexp_quoted_params }}" + replace: "{{ pgs12cis_replace_quoted_params }}" + vars: + key: GRUB_CMDLINE_LINUX + param: fips + value: 1 + append: yes # this is the default + when: + - not ansible_check_mode or + pgs12cis_6_7_default_grub_missing_audit is not changed + notify: make grub2 config + + - name: "SCORED | 6.7 | PATCH | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | If /boot or /boot/efi reside on separate partitions, the kernel parameter boot= must be added to the kernel command line." 
+ replace: + path: /etc/default/grub + regexp: "{{ pgs12cis_regexp_quoted_params }}" + replace: "{{ pgs12cis_replace_quoted_params }}" + with_items: + - "{{ ansible_mounts | json_query(query) }}" + vars: + query: "[?mount=='{{ pgs12cis_boot_part }}'] | [0]" + key: GRUB_CMDLINE_LINUX + param: boot + value: UUID={{ item.uuid }} + insert: yes + when: + - pgs12cis_boot_part not in ['/', ''] + - not ansible_check_mode or + pgs12cis_6_7_default_grub_missing_audit is not changed + notify: make grub2 config + register: result + + - name: "SCORED | 6.7 | AUDIT | Ensure FIPS 140-2 OpenSSL Cryptography Is Used | Verify kernel parameters in /etc/default/grub" + command: grep -P '^\s*GRUB_CMDLINE_LINUX=".*(?<=[" ]){{ item | regex_escape }}(?=[" ]).*"$' /etc/default/grub + check_mode: no + with_items: + - fips=1 + - boot=UUID={{ ansible_mounts | json_query(query) }} + vars: + query: "[?mount=='{{ pgs12cis_boot_part }}'].uuid | [0]" + register: pgs12cis_6_7_audit + when: + - not ansible_check_mode or + pgs12cis_6_7_default_grub_missing_audit is not changed + - pgs12cis_boot_part not in ['/', ''] or + 'boot=' not in item + changed_when: + - ansible_check_mode + - pgs12cis_6_7_audit is failed + failed_when: + - pgs12cis_6_7_audit is failed + - not ansible_check_mode or + pgs12cis_6_7_audit.rc > 1 diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tests/inventory b/Linux/ansible-lockdown/POSTGRES-12-CIS/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/tests/test.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/tests/test.yml new file mode 100644 index 0000000..a31e13a --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - . 
\ No newline at end of file diff --git a/Linux/ansible-lockdown/POSTGRES-12-CIS/vars/main.yml b/Linux/ansible-lockdown/POSTGRES-12-CIS/vars/main.yml new file mode 100644 index 0000000..b945adb --- /dev/null +++ b/Linux/ansible-lockdown/POSTGRES-12-CIS/vars/main.yml @@ -0,0 +1,21 @@ +--- +# this allows us to insert a name=value into a line of the format: +# key="name1=value1 name2=value2 nameN=valueN" +pgs12cis_regexp_quoted_params: ^({{ pgs12cis_re_qp_key }})({{ pgs12cis_re_qp_other_params }})({{ + pgs12cis_re_qp_param }}?)({{ pgs12cis_re_qp_other_params }})({{ pgs12cis_re_qp_key_end }}) +pgs12cis_replace_quoted_params: \1\2{{ pgs12cis_re_qp_insert | ternary('', ' ') }}{{ param }}={{ + value }}{{ pgs12cis_re_qp_insert | ternary(' ', '') }}\4\5 + +# none of these regexes create capture groups +pgs12cis_re_qp_key: (?:\s*{{ key }}=") +pgs12cis_re_qp_param: (?:{{ pgs12cis_re_qp_insert | ternary('', ' ?') }}{{ + pgs12cis_re_qp_param_start }}{{ param }}=.*?{{ + pgs12cis_re_qp_param_end }}{{ pgs12cis_re_qp_insert | ternary(' ?', '') }}) +pgs12cis_re_qp_other_params: (?:(?!{{ pgs12cis_re_qp_param }}.*).)*{{ + pgs12cis_re_qp_insert | ternary('?', '') }} +pgs12cis_re_qp_param_start: (?<=[" ]) +pgs12cis_re_qp_param_end: (?=[" ]) +pgs12cis_re_qp_key_end: (?:" *) + +# insert the parameter at the beginning or append to the end, default append +pgs12cis_re_qp_insert: "{{ insert | default(not (append | default(true))) }}" \ No newline at end of file diff --git a/Linux/ansible-lockdown/RHEL7-CIS/.ansible-lint b/Linux/ansible-lockdown/RHEL7-CIS/.ansible-lint new file mode 100644 index 0000000..f2a7e7c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/.ansible-lint @@ -0,0 +1,11 @@ +parseable: true +quiet: true +skip_list: + - '204' + - '305' + - '303' + - '403' + - '306' + - '602' +use_default_rules: true +verbosity: 0 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/.gitattributes b/Linux/ansible-lockdown/RHEL7-CIS/.gitattributes new file mode 100644 index 0000000..b2daffb --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/.gitattributes @@ -0,0 +1,6 @@ +# adding github settings to show correct language +*.sh linguist-detectable=true +*.yml linguist-detectable=true +*.ps1 linguist-detectable=true +*.j2 linguist-detectable=true +*.md linguist-documentation diff --git a/Linux/ansible-lockdown/RHEL7-CIS/.yamllint b/Linux/ansible-lockdown/RHEL7-CIS/.yamllint new file mode 100644 index 0000000..6e34374 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/.yamllint @@ -0,0 +1,22 @@ +ignore: | + tests/ + molecule/ + .gitlab-ci.yml + *molecule.yml + +extends: default + +rules: + indentation: + # Requiring 2 space indentation + spaces: 4 + # Requiring consistent indentation within a file, either indented or not + indent-sequences: consistent + truthy: disable + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/Linux/ansible-lockdown/RHEL7-CIS/CONTRIBUTING.rst b/Linux/ansible-lockdown/RHEL7-CIS/CONTRIBUTING.rst new file mode 100644 index 0000000..c8fa576 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/CONTRIBUTING.rst @@ -0,0 +1,66 @@ +Contributing to MindPoint Group Projects +======================================== + +Rules +----- +1) All commits must be GPG signed (details in Signing section) +2) All commits must have Signed-off-by (Signed-off-by: Joan Doe ) in the commit message (details in Signing section) +3) All work is done in your own branch +4) All pull requests go into the devel branch. 
There are automated checks for signed commits, signoff in commit message, and functional testing) +5) Be open and nice to eachother + +Workflow +-------- +- Your work is done in your own individual branch. Make sure to to Signed-off and GPG sign all commits you intend to merge +- All community Pull Requests are into the devel branch. There are automated checks for GPG signed, Signed-off in commits, and functional tests before being approved. If your pull request comes in from outside of our repo, the pull request will go into a staging branch. There is info needed from our repo for our CI/CD testing. +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release +Signing your contribution +------------------------- + +We've chosen to use the Developer's Certificate of Origin (DCO) method +that is employed by the Linux Kernel Project, which provides a simple +way to contribute to MindPoint Group projects. + +The process is to certify the below DCO 1.1 text +:: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +:: + +Then, when it comes time to submit a contribution, include the +following text in your contribution commit message: + +:: + + Signed-off-by: Joan Doe + +:: + + +This message can be entered manually, or if you have configured git +with the correct `user.name` and `user.email`, you can use the `-s` +option to `git commit` to automatically include the signoff message. diff --git a/Linux/ansible-lockdown/RHEL7-CIS/ChangeLog.md b/Linux/ansible-lockdown/RHEL7-CIS/ChangeLog.md new file mode 100644 index 0000000..3d99e02 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/ChangeLog.md @@ -0,0 +1,77 @@ +# release CIS RedHat Enterprise Linux 7 Benchmark v3.0.1 - 09-21-2020 + +## Whats new 1.0.0 + +- New auditing tool all controlled via defaults main. 
run on host using [goss](https://github.com/aelsabbahy/goss) +- reorder of rules inline with CIS changes +- If Python3 discovered adds the epel repo to install python-rpm and then disables the repo after installing +- Adding of the goss module to the library path +- Python3 now default for control node (should be backward compatible in setup) +- Grub password no longer created using passlib needs to be supplied as variable + - assert has been created if rule still enabled and password not changed +- Use of the packages facts module + +## Major 1.1 + +- Upgrade to CIS 3.1.1 + +### Highlights + +- rhel7cis_allow_reboot is now an option to reboot at the end of remediation - default false +- linting - including command replaced with shell +- section 1 + - 1.1 rewritten to providing better auditing and output + - 1.3 sudo no longer required move to section 5 + - 1.4.1 bootloader password reworked + - other groups changes increased tests + - more controls for GDM +- section 2 + - reorder of server services + - rsyncd masked + - 2.5 - 2.4 +- section 3 + - some controls now L2 + - tidy of some rules + - 3.1 disable ipv6 now via grub 9No longer sysctl +- section 4 + - tidy up +- section 5 + - sudo moved from 1.3 to 5.2 + - Other controls changed numbers + - ssh kex, mac and ciphers updates +- section 6 + - many control orders changed + - 6.2.11 create missing home dirs rewritten + +## Whats new in 1.0.3 + +- Thanks to Thulium-Drake + - 6.1.12 - rework audit (no score) control #204 + - 4.1.1.3 regex improvement #202 + +- Thanks to jlosito + - 1.2.1& 1.2.2- allow centos gpg key check #215 + - 5.1.1 & 5.2.22 -typo fixes #221 + - 5.4.1.4 - idempotence and Inactive whitelist added to defaults main #222 + - 5.5 - Idempotence improvement #213 + +- 4.2.1.4 - Idempotence improvement #217 + - thanks to andreyzher + +- 1.5.2 moved grub capture to prelim + +- 5.6 ability to supply an sugroup rather than default to wheel + - thanks to ihotz #234 + +## Whats new in 1.0.2 + +- renamed goss library and aligned ansible.cfg file + - thanks to Thulium-Drake + +- selinux variable in defaults main - default enforcing + - 1.7.1.3-5 now idempotent + +## Whats new 1.0.1 + +- Fixed typos +- Added audit output file permissions diff --git a/Linux/ansible-lockdown/RHEL7-CIS/LICENSE b/Linux/ansible-lockdown/RHEL7-CIS/LICENSE new file mode 100644 index 0000000..3ae3c23 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Mindpoint Group / Lockdown Enterprise / Lockdown Enterprise Releases + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Linux/ansible-lockdown/RHEL7-CIS/README.md b/Linux/ansible-lockdown/RHEL7-CIS/README.md new file mode 100644 index 0000000..3217d24 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/README.md @@ -0,0 +1,155 @@ +RHEL 7 CIS +================ + +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/RHEL7-CIS/CommunityToDevel?label=Devel%20Build%20Status&style=plastic) +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/RHEL7-CIS/DevelToMain?label=Main%20Build%20Status&style=plastic) +![Release](https://img.shields.io/github/v/release/ansible-lockdown/RHEL7-CIS?style=plastic) + +Configure RHEL/Centos 7 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) compliant +Untested on OEL + +Based on [CIS RedHat Enterprise Linux 7 Benchmark v3.1.1 - 05-21-2021 ](https://www.cisecurity.org/cis-benchmarks/) + +Caution(s) +--------- + +This role **will make changes to the system** which may have unintended consequences. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted. + +Check Mode is not supported! The role will complete in check mode without errors, but it is not supported and should be used with caution. The RHEL7-CIS-Audit role or a compliance scanner should be used for compliance checking over check mode. + +This role was developed against a clean install of the Operating System. If you are implementing to an existing system please review this role for any site specific changes that are needed. + +To use release version please point to main branch and relevant release for the cis benchmark you wish to work with. + +Coming from a previous release +------------------------------ + +CIS release always contains changes, it is highly recommended to review the new references and available variables. This have changed significantly since ansible-lockdown initial release. +This is now compatible with python3 if it is found to be the default interpreter. This does come with pre-requisites which it configures the system accordingly. + +Further details can be seen in the [Changelog](./ChangeLog.md) + +Auditing (new) +-------------- + +This can be turned on or off within the defaults/main.yml file with the variable rhel7cis_run_audit. The value is false by default, please refer to the wiki for more details. The defaults file also populates the goss checks to check only the controls that have been enabled in the ansible role. + +This is a much quicker, very lightweight, checking (where possible) config compliance and live/running settings. + +A new form of auditing has been developed, by using a small (12MB) go binary called [goss](https://github.com/aelsabbahy/goss) along with the relevant configurations to check. Without the need for infrastructure or other tooling. +This audit will not only check the config has the correct setting but aims to capture if it is running with that configuration also trying to remove [false positives](https://www.mindpointgroup.com/blog/is-compliance-scanning-still-relevant/) in the process. + +Refer to [RHEL7-CIS-Audit](https://github.com/ansible-lockdown/RHEL7-CIS-Audit). 
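+ +As a quick usage sketch (the inventory path and the site.yml playbook below are assumptions for illustration only, and the exact audit variable names should be checked against defaults/main.yml for your release), the audit can be switched on for a single run with extra vars rather than by editing the role: + +```sh +ansible-playbook -i hosts site.yml -e "rhel7cis_run_audit=true" +``` +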
+ +Documentation +------------- + +- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown) +- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise) +- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration) +- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise) +- [Wiki](https://github.com/ansible-lockdown/RHEL7-CIS/wiki) +- [Repo GitHub Page](https://ansible-lockdown.github.io/RHEL7-CIS/) + +Requirements +------------ + +**General:** + +- Basic knowledge of Ansible; below are some links to the Ansible documentation to help get started if you are unfamiliar with Ansible + + - [Main Ansible documentation page](https://docs.ansible.com) + - [Ansible Getting Started](https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html) + - [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html) + - [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html) +- Functioning Ansible and/or Tower installed, configured, and running. This includes all of the base Ansible/Tower configurations, needed packages installed, and infrastructure setup. +- Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consequences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file or the [Main Variables Wiki Page](https://github.com/ansible-lockdown/RHEL7-CIS/wiki/Main-Variables). + +**Technical Dependencies:** + +- Running Ansible/Tower setup (this role is tested against Ansible version 2.9.1 and newer) +- Python3 Ansible run environment +- python-def (should be included in RHEL/CentOS 7) - First task sets up the prerequisites (Tag pre-reqs) for python3 and python2 (where required) + - libselinux-python + - python3-rpm (package used by py3 to use the rpm pkg) + +Role Variables +-------------- + +This role is designed so that the end user should not have to edit the tasks themselves. All customizing should be done via the defaults/main.yml file or with extra vars within the project, job, workflow, etc. These variables can be found [here](https://github.com/ansible-lockdown/RHEL7-CIS/wiki/Main-Variables) in the Main Variables Wiki page. All variables are listed there along with descriptions. + +Tags +---- + +There are many tags available for added control precision. Each control has its own set of tags noting what level, if it's scored/notscored, what OS element it relates to, if it's a patch or audit, and the rule number. + +Below is an example of the tag section from a control within this role. Using this example, if you set your run to skip all controls with the tag services, this task will be skipped. The opposite can also happen where you run only controls tagged with services. + +```sh + tags: + - level1 + - scored + - avahi + - services + - patch + - rule_2.2.4 +``` + +Example Audit Summary +--------------------- + +The audit when run from ansible also uses all the specific variables, so will test relevant variables based on host configuration settings. +This is based on a vagrant image, based upon a pre-configured image for filesystem layout etc. e.g. No Gui or firewall. +Note: More tests are run during audit as we are checking config and running state.
+ +```sh +TASK [RHEL7-CIS : Show Audit Summary] ****************************************************************************************************************************************************************************** +****** +ok: [cent7_efi] => { + "msg": [ + "The pre remediation results are: Count: 380, Failed: 121, Duration: 10.399s.", + "The post remediation results are: Count: 380, Failed: 10, Duration: 12.324s.", + "Full breakdown can be found in /var/tmp", + "" + ] +} + +PLAY RECAP ****************************************************************************************************************************************************************************************************************** +****** +cent7_efi : ok=274 changed=143 unreachable=0 failed=0 skipped=140 rescued=0 ignored=0 + +``` + +Branches +-------- + +- **devel** - This is the default branch and the working development branch. Community pull requests will pull into this branch +- **main** - This is the release branch +- **reports** - This is a protected branch for our scoring reports, no code should ever go here +- **gh-pages** - This is the github pages branch +- **all other branches** - Individual community member branches + +Community Contribution +---------------------- + +We encourage you (the community) to contribute to this role. Please read the rules below. + +- Your work is done in your own individual branch. Make sure to Signed-off and GPG sign all commits you intend to merge. +- All community Pull Requests are pulled into the devel branch +- Pull Requests into devel will confirm your commits have a GPG signature, Signed-off, and a functional test before being approved +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release + +Support +------- + +This is a community project at its core and will be managed as such. 
+ +If you are interested in dedicated support to assist or provide bespoke setups + +- [Ansible Counselor](https://www.mindpointgroup.com/products/ansible-counselor-on-demand-ansible-services-and-consulting/) +- [Try us out](https://engage.mindpointgroup.com/try-ansible-counselor) + +Credits +------- + +This repo originated from work done by [Sam Doran](https://github.com/samdoran/ansible-role-stig) diff --git a/Linux/ansible-lockdown/RHEL7-CIS/ansible.cfg b/Linux/ansible-lockdown/RHEL7-CIS/ansible.cfg new file mode 100644 index 0000000..3939906 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/ansible.cfg @@ -0,0 +1,25 @@ +[defaults] +host_key_checking=False +display_skipped_hosts=True +system_warnings=False +deprecation_warnings=False +command_warnings=False +nocows=1 +retry_files_save_path=/dev/null +library=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:./library + +[privilege_escalation] + +[paramiko_connection] +record_host_keys=False + +[ssh_connection] +transfer_method=scp + +[accelerate] + +[selinux] + +[colors] + +[diff] \ No newline at end of file diff --git a/Linux/ansible-lockdown/RHEL7-CIS/defaults/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/defaults/main.yml new file mode 100644 index 0000000..41ece4c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/defaults/main.yml @@ -0,0 +1,605 @@ +--- +# rhel7cis_notauto will auto install intrusive items +rhel7cis_notauto: false + +# Disable/Enable whole sections (Default is for all) +rhel7cis_section1: true +rhel7cis_section2: true +rhel7cis_section3: true +rhel7cis_section4: true +rhel7cis_section5: true +rhel7cis_section6: true + +# Disable/Enable OS check +rhel7cis_os_check: true + +# Should always reboot after this number of changes allow this to be automated as part of remediation +rhel7cis_allow_reboot: false + +## Python Binary +## This is used for python3 Installations where python2 OS modules are used in ansible +python2_bin: /bin/python2.7 + + +## Benchmark name used by auditing control role +# The audit variable found at the base +benchmark: RHEL7-CIS + +#### Basic external goss audit enablement settings #### +#### Precise details - per setting can be found at the bottom of this file #### + +### Goss is required on the remote host +setup_audit: false +# How to retrieve goss +# Options are copy or download - detailed settings at the bottom of this file +# you will need access to either github or the file already downloaded +get_goss_file: download + +# how to get audit files onto host options +# options are git/copy/get_url +audit_content: git + +# Timeout for those cmds that take longer to run where timeout set +audit_cmd_timeout: 30000 + +# enable audits to run - this runs the audit and gets the latest content +run_audit: false + +### End Goss enablements #### +#### Detailed settings found at the end of this document #### + + +# Enable/Disable SELinux +rhel7cis_selinux_disable: false +rhel7cis_selinux_state: enforcing + +# Misc. environment variables +rhel7cis_skip_for_travis: false +rhel7cis_system_is_container: false +system_is_ec2: false + +# Change to false if using EFI boot changes 1.1.1.4 to stop vfat +rhel7cis_legacy_boot: true + +# If set true uses the tmp.mount service else using fstab configuration +rhel7cis_tmp_svc: false + +# These variables correspond with the CIS rule IDs or paragraph numbers defined in +# the CIS benchmark documents. +# PLEASE NOTE: These work in coordination with the section # group variables and tags.
+# You must enable an entire section in order for the variables below to take effect. +# Section 1 rules +# Section 1 is Initial Setup (Filesystem Configuration, Configure Software Updates, Configure Sudo, Filesystem Integrity Checking, Secure Boot Settings, Additional Process Hardening, Mandatory Access Control, and Warning Banners) +rhel7cis_rule_1_1_1_1: true +rhel7cis_rule_1_1_1_2: true +rhel7cis_rule_1_1_1_3: true +rhel7cis_rule_1_1_2: true +rhel7cis_rule_1_1_3: true +rhel7cis_rule_1_1_4: true +rhel7cis_rule_1_1_5: true +rhel7cis_rule_1_1_6: true +rhel7cis_rule_1_1_7: true +rhel7cis_rule_1_1_8: true +rhel7cis_rule_1_1_9: true +rhel7cis_rule_1_1_10: true +rhel7cis_rule_1_1_11: true +rhel7cis_rule_1_1_12: true +rhel7cis_rule_1_1_13: true +rhel7cis_rule_1_1_14: true +rhel7cis_rule_1_1_15: true +rhel7cis_rule_1_1_16: true +rhel7cis_rule_1_1_17: true +rhel7cis_rule_1_1_18: true +rhel7cis_rule_1_1_19: true +rhel7cis_rule_1_1_20: true +rhel7cis_rule_1_1_21: true +rhel7cis_rule_1_1_22: true +rhel7cis_rule_1_1_23: true +rhel7cis_rule_1_1_24: true +rhel7cis_rule_1_2_1: true +rhel7cis_rule_1_2_2: true +rhel7cis_rule_1_2_3: true +rhel7cis_rule_1_2_4: true +rhel7cis_rule_1_2_5: true +rhel7cis_rule_1_3_1: true +rhel7cis_rule_1_3_2: true +rhel7cis_rule_1_4_1: true +rhel7cis_rule_1_4_2: true +rhel7cis_rule_1_4_3: true +rhel7cis_rule_1_5_1: true +rhel7cis_rule_1_5_2: true +rhel7cis_rule_1_5_3: true +rhel7cis_rule_1_5_4: true +rhel7cis_rule_1_6_1_1: true +rhel7cis_rule_1_6_1_2: true +rhel7cis_rule_1_6_1_3: true +rhel7cis_rule_1_6_1_4: true +rhel7cis_rule_1_6_1_5: true +rhel7cis_rule_1_6_1_6: true +rhel7cis_rule_1_6_1_7: true +rhel7cis_rule_1_6_1_8: true +rhel7cis_rule_1_7_1: true +rhel7cis_rule_1_7_2: true +rhel7cis_rule_1_7_3: true +rhel7cis_rule_1_7_4: true +rhel7cis_rule_1_7_5: true +rhel7cis_rule_1_7_6: true +rhel7cis_rule_1_8_1: true +rhel7cis_rule_1_8_2: true +rhel7cis_rule_1_8_3: true +rhel7cis_rule_1_8_4: true +rhel7cis_rule_1_9: true + +# Section 2 rules +# Section 2 is Services (inetd Services, Special Purpose Services, and Service Clients) +rhel7cis_rule_2_1_1: true +rhel7cis_rule_2_2_1_1: true +rhel7cis_rule_2_2_1_2: true +rhel7cis_rule_2_2_1_3: true +rhel7cis_rule_2_2_2: true +rhel7cis_rule_2_2_3: true +rhel7cis_rule_2_2_4: true +rhel7cis_rule_2_2_5: true +rhel7cis_rule_2_2_6: true +rhel7cis_rule_2_2_7: true +rhel7cis_rule_2_2_8: true +rhel7cis_rule_2_2_9: true +rhel7cis_rule_2_2_10: true +rhel7cis_rule_2_2_11: true +rhel7cis_rule_2_2_12: true +rhel7cis_rule_2_2_13: true +rhel7cis_rule_2_2_14: true +rhel7cis_rule_2_2_15: true +rhel7cis_rule_2_2_16: true +rhel7cis_rule_2_2_17: true +rhel7cis_rule_2_2_18: true +rhel7cis_rule_2_2_19: true +rhel7cis_rule_2_3_1: true +rhel7cis_rule_2_3_2: true +rhel7cis_rule_2_3_3: true +rhel7cis_rule_2_3_4: true +rhel7cis_rule_2_3_5: true +rhel7cis_rule_2_4: true + +# Section 3 rules +# Section 3 is Network Configuration (Disable unused network protocols, Network parameters (host), Network parameters (Host and Router), Uncommon Network Protocols, Firewall Configuration, and Configure iptables) +rhel7cis_rule_3_1_1: true +rhel7cis_rule_3_1_2: true +rhel7cis_rule_3_2_1: true +rhel7cis_rule_3_2_2: true +rhel7cis_rule_3_3_1: true +rhel7cis_rule_3_3_2: true +rhel7cis_rule_3_3_3: true +rhel7cis_rule_3_3_4: true +rhel7cis_rule_3_3_5: true +rhel7cis_rule_3_3_6: true +rhel7cis_rule_3_3_7: true +rhel7cis_rule_3_3_8: true +rhel7cis_rule_3_3_9: true +rhel7cis_rule_3_4_1: true +rhel7cis_rule_3_4_2: true +rhel7cis_rule_3_5_1_1: true +rhel7cis_rule_3_5_1_2: true 
+rhel7cis_rule_3_5_1_3: true +rhel7cis_rule_3_5_1_4: true +rhel7cis_rule_3_5_1_5: true +rhel7cis_rule_3_5_1_6: true +rhel7cis_rule_3_5_1_7: true +rhel7cis_rule_3_5_2_1: true +rhel7cis_rule_3_5_2_2: true +rhel7cis_rule_3_5_2_3: true +rhel7cis_rule_3_5_2_4: true +rhel7cis_rule_3_5_2_5: true +rhel7cis_rule_3_5_2_6: true +rhel7cis_rule_3_5_2_7: true +rhel7cis_rule_3_5_2_8: true +rhel7cis_rule_3_5_2_9: true +rhel7cis_rule_3_5_2_10: true +rhel7cis_rule_3_5_2_11: true +rhel7cis_rule_3_5_3_1_1: true +rhel7cis_rule_3_5_3_1_2: true +rhel7cis_rule_3_5_3_1_3: true +rhel7cis_rule_3_5_3_2_1: true +rhel7cis_rule_3_5_3_2_2: true +rhel7cis_rule_3_5_3_2_3: true +rhel7cis_rule_3_5_3_2_4: true +rhel7cis_rule_3_5_3_2_5: true +rhel7cis_rule_3_5_3_2_6: true +rhel7cis_rule_3_5_3_3_1: true +rhel7cis_rule_3_5_3_3_2: true +rhel7cis_rule_3_5_3_3_3: true +rhel7cis_rule_3_5_3_3_4: true +rhel7cis_rule_3_5_3_3_5: true +rhel7cis_rule_3_5_3_3_6: true + +# Section 4 rules +# Section 4 is Logging and Auditing (Configure System Accounting (auditd) and Configure Logging) +rhel7cis_rule_4_1_1_1: true +rhel7cis_rule_4_1_1_2: true +rhel7cis_rule_4_1_1_3: true +rhel7cis_rule_4_1_2_1: true +rhel7cis_rule_4_1_2_2: true +rhel7cis_rule_4_1_2_3: true +rhel7cis_rule_4_1_2_4: true +rhel7cis_rule_4_1_3: true +rhel7cis_rule_4_1_4: true +rhel7cis_rule_4_1_5: true +rhel7cis_rule_4_1_6: true +rhel7cis_rule_4_1_7: true +rhel7cis_rule_4_1_8: true +rhel7cis_rule_4_1_9: true +rhel7cis_rule_4_1_10: true +rhel7cis_rule_4_1_11: true +rhel7cis_rule_4_1_12: true +rhel7cis_rule_4_1_13: true +rhel7cis_rule_4_1_14: true +rhel7cis_rule_4_1_15: true +rhel7cis_rule_4_1_16: true +rhel7cis_rule_4_1_17: true +rhel7cis_rule_4_2_1_1: true +rhel7cis_rule_4_2_1_2: true +rhel7cis_rule_4_2_1_3: true +rhel7cis_rule_4_2_1_4: true +rhel7cis_rule_4_2_1_5: true +rhel7cis_rule_4_2_1_6: true +rhel7cis_rule_4_2_2_1: true +rhel7cis_rule_4_2_2_2: true +rhel7cis_rule_4_2_2_3: true +rhel7cis_rule_4_2_3: true +rhel7cis_rule_4_2_4: true + +# Section 5 rules +# Section 5 is Access, Authentication, and Authorization (Configure time-based job schedulers, Configure SSH Server, Configure PAM, and User Accounts and Environment) +rhel7cis_rule_5_1_1: true +rhel7cis_rule_5_1_2: true +rhel7cis_rule_5_1_3: true +rhel7cis_rule_5_1_4: true +rhel7cis_rule_5_1_5: true +rhel7cis_rule_5_1_6: true +rhel7cis_rule_5_1_7: true +rhel7cis_rule_5_1_8: true +rhel7cis_rule_5_1_9: true +rhel7cis_rule_5_2_1: true +rhel7cis_rule_5_2_2: true +rhel7cis_rule_5_2_3: true +rhel7cis_rule_5_3_1: true +rhel7cis_rule_5_3_2: true +rhel7cis_rule_5_3_3: true +rhel7cis_rule_5_3_4: true +rhel7cis_rule_5_3_5: true +rhel7cis_rule_5_3_6: true +rhel7cis_rule_5_3_7: true +rhel7cis_rule_5_3_8: true +rhel7cis_rule_5_3_9: true +rhel7cis_rule_5_3_10: true +rhel7cis_rule_5_3_12: true +rhel7cis_rule_5_3_11: true +rhel7cis_rule_5_3_13: true +rhel7cis_rule_5_3_14: true +rhel7cis_rule_5_3_15: true +rhel7cis_rule_5_3_16: true +rhel7cis_rule_5_3_17: true +rhel7cis_rule_5_3_18: true +rhel7cis_rule_5_3_19: true +rhel7cis_rule_5_3_20: true +rhel7cis_rule_5_3_21: true +rhel7cis_rule_5_3_22: true +rhel7cis_rule_5_4_1: true +rhel7cis_rule_5_4_2: true +rhel7cis_rule_5_4_3: true +rhel7cis_rule_5_4_4: true +rhel7cis_rule_5_5_1_1: true +rhel7cis_rule_5_5_1_2: true +rhel7cis_rule_5_5_1_3: true +rhel7cis_rule_5_5_1_4: true +rhel7cis_rule_5_5_1_5: true +rhel7cis_rule_5_5_2: true +rhel7cis_rule_5_5_3: true +rhel7cis_rule_5_5_4: true +rhel7cis_rule_5_5_5: true +rhel7cis_rule_5_6: true +rhel7cis_rule_5_7: true +# Section 6 rules +# Section 6 is System 
Maintenance (System File Permissions and User and Group Settings) +rhel7cis_rule_6_1_1: true +rhel7cis_rule_6_1_2: true +rhel7cis_rule_6_1_3: true +rhel7cis_rule_6_1_4: true +rhel7cis_rule_6_1_5: true +rhel7cis_rule_6_1_6: true +rhel7cis_rule_6_1_7: true +rhel7cis_rule_6_1_8: true +rhel7cis_rule_6_1_9: true +rhel7cis_rule_6_1_10: true +rhel7cis_rule_6_1_11: true +rhel7cis_rule_6_1_12: true +rhel7cis_rule_6_1_13: true +rhel7cis_rule_6_1_14: true +rhel7cis_rule_6_2_1: true +rhel7cis_rule_6_2_2: true +rhel7cis_rule_6_2_3: true +rhel7cis_rule_6_2_4: true +rhel7cis_rule_6_2_5: true +rhel7cis_rule_6_2_6: true +rhel7cis_rule_6_2_7: true +rhel7cis_rule_6_2_8: true +rhel7cis_rule_6_2_9: true +rhel7cis_rule_6_2_10: true +rhel7cis_rule_6_2_11: true +rhel7cis_rule_6_2_12: true +rhel7cis_rule_6_2_13: true +rhel7cis_rule_6_2_14: true +rhel7cis_rule_6_2_15: true +rhel7cis_rule_6_2_16: true +rhel7cis_rule_6_2_17: true + +# Service configuration booleans set true to keep service +rhel7cis_avahi_server: false +rhel7cis_cups_server: false +rhel7cis_dhcp_server: false +rhel7cis_ldap_server: false +rhel7cis_telnet_server: false +rhel7cis_nfs_server: false +rhel7cis_rpc_server: false +rhel7cis_ntalk_server: false +rhel7cis_rsyncd_server: false +rhel7cis_tftp_server: false +rhel7cis_rsh_server: false +rhel7cis_nis_server: false +rhel7cis_snmp_server: false +rhel7cis_squid_server: false +rhel7cis_smb_server: false +rhel7cis_dovecot_server: false +rhel7cis_httpd_server: false +rhel7cis_vsftpd_server: false +rhel7cis_named_server: false +rhel7cis_nfs_rpc_server: false +rhel7cis_is_mail_server: false +rhel7cis_bind: false +rhel7cis_vsftpd: false +rhel7cis_httpd: false +rhel7cis_dovecot: false +rhel7cis_samba: false +rhel7cis_squid: false +rhel7cis_net_snmp: false +rhel7cis_allow_autofs: false + +## Section 1 vars +# 1.3.3 var log location variable +rhel7cis_varlog_location: "/var/log/sudo.log" + +# xinetd required +rhel7cis_xinetd_required: false + +# RedHat Satellite Subscription items +rhel7cis_rhnsd_required: false + +# 1.4.2 Bootloader password +rhel7cis_set_boot_pass: false +rhel7cis_bootloader_password_hash: 'grub.pbkdf2.sha512.changethispart' + +# System network parameters (host only OR host and router) +rhel7cis_is_router: false + +# IPv6 required +rhel7cis_ipv6_required: true + +# AIDE +rhel7cis_config_aide: true +# AIDE cron settings +rhel7cis_aide_cron: + cron_user: root + cron_file: /etc/crontab + aide_job: '/usr/sbin/aide --check' + aide_minute: '0' + aide_hour: '5' + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +# SELinux policy +rhel7cis_selinux_pol: targeted + +# Whether or not to run tasks related to auditing/patching the desktop environment +rhel7cis_gui: no + +# Set to 'true' if X Windows is needed in your environment +rhel7cis_xwindows_required: no + +rhel7cis_openldap_clients_required: false +rhel7cis_telnet_required: false +rhel7cis_talk_required: false +rhel7cis_rsh_required: false +rhel7cis_ypbind_required: false + +# Time Synchronization - Either chrony or ntp +rhel7cis_time_synchronization: chrony + +rhel7cis_time_synchronization_servers: + - 0.pool.ntp.org + - 1.pool.ntp.org + - 2.pool.ntp.org + - 3.pool.ntp.org + +rhel7cis_chrony_server_options: "minpoll 8" +rhel7cis_ntp_server_options: "iburst" + +# 3.4.2 | PATCH | Ensure /etc/hosts.allow is configured +rhel7cis_host_allow: + - "10.0.0.0/255.0.0.0" + - "172.16.0.0/255.240.0.0" + - "192.168.0.0/255.255.0.0" + +# Firewall Service - either firewalld or iptables +rhel7cis_firewall: firewalld +rhel7cis_default_zone: public + 
+rhel7cis_firewall_services: + - ssh + - dhcpv6-client + +# 3.5.2.x +# NFT firewall +# not tested but added example for clarity - This will break connections +# If the tables dont exist automatically create the tablename below +rhel7cis_nft_tables_autoNewTable: false +# create chain if doesnt exist +rhel7cis_nft_tables_autoChainCreate: false +# create a table called +rhel7cis_nft_tables_tableName: filter + + +# Warning Banner Content (issue, issue.net, motd) +rhel7cis_warning_banner: | + Authorized uses only. All activity may be monitored and reported. +# End Banner + +## Section4 vars + +# auditd settings +rhel7cis_auditd: + space_left_action: email + action_mail_acct: root + admin_space_left_action: halt + max_log_file_action: keep_logs + +rhel7cis_logrotate: "daily" + +# RHEL-07-4.1.2.4 +# rhel7cis_audit_backlog_limit value needs to be 8192 or larger to conform to CIS standards +rhel7cis_audit_backlog_limit: 8192 + +# RHEL-07-4.2.1.4/4.2.1.5 remote and destation log server name +rhel7cis_remote_log_server: logagg.example.com + +# RHEL-07-4.2.1.5 +rhel7cis_system_is_log_server: false + +## Section5 vars + +# SSH variables +rhel7cis_ssh_loglevel: INFO +rhel7cis_ssh_maxsessions: 10 +rhel7cis_sshd: + clientalivecountmax: 3 + clientaliveinterval: 300 + ciphers: "chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr" + macs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256" + kex: "curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256" + logingracetime: 60 + # WARNING: make sure you understand the precedence when working with these values!! + # allowusers: + # allowgroups: systems dba + # denyusers: + # denygroups: + +# pam variables +rhel7cis_pam_faillock: + attempts: 5 + interval: 900 + unlock_time: 900 + fail_for_root: no + remember: 5 + pwhash: sha512 +rhel7cis_inactivelock: + lock_days: 30 + +# Accounte listed below will not have INACTIVE field set in shadow file +rhel7cis_inactive_whitelist: + - root + - vagrant + +rhel7cis_pass: + max_days: 90 + min_days: 7 + warn_age: 7 +# Syslog system - either rsyslog or syslog-ng +rhel7cis_syslog: rsyslog +rhel7cis_rsyslog_ansiblemanaged: true + +# Var/tmp settings +rhel7cis_vartmp: + source: /tmp + fstype: none + opts: "defaults,nodev,nosuid,noexec,bind" + enabled: no + +# Interactive user UID starting point +rhel7cis_rule_5_4_2_min_uid: 1000 + +# RHEL-07-5.4.5 +# Session timeout setting file (TMOUT setting can be set in multiple files) +# Timeout value is in seconds. (60 seconds * 10 = 600) +rhel7cis_shell_session_timeout: + file: /etc/profile.d/tmout.sh + timeout: 600 +# RHEL-07-5.4.1.5 Allow ansible to expire password for account with a last changed date in the future. False will just display users in violation, true will expire those users passwords +rhel7cis_futurepwchgdate_autofix: true + +# 5.4.2 +rhel7cis_int_gid: 1000 + +# 5.6 Group to be used for su +# this group needs to exists groups will not be created for remediation this is considered sys admins + +# rhel7cis_sugroup: sugroup + +## Section6 vars + +# RHEL-07_6.1.1 +rhel7cis_rpm_audit_file: /var/tmp/rpm_file_check + +# RHEL-07_6.1.10 Allow ansible to adjust world-writable files. 
False will just display world-writable files, True will remove world-writable +rhel7cis_no_world_write_adjust: true +rhel7cis_passwd_label: "{{ (this_item | default(item)).id }}: {{ (this_item | default(item)).dir }}" +rhel7cis_dotperm_ansiblemanaged: true + +# RHEL-07-6.2.18 Clear users from shadow group +rhel7cis_remove_shadow_grp_usrs: true +#### Goss Configuration Settings #### + +### Goss binary settings ### +goss_version: + release: v0.3.16 + checksum: 'sha256:827e354b48f93bce933f5efcd1f00dc82569c42a179cf2d384b040d8a80bfbfb' +audit_bin_path: /usr/local/bin/ +audit_bin: "{{ audit_bin_path }}goss" +audit_format: json + +# if get_goss_file == download change accordingly +goss_url: "https://github.com/aelsabbahy/goss/releases/download/{{ goss_version.release }}/goss-linux-amd64" + +## if get_goss_file - copy the following needs to be updated for your environment +## it is expected that it will be copied from somewhere accessible to the control node +## e.g copy from ansible control node to remote host +copy_goss_from_path: /some/accessible/path + +### Goss Audit Benchmark file ### +## managed by the control audit_content +# git +audit_file_git: "https://github.com/ansible-lockdown/{{ benchmark }}-Audit.git" +audit_git_version: main + +# copy: +audit_local_copy: "some path to copy from" + +# get_url: +audit_files_url: "some url maybe s3?" + +# Where the goss audit configuration will be stored +audit_files: "/var/tmp/{{ benchmark }}-Audit/" + +## Goss configuration information +# Where the goss configs and outputs are stored +audit_out_dir: '/var/tmp' +audit_conf_dir: "{{ audit_out_dir }}/{{ benchmark }}-Audit/" +pre_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_pre_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}" +post_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_post_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}" + +## The following should not need changing +goss_file: "{{ audit_conf_dir }}goss.yml" +audit_vars_path: "{{ audit_conf_dir }}/vars/{{ ansible_hostname }}.yml" +audit_results: | + The pre remediation results are: {{ pre_audit_summary }}. + The post remediation results are: {{ post_audit_summary }}. + Full breakdown can be found in {{ audit_out_dir }} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/group_vars/docker b/Linux/ansible-lockdown/RHEL7-CIS/group_vars/docker new file mode 100644 index 0000000..8510104 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/group_vars/docker @@ -0,0 +1,28 @@ +--- +ansible_user: root +# AIDE cron settings +rhel7cis_aide_cron: + cron_user: root + cron_file: /var/spool/cron/root + aide_job: '/usr/sbin/aide --check' + aide_minute: 0 + aide_hour: 5 + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +rhel7cis_sshd: + clientalivecountmax: 3 + clientaliveinterval: 300 + ciphers: "aes256-ctr,aes192-ctr,aes128-ctr" + macs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com" + logingracetime: 60 + # - make sure you understand the precedence when working with these values!! 
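+ # For reference: sshd processes DenyUsers, then AllowUsers, then DenyGroups, then
+ # AllowGroups; a match on either deny directive blocks the login even when an
+ # allow directive also matches.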
+ allowusers: vagrant + allowgroups: vagrant + denyusers: root + denygroups: root + +# Workarounds for Docker +rhel7cis_skip_for_travis: true +rhel7cis_selinux_disable: true diff --git a/Linux/ansible-lockdown/RHEL7-CIS/group_vars/vagrant b/Linux/ansible-lockdown/RHEL7-CIS/group_vars/vagrant new file mode 100644 index 0000000..e35ca5b --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/group_vars/vagrant @@ -0,0 +1,28 @@ +--- +ansible_user: vagrant +# AIDE cron settings +rhel7cis_aide_cron: + cron_user: root + cron_file: /var/spool/cron/root + aide_job: '/usr/sbin/aide --check' + aide_minute: 0 + aide_hour: 5 + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +rhel7cis_sshd: + clientalivecountmax: 3 + clientaliveinterval: 300 + ciphers: 'aes256-ctr,aes192-ctr,aes128-ctr' + macs: 'hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com' + logingracetime: 60 + # - make sure you understand the precedence when working with these values!! + allowusers: vagrant + allowgroups: vagrant + denyusers: root + denygroups: root + +# Vagrant can touch code that Docker cannot +rhel7cis_skip_for_travis: false +rhel7cis_selinux_disable: false diff --git a/Linux/ansible-lockdown/RHEL7-CIS/handlers/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/handlers/main.yml new file mode 100644 index 0000000..999831b --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/handlers/main.yml @@ -0,0 +1,103 @@ +--- +# handlers file for RHEL7-CIS + +- name: sysctl flush ipv4 route table + sysctl: + name: net.ipv4.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: sysctl flush ipv6 route table + sysctl: + name: net.ipv6.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: systemd restart tmp.mount + systemd: + name: tmp.mount + daemon_reload: yes + enabled: yes + masked: no + state: reloaded + +- name: remount tmp + command: mount -o remount /tmp + args: + warn: false + +- name: remount dev_shm + command: mount -o remount /dev/shm + args: + warn: false + +- name: remount var_tmp + command: mount -o remount /var/tmp + args: + warn: false + +- name: systemd restart var-tmp.mount + systemd: + name: var-tmp.mount + daemon_reload: yes + enabled: yes + masked: no + state: reloaded + +- name: remount home + command: mount -o remount /home + args: + warn: false + +- name: update dconf + command: dconf update + +- name: restart firewalld + service: + name: firewalld + state: restarted + +- name: restart xinetd + service: + name: xinetd + state: restarted + +- name: restart sshd + service: + name: sshd + state: restarted + +- name: restart postfix + service: + name: postfix + state: restarted + +- name: reload dconf + command: dconf update + +- name: restart auditd + command: /sbin/service auditd restart + changed_when: false + check_mode: false + failed_when: false + args: + warn: no + when: + - not rhel7cis_skip_for_travis + tags: + - skip_ansible_lint + +- name: grub2cfg + command: /sbin/grub2-mkconfig -o "{{ rhel7cis_bootloader_path }}grub.cfg" + +- name: restart rsyslog + service: + name: rsyslog + state: restarted + +- name: restart syslog-ng + service: + name: syslog-ng + state: restarted diff --git a/Linux/ansible-lockdown/RHEL7-CIS/local.yml b/Linux/ansible-lockdown/RHEL7-CIS/local.yml new file mode 100644 index 0000000..cc2804e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/local.yml @@ -0,0 +1,11 @@ +--- + +- hosts: localhost + connection: 
local + become: true + vars: + is_container: false + roles: + - role: "{{ playbook_dir }}" + rhel7cis_system_is_container: "{{ is_container | default(false) }}" + rhel7cis_ssh_required: "{{ is_container == false }}" diff --git a/Linux/ansible-lockdown/RHEL7-CIS/meta/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/meta/main.yml new file mode 100644 index 0000000..5500ddd --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/meta/main.yml @@ -0,0 +1,19 @@ +--- + +galaxy_info: + author: "Sam Doran, Josh Springer, Daniel Shepherd, Bas Meijeri, James Cassell, Mike Renfro, DFed, Mark Bolwell, George Nalen" + description: "Apply the CIS RHEL7 role" + company: "MindPoint Group" + license: MIT + min_ansible_version: 2.9.0 + role_name: rhel7_cis + platforms: + - name: EL + versions: + - "7" + galaxy_tags: + - system + - security + - cis + - hardening +dependencies: [] diff --git a/Linux/ansible-lockdown/RHEL7-CIS/site.yml b/Linux/ansible-lockdown/RHEL7-CIS/site.yml new file mode 100644 index 0000000..0a27252 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/site.yml @@ -0,0 +1,6 @@ +--- +- hosts: all + become: true + + roles: + - role: "{{ playbook_dir }}" diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/LE_audit_setup.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/LE_audit_setup.yml new file mode 100644 index 0000000..6da81fc --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/LE_audit_setup.yml @@ -0,0 +1,22 @@ +--- + +- name: Download goss binary + get_url: + url: "{{ goss_url }}" + dest: "{{ audit_bin }}" + owner: root + group: root + checksum: "{{ goss_version.checksum }}" + mode: 0555 + when: + - get_goss_file == 'download' + +- name: copy goss binary + copy: + src: "{{ copy_goss_from_path }}" + dest: "{{ audit_bin }}" + mode: 0555 + owner: root + group: root + when: + - get_goss_file == 'copy' diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/check_prereqs.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/check_prereqs.yml new file mode 100644 index 0000000..010701d --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/check_prereqs.yml @@ -0,0 +1,47 @@ +--- + +- name: "PREREQ | Check required packages installed | Python2" + package: + list: "{{ item }}" + state: present + loop: + - rpm-python + - libselinux-python + when: ansible_python.version.major == 2 + vars: + ansible_python_interpreter: "{{ python2_bin }}" + +- name: "PREREQ | Add the required packages | Python 3" + block: + - name: Check if python36-rpm package installed + command: rpm -q python36-rpm + register: python36_rpm_present + failed_when: ( python36_rpm_present.rc not in [ 0, 1 ] ) + changed_when: false + args: + warn: false + + - name: Add the EPEL repository required for the python36-rpm pkg + package: + name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + state: present + register: epel_installed + when: + - python36_rpm_present.rc != '0' + + - name: "PREREQ | Check required packages installed | Python3 " + package: + name: "{{ item }}" + state: present + register: python3reqs_installed + loop: + - python36-rpm + - libselinux-python3 + + - name: Disable Epel repo if installed earlier + command: yum-config-manager disable epel + when: epel_installed.changed + when: + - ( ansible_python.version.major == 3 and ansible_python.version.minor == 6 ) + vars: + ansible_python_interpreter: "{{ python2_bin }}" diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/main.yml new file mode 100644 index 0000000..8dbe87e --- /dev/null +++ 
b/Linux/ansible-lockdown/RHEL7-CIS/tasks/main.yml @@ -0,0 +1,129 @@ +--- +# tasks file for RHEL7-CIS +- name: Check OS version and family + fail: + msg: "This role can only be run against RHEL 7. {{ ansible_distribution }} {{ ansible_distribution_major_version }} is not supported." + when: + - ansible_os_family == 'RedHat' + - ansible_distribution_major_version is version_compare('7', '!=') + - rhel7cis_os_check + tags: + - always + +- name: Check ansible version + fail: + msg: You must use ansible 2.9 or greater + when: not ansible_version.full is version_compare('2.9', '>=') + tags: + - always + +- name: Check rhel7cis_bootloader_password_hash variable has been changed + assert: + that: rhel7cis_bootloader_password_hash != 'grub.pbkdf2.sha512.changethispart' + msg: "This role will not be able to run single user password commands as rhel7cis_bootloader_password_hash variable has not been set" + when: + - ansible_distribution_version >= '7.2' + - rhel7cis_set_boot_pass + - rhel7cis_rule_1_5_1 + +- name: "check sugroup exists if used" + block: + - name: "Check su group exists if defined" + command: grep -w "{{ rhel7cis_sugroup }}" /etc/group + register: sugroup_exists + changed_when: false + failed_when: sugroup_exists.rc >= 2 + tags: + - skip_ansible_lint + + - name: Check sugroup if defined exists before continuing + assert: + that: sugroup_exists.rc == 0 + msg: "The variable rhel7cis_sugroup is defined but does not exist please rectify" + when: + - rhel7cis_sugroup is defined + - rhel7cis_rule_5_6 + tags: + - rule_5.6 + +- include: check_prereqs.yml + tags: + - always + - prereqs + +- include: prelim.yml + tags: + - prelim_tasks + - always + +- import_tasks: pre_remediation_audit.yml + when: + - run_audit + tags: + - run_audit + +- name: Gather the package facts + package_facts: + manager: auto + tags: + - always + +- include: parse_etc_password.yml + when: + - rhel7cis_section5 or + rhel7cis_section6 + +- include: section_1/main.yml + when: rhel7cis_section1 + tags: + - rhel7cis_section1 + +- include: section_2/main.yml + tags: + - rhel7cis_section2 + when: rhel7cis_section2 + +- include: section_3/main.yml + when: rhel7cis_section3 + tags: + - rhel7cis_section3 + +- include: section_4/main.yml + when: rhel7cis_section4 + tags: + - rhel7cis_section4 + +- include: section_5/main.yml + when: rhel7cis_section5 + tags: + - rhel7cis_section5 + +- include: section_6/main.yml + when: rhel7cis_section6 + tags: + - rhel7cis_section6 + +- include: post.yml + tags: + - post_tasks + - always + +- name: flush handlers + meta: flush_handlers + +- name: reboot system + reboot: + when: + - rhel7cis_allow_reboot + +- import_tasks: post_remediation_audit.yml + when: + - run_audit + tags: + - run_audit + +- name: Show Audit Summary + debug: + msg: "{{ audit_results.split('\n') }}" + when: + - run_audit diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/parse_etc_password.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/parse_etc_password.yml new file mode 100644 index 0000000..b3a19f5 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/parse_etc_password.yml @@ -0,0 +1,31 @@ +--- +- name: "PRELIM | {{ rhel7cis_passwd_tasks }} | Parse /etc/passwd" + block: + - name: "PRELIM | {{ rhel7cis_passwd_tasks }} | Parse /etc/passwd" + command: cat /etc/passwd + changed_when: false + check_mode: false + register: rhel7cis_passwd_file_audit + + - name: "PRELIM | {{ rhel7cis_passwd_tasks }} | Split passwd entries" + set_fact: + rhel7cis_passwd: "{{ rhel7cis_passwd_file_audit.stdout_lines | map('regex_replace', 
ld_passwd_regex, ld_passwd_yaml) | map('from_yaml') | list }}" + with_items: "{{ rhel7cis_passwd_file_audit.stdout_lines }}" + vars: + ld_passwd_regex: >- + ^(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*) + ld_passwd_yaml: | + id: >-4 + \g + password: >-4 + \g + uid: \g + gid: \g + gecos: >-4 + \g + dir: >-4 + \g + shell: >-4 + \g + tags: + - always diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/post.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/post.yml new file mode 100644 index 0000000..bb70ac1 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/post.yml @@ -0,0 +1,12 @@ +--- +# Post tasks + +- name: Perform YUM package cleanup + package: + autoremove: true + changed_when: false + ignore_errors: yes + tags: + - skip_ansible_lint + vars: + ansible_python_interpreter: /bin/python diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/post_remediation_audit.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/post_remediation_audit.yml new file mode 100644 index 0000000..17ef3f8 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/post_remediation_audit.yml @@ -0,0 +1,43 @@ +--- + +- name: "Post Audit | Run post_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ post_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Post Audit | ensure audit files readable by users + file: + path: "{{ item }}" + mode: 0644 + state: file + loop: + - "{{ post_audit_outfile }}" + - "{{ pre_audit_outfile }}" + +- name: Post Audit | Capture audit data if json format + block: + - name: "capture data {{ post_audit_outfile }}" + command: "cat {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Post Audit | Capture audit data if documentation format + block: + - name: "Post Audit | capture data {{ post_audit_outfile }}" + command: "tail -2 {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Post Audit | Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/pre_remediation_audit.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/pre_remediation_audit.yml new file mode 100644 index 0000000..08fb19f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/pre_remediation_audit.yml @@ -0,0 +1,111 @@ +--- + +- name: Audit Binary Setup | Setup the LE audit + include_tasks: LE_audit_setup.yml + when: + - setup_audit + tags: + - setup_audit + +- name: "Pre Audit Setup | Ensure {{ audit_conf_dir }} exists" + file: + path: "{{ audit_conf_dir }}" + state: directory + mode: '0755' + +- name: Pre Audit Setup | If using git for content set up + block: + - name: Pre Audit Setup | Install git (rh8 python3) + package: + name: git + state: present + when: ansible_distribution_major_version == '8' + + - name: Pre Audit Setup | Install git (rh7 python2) + package: + name: git + state: present + vars: + ansible_python_interpreter: "{{ python2_bin }}" + when: ansible_distribution_major_version == '7' + + - name: Pre Audit Setup | retrieve audit content files from git + git: + repo: "{{ audit_file_git }}" + dest: "{{ audit_conf_dir }}" + version: "{{ audit_git_version }}" + when: + - audit_content == 'git' + +- name: Pre Audit Setup | 
copy to audit content files to server + copy: + src: "{{ audit_local_copy }}" + dest: "{{ audit_conf_dir }}" + mode: 0644 + when: + - audit_content == 'copy' + +- name: Pre Audit Setup | get audit content from url + get_url: + url: "{{ audit_files_url }}" + dest: "{{ audit_conf_dir }}" + when: + - audit_content == 'get_url' + +- name: Pre Audit Setup | Check Goss is available + block: + - name: Pre Audit Setup | Check for goss file + stat: + path: "{{ audit_bin }}" + register: goss_available + + - name: Pre Audit Setup | If audit ensure goss is available + assert: + msg: "Audit has been selected: unable to find goss binary at {{ audit_bin }}" + when: + - not goss_available.stat.exists + when: + - run_audit + +- name: Pre Audit Setup | Copy ansible default vars values to test audit + template: + src: ansible_vars_goss.yml.j2 + dest: "{{ audit_vars_path }}" + mode: 0600 + when: + - run_audit + tags: + - goss_template + +- name: "Pre Audit | Run pre_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ pre_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Pre Audit | Capture audit data if json format + block: + - name: "capture data {{ pre_audit_outfile }}" + command: "cat {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Pre Audit | Capture audit data if documentation format + block: + - name: "capture data {{ pre_audit_outfile }}" + command: "tail -2 {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/prelim.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/prelim.yml new file mode 100644 index 0000000..102036f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/prelim.yml @@ -0,0 +1,87 @@ +--- +# Preliminary tasks that should always be run +# List users in order to look files inside each home directory +- name: "PRELIM | List users accounts" + command: "awk -F: '{print $1}' /etc/passwd" + register: users + changed_when: false + check_mode: false + +- name: "PRELIM | Gather accounts with empty password fields" + shell: "cat /etc/shadow | awk -F: '($2 == \"\" ) {j++;print $1; } END {exit j}'" + register: empty_password_accounts + changed_when: false + check_mode: false + tags: + - skip_ansible_lint + +- name: "PRELIM | Gather UID 0 accounts other than root" + shell: "cat /etc/passwd | awk -F: '($3 == 0 && $1 != \"root\") {i++;print $1 } END {exit i}'" + register: uid_zero_accounts_except_root + changed_when: false + check_mode: false + tags: + - skip_ansible_lint + +- name: "PRELIM | Check whether machine is UEFI-based" + stat: + path: /sys/firmware/efi + register: rhel7cis_efi_boot + changed_when: false + +- name: set bootloader type + block: + - name: "PRELIM | set fact if UEFI boot | RHEL or OEL" + set_fact: + rhel7cis_bootloader_path: /boot/efi/EFI/redhat/ + rhel7cis_legacy_boot: false + when: + - rhel7cis_efi_boot.stat.exists + - ansible_distribution != 'CentOS' # Note: rhel & OEL both use redhat path + + - name: "PRELIM | set fact if UEFI boot | CentOS " + set_fact: + rhel7cis_bootloader_path: /boot/efi/EFI/centos/ + rhel7cis_legacy_boot: 
false + when: + - rhel7cis_efi_boot.stat.exists + - ansible_distribution == 'CentOS' + + - name: "PRELIM | set if not UEFI boot" + set_fact: + rhel7cis_bootloader_path: /boot/grub2/ + rhel7cis_legacy_boot: true + when: not rhel7cis_efi_boot.stat.exists + + - name: output bootloader and efi state + debug: + msg: + - "bootloader path set to {{ rhel7cis_bootloader_path }}" + - "legacy boot equals {{ rhel7cis_legacy_boot }}" + +- name: "PRELIM | Section 1.1 | Create list of mount points" + set_fact: + mount_names: "{{ ansible_mounts | map(attribute='mount') | list }}" + +- name: "PRELIM | Section 1.7 | Ensure SELinux is installed" + package: + name: libselinux + state: present + when: + - not rhel7cis_selinux_disable + vars: + ansible_python_interpreter: /bin/python + +- name: "PRELIM | Section 4.1 | Configure System Accounting (auditd)" + package: + name: audit + state: present + vars: + ansible_python_interpreter: /bin/python + +- name: "PRELIM | Section 5.1 | Configure cron" + package: + name: cronie + state: present + vars: + ansible_python_interpreter: /bin/python diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.1.x.yml new file mode 100644 index 0000000..168e325 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.1.x.yml @@ -0,0 +1,70 @@ +--- + +- name: "1.1.1.1 | L1 | PATCH | Ensure mounting of cramfs filesystems is disabled " + block: + - name: "1.1.1.1 | L1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install cramfs(\\s|$)" + line: "install cramfs /bin/true" + create: yes + mode: '0600' + + - name: "1.1.1.1 | L1 | PATCH | Remove cramfs module" + modprobe: + name: cramfs + state: absent + when: + - rhel7cis_rule_1_1_1_1 + - ansible_connection != 'docker' + tags: + - level1 + - patch + - rule_1.1.1.1 + - cramfs + +- name: "1.1.1.2 | L2 | PATCH | Ensure mounting of squashfs filesystems is disabled" + block: + - name: "1.1.1.2 | L2 | PATCH | Ensure mounting of squashfs filesystems is disabled | Edit modprobe config " + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install squashfs(\\s|$)" + line: "install squashfs /bin/true" + create: yes + mode: '0600' + + - name: "1.1.1.2 | L2 | PATCH | Remove squashfs module | rmmod" + modprobe: + name: squashfs + state: absent + when: + - rhel7cis_rule_1_1_1_2 + - ansible_connection != 'docker' + tags: + - level2 + - patch + - rule_1.1.1.2 + - squashfs + +- name: "1.1.1.3 | L1 | PATCH | Ensure mounting of udf filesystems is disabled" + block: + - name: "1.1.1.3 | L1 | PATCH | Ensure mounting of udf filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install udf(\\s|$)" + line: "install udf /bin/true" + create: yes + mode: '0600' + + - name: "1.1.1.3 | L1 | PATCH | Remove udf module | rmmod " + modprobe: + name: udf + state: absent + when: + - rhel7cis_rule_1_1_1_3 + - ansible_connection != 'docker' + tags: + - level1 + - patch + - rule_1.1.1.3 + - udf diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.x.yml new file mode 100644 index 0000000..d77b04b --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.1.x.yml @@ -0,0 +1,397 @@ +--- + +- name: "1.1.2 | L1 | AUDIT | Ensure /tmp is configured" + block: + - name: "1.1.2 | L1 | AUDIT | Ensure /tmp is configured | 
Absent" + debug: + msg: "WARNING: {{ required_mount }} doesn't exist. This is a manual task" + register: tmp_mount_absent + changed_when: tmp_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + + - name: "1.1.2 | L1 | AUDIT | Ensure /tmp is configured | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + register: tmp_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/tmp' + when: + - rhel7cis_rule_1_1_2 + tags: + - level1-server + - level1-workstation + - scored + - audit + - mounts + - rule_1.1.2 + +- name: | + "1.1.3 | L1 | PATCH | Ensure nodev option set on /tmp partition | skips if not present" + "1.1.4 | L1 | PATCH | Ensure nosuid option set on /tmp partition | skips if not present" + "1.1.5 | L1 | PATCH | Ensure noexec option set on /tmp partition| skips if not present" + block: + - name: | + "1.1.3 | L1 | PATCH | Ensure nodev option set on /tmp partition | fstab config | skips if mount absent" + "1.1.4 | L1 | PATCH | Ensure nosuid option set on /tmp partition | fstab_config | skips if mount absent" + "1.1.5 | L1 | PATCH | Ensure noexec option set on /tmp partition| fstab_config | skips if mount absent" + mount: + name: /tmp + src: "{{ item.device }}" + fstype: "{{ item.fstype }}" + state: present + opts: defaults,{% if rhel7cis_rule_1_1_3 %}noexec,{% endif %}{% if rhel7cis_rule_1_1_4 %}nodev,{% endif %}{% if rhel7cis_rule_1_1_5 %}nosuid{% endif %} + notify: remount tmp + loop: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + when: + - not rhel7cis_tmp_svc + - item.mount == "/tmp" + + - name: | + "1.1.3 | L1 | PATCH | Ensure noexec option set on /tmp partition | systemd | skips if mount absent" + "1.1.4 | L1 | PATCH | Ensure nodev option set on /tmp partition | systemd | skips if mount absent" + "1.1.5 | L1 | PATCH | Ensure nosuid option set on /tmp partition | systemd | skips if mount absent" + template: + src: etc/tmp_mount.j2 + dest: /etc/systemd/system/tmp.mount + owner: root + group: root + mode: 0644 + notify: systemd restart tmp.mount + when: + - rhel7cis_tmp_svc + when: + - tmp_mount_present is defined + - rhel7cis_rule_1_1_2 # This is required so the check takes place + - rhel7cis_rule_1_1_3 or + rhel7cis_rule_1_1_4 or + rhel7cis_rule_1_1_5 + tags: + - level1 + - patch + - rule_1.1.3 + - rule_1.1.4 + - rule_1.1.5 + +- name: "1.1.6 | L1 | AUDIT | Ensure /dev/shm is configured" + block: + - name: "1.1.6 | L1 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: dev_shm_mount_absent + changed_when: dev_shm_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.6 | L1 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." 
+ register: dev_shm_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/dev/shm' + when: + - rhel7cis_rule_1_1_6 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.6 + +- name: | + "1.1.7 | L1 | PATCH | Ensure noexec option set on /dev/shm partition | skips if mount absent" + "1.1.8 | L1 | PATCH | Ensure nosuid option set on /dev/shm partition | skips if mount absent" + "1.1.9 | L1 | PATCH | Ensure noexec option set on /dev/shm partition | skips if mount absent" + mount: + name: /dev/shm + src: tmpfs + fstype: tmpfs + state: present + opts: defaults,{% if rhel7cis_rule_1_1_7 %}noexec,{% endif %}{% if rhel7cis_rule_1_1_8 %}nodev,{% endif %}{% if rhel7cis_rule_1_1_9 %}nosuid{% endif %},seclabel + notify: remount dev_shm + when: + - rhel7cis_rule_1_1_7 or + rhel7cis_rule_1_1_8 or + rhel7cis_rule_1_1_9 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.7 + - rule_1.1.8 + - rule_1.1.9 + +- name: "1.1.10 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | skips if mount absent" + block: + - name: "1.1.10 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }}| Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_mount_absent + changed_when: var_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.10 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + register: var_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/var' + when: + - rhel7cis_rule_1_1_10 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.10 + +- name: "1.1.11 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | skips if mount absent" + block: + - name: "1.1.11 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_tmp_mount_absent + changed_when: var_tmp_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.11 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." 
+ register: var_tmp_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/var/tmp' + when: + - rhel7cis_rule_1_1_11 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.11 + +- name: | + "1.1.12 | L1 | PATCH | Ensure noexec option set on /var/tmp partition | skips if mount absent" + "1.1.13 | L1 | PATCH | Ensure nodec option set on /var/tmp partition | skips if mount absent" + "1.1.14 | L1 | PATCH | Ensure nosuid option set on /var/tmp partition | skips if mount absent" + mount: + name: /var/tmp + src: "{{ item.device }}" + fstype: "{{ item.fstype }}" + state: present + opts: defaults,{% if rhel7cis_rule_1_1_12 %}noexec,{% endif %}{% if rhel7cis_rule_1_1_13 %}nodev,{% endif %}{% if rhel7cis_rule_1_1_14 %}nosuid{% endif %} + loop: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + when: + - var_tmp_mount_present is defined + - item.mount == "/var/tmp" + - rhel7cis_rule_1_1_11 # This is required so the check takes place + - rhel7cis_rule_1_1_12 or + rhel7cis_rule_1_1_13 or + rhel7cis_rule_1_1_14 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - skip_ansible_lint + +- name: "1.1.15 | L2 | AUDIT | Ensure separate partition exists for /var/log | skips if mount absent" + block: + - name: "1.1.15 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }}| Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_log_mount_absent + changed_when: var_log_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.15 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + when: + - required_mount in mount_names + vars: + required_mount: '/var/log' + when: + - rhel7cis_rule_1_1_15 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.15 + - skip_ansible_lint + +- name: "1.1.16 | L2 | AUDIT | Ensure separate partition exists for /var/log/audit | skips if mount absent" + block: + - name: "1.1.16 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }}| Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_log_audit_mount_absent + changed_when: var_log_audit_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.16 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + when: + - required_mount in mount_names + vars: + required_mount: '/var/log/audit' + when: + - rhel7cis_rule_1_1_16 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.16 + - skip_ansible_lint + +- name: "1.1.17 | L2 | AUDIT | Ensure separate partition exists for /home" + block: + - name: "1.1.17 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }}| Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: home_mount_absent + changed_when: home_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.17 | L2 | AUDIT | Ensure separate partition exists for {{ required_mount }} | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." 
+ register: home_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/home' + when: + - rhel7cis_rule_1_1_17 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.17 + - skip_ansible_lint + +- name: "1.1.18 | L1 | PATCH | Ensure nodev option set on /home partition | skips if mount absent" + mount: + name: /home + src: "{{ item.device }}" + fstype: "{{ item.fstype }}" + state: present + opts: defaults,{% if rhel7cis_rule_1_1_17 %}nodev{% endif %} + loop: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + when: + - home_mount_present is defined + - item.mount == "/home" + - rhel7cis_rule_1_1_18 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.18 + - skip_ansible_lint + +- name: | + "1.1.19 | L1 | PATCH | Ensure removable media partitions include noexec option" + "1.1.20 | L1 | PATCH | Ensure nodev option set on removable media partitions" + "1.1.21 | L1 | PATCH | Ensure nosuid option set on removable media partitions" + debug: + msg: "--> Not relevant" + changed_when: false + when: + - rhel7cis_rule_1_1_19 or + rhel7cis_rule_1_1_20 or + rhel7cis_rule_1_1_21 + tags: + - level1-server + - level1-workstation + - notscored + - audit + - mounts + - rule_1.1.19 + - rule_1.1.20 + - rule_1.1.21 + +- name: "1.1.22 | L1 | PATCH | Ensure sticky bit is set on all world-writable directories" + shell: df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type d -perm -0002 2>/dev/null | xargs chmod a+t + args: + warn: no + changed_when: false + failed_when: false + when: + - rhel7cis_rule_1_1_22 + tags: + - skip_ansible_lint + - level1-server + - level1-workstation + - patch + - stickybits + - permissons + - rule_1.1.22 + +- name: "1.1.23 | L1 | PATCH | Disable Automounting" + service: + name: autofs + enabled: no + when: + - not rhel7cis_allow_autofs + - "'autofs' in ansible_facts.packages" + - rhel7cis_rule_1_1_23 + tags: + - level1-server + - level2-workstation + - patch + - mounts + - automounting + - rule_1.1.23 + +- name: "1.1.24 | L1 | PATCH | Disable USB Storage" + block: + - name: "1.1.24 | L1 | PATCH | Disable USB Storage | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install usb-storage(\\s|$)" + line: "install usb-storage /bin/true" + create: yes + owner: root + group: root + mode: 0640 + + - name: "1.1.24 | L1 | PATCH | Disable USB Storage | Edit modprobe config" + modprobe: + name: usb-storage + state: absent + when: + - rhel7cis_rule_1_1_24 + tags: + - level1-server + - level2-workstation + - patch + - mounts + - removable_storage + - rule_1.1.24 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.2.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.2.x.yml new file mode 100644 index 0000000..1b09389 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.2.x.yml @@ -0,0 +1,96 @@ +--- + +- name: "1.2.1 | L1 | PATCH | Ensure GPG keys are configured" + block: + - name: "1.2.1 | L1 | PATCH | Ensure GPG keys are configured | RedHat" + command: gpg --quiet --with-fingerprint /etc/pki/rpm-gpg/RPM-GPG-KEY-{{ ansible_distribution|lower }}-release + when: + - ansible_distribution == "RedHat" + + - name: "1.2.1 | L1 | PATCH | Ensure GPG keys are configured | CentOS" + command: gpg --quiet --with-fingerprint /etc/pki/rpm-gpg/RPM-GPG-KEY-{{ ansible_distribution }}-{{ ansible_distribution_major_version }} + when: + - ansible_distribution == "CentOS" + when: + - 
rhel7cis_rule_1_2_1 + tags: + - level1 + - patch + - rule_1.2.1 + +- name: "1.2.2 | L1 | AUDIT | Ensure package manager repositories are configured" + block: + - name: "1.2.2 | L1 | AUDIT | Ensure package manager repositories are configured" + shell: yum repolist + changed_when: false + register: repolist + check_mode: false + args: + warn: false + tags: + - skip_ansible_lint + + - name: "1.2.2 | L1 | AUDIT | Ensure package manager repositories are configured" + debug: + msg: + - "Please check against site policy repos listed below match expected:" + - "{{ repolist.stdout_lines }}" + when: + - rhel7cis_rule_1_2_2 + tags: + - level1 + - audit + - rule_1.2.2 + - skip_ansible_lint + +- name: "1.2.3 | L1 | PATCH | Ensure gpgcheck is globally activated" + block: + - name: "1.2.3 | L1 | AUDIT | Ensure gpgcheck is globally activated" + find: + paths: /etc/yum.repos.d + patterns: "*.repo" + changed_when: false + register: yum_repos + + - name: "1.2.3 | L1 | PATCH | Ensure gpgcheck is globally activated" + replace: + path: "{{ item.path }}" + regexp: "^gpgcheck=0" + replace: "gpgcheck=1" + loop: "{{ yum_repos.files }}" + loop_control: + label: "{{ item.path }}" + when: + - rhel7cis_rule_1_2_3 + tags: + - level1 + - patch + - rule_1.2.3 + + +- name: "1.2.4 | L1 | AUDIT | Ensure Red Hat Subscription Manager connection is configured" + shell: subscription-manager identity + changed_when: false + failed_when: false + when: + - ansible_distribution == "RedHat" + - rhel7cis_rule_1_2_4 + tags: + - level1 + - patch + - rule_1.2.4 + - skip_ansible_lint + +- name: "1.2.5 | L1 | PATCH | Disable the rhnsd Daemon" + service: + name: rhnsd + state: stopped + enabled: no + masked: true + when: + - ansible_distribution == "RedHat" and not rhel7cis_rhnsd_required + - rhel7cis_rule_1_2_5 + tags: + - level1 + - patch + - rule_1.2.5 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.3.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.3.x.yml new file mode 100644 index 0000000..67def49 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.3.x.yml @@ -0,0 +1,45 @@ +--- + +- name: "1.3.1 | L1 | PATCH | Ensure AIDE is installed" + block: + - name: "1.3.1 | L1 | PATCH | Ensure AIDE is installed | Install" + package: + name: aide + state: installed + + - name: "1.3.1 | L1 | PATCH | Ensure AIDE is installed | started" + shell: /usr/sbin/aide --init -B 'database_out=file:/var/lib/aide/aide.db.gz' + args: + creates: /var/lib/aide/aide.db.gz + changed_when: false + failed_when: false + async: "{{ ansible_check_mode | ternary(0, 45) }}" + poll: 0 + when: + - rhel7cis_config_aide + - rhel7cis_rule_1_3_1 + tags: + - level1 + - aide + - patch + - rule_1.3.1 + vars: + ansible_python_interpreter: /bin/python + +- name: "1.3.2 | L1 | PATCH | Ensure filesystem integrity is regularly checked | cron" + cron: + name: Run AIDE integrity check + minute: "{{ rhel7cis_aide_cron['aide_minute'] | default('0') }}" + hour: "{{ rhel7cis_aide_cron['aide_hour'] | default('5') }}" + day: "{{ rhel7cis_aide_cron['aide_day'] | default('*') }}" + month: "{{ rhel7cis_aide_cron['aide_month'] | default('*') }}" + weekday: "{{ rhel7cis_aide_cron['aide_weekday'] | default('*') }}" + job: "{{ rhel7cis_aide_cron['aide_job'] }}" + when: + - rhel7cis_rule_1_3_2 + tags: + - level1 + - aide + - file_integrity + - patch + - rule_1.3.2 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.4.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.4.x.yml new file mode 100644 index 0000000..42694ec 
--- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.4.x.yml @@ -0,0 +1,72 @@ +--- + +- name: "1.4.1 | L1 | PATCH | Ensure bootloader password is set" + copy: + dest: "{{ rhel7cis_bootloader_path }}user.cfg" + content: "GRUB2_PASSWORD={{ rhel7cis_bootloader_password_hash }}" + owner: root + group: root + mode: "{% if rhel7cis_legacy_boot %}0600{% else %}0700{% endif %}" + notify: grub2cfg + when: + - ansible_distribution_version >= '7.2' + - rhel7cis_set_boot_pass + - rhel7cis_rule_1_4_1 + tags: + - level1 + - grub + - patch + - rule_1.4.1 + +- name: "1.4.2 | L1 | PATCH | Ensure permissions on bootloader config are configured" + block: + - name: "1.4.2 | L1 | PATCH | Ensure permissions on bootloader config are configured | Update grub config settings" + file: + path: "{{ rhel7cis_bootloader_path }}grub.cfg" + owner: root + group: root + mode: 0600 + when: + - rhel7cis_legacy_boot + + - name: "1.4.2 | L1 | PATCH | Ensure permissions on bootloader config are configured | UEFI" + mount: + name: /boot/efi + src: "UUID={{ item.uuid }}" + fstype: vfat + state: present + opts: defaults,umask=0027,fmask=0077,uid=0,gid=0 + passno: '0' + loop: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + when: + - not rhel7cis_legacy_boot + - item.mount == "/boot/efi" + when: + - rhel7cis_rule_1_4_2 + tags: + - level1 + - grub + - patch + - rule_1.4.2 + +- name: "1.4.3 | L1 | PATCH | Ensure authentication required for single user mode" + block: + - name: "1.4.3 | L1 | PATCH | Ensure authentication required for single user mode | Emergency service" + lineinfile: + dest: /usr/lib/systemd/system/emergency.service + regexp: '/sbin/sulogin' + line: 'ExecStart=-/bin/sh -c "/sbin/sulogin; /usr/bin/systemctl --fail --no-block default"' + + - name: "1.4.3 | L1 | PATCH | Ensure authentication required for single user mode | Rescue service" + lineinfile: + dest: /usr/lib/systemd/system/rescue.service + regexp: '/sbin/sulogin' + line: 'ExecStart=-/bin/sh -c "/sbin/sulogin; /usr/bin/systemctl --fail --no-block default"' + when: + - rhel7cis_rule_1_4_3 + tags: + - level1 + - patch + - rule_1.4.3 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.5.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.5.x.yml new file mode 100644 index 0000000..1acfec7 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.5.x.yml @@ -0,0 +1,77 @@ +--- + +- name: "1.5.1 | L1 | PATCH | Ensure core dumps are restricted" + block: + - name: "1.5.1 | L1 | PATCH | Ensure core dumps are restricted | Update limits.conf file" + lineinfile: + state: present + dest: /etc/security/limits.conf + regexp: '^#?\\*.*core' + line: '* hard core 0' + insertbefore: '^# End of file' + + - name: "1.5.1 | L1 | PATCH | Ensure core dumps are restricted | Set active kernel parameter" + sysctl: + name: fs.suid_dumpable + value: '0' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + + - name: 1.5.1 | L1 | PATCH | Ensure core dumps are restricted | coredump.conf" + lineinfile: + dest: /etc/systemd/coredump.conf + regexp: "{{ item.regex }}" + line: "{{ item.line }}" + create: yes + mode: 0644 + loop: + - { regex: '^Storage', line: 'Storage=none' } + - { regex: '^ProcessSizeMax', line: 'ProcessSizeMax=0' } + when: + - rhel7cis_rule_1_5_1 + tags: + - level1 + - sysctl + - patch + - rule_1.5.1 + +- name: "1.5.2 | L1 | PATCH | Ensure XD/NX support is enabled" + shell: dmesg|grep -E "NX|XD" | grep " active" + changed_when: false + when: + - rhel7cis_rule_1_5_2 + tags: + - 
skip_ansible_lint + - level1 + - patch + - rule_1.5.2 + +- name: "1.5.3 | L1 | PATCH | Ensure address space layout randomization (ASLR) is enabled" + sysctl: + name: kernel.randomize_va_space + value: '2' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + when: + - rhel7cis_rule_1_5_3 + tags: + - level1 + - patch + - rule_1.5.3 + +- name: "1.5.4 | L1 | PATCH | Ensure prelink is disabled" + package: + name: prelink + state: absent + when: + - rhel7cis_rule_1_5_4 + tags: + - level1 + - patch + - rule_1.5.4 + vars: + ansible_python_interpreter: /bin/python diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.6.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.6.x.yml new file mode 100644 index 0000000..11cc413 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.6.x.yml @@ -0,0 +1,114 @@ +--- + +- name: "1.6.1.1 | L1 | PATCH | Ensure SELinux is installed" + package: + name: libselinux + state: present + when: + - not rhel7cis_selinux_disable + - rhel7cis_rule_1_6_1_1 + tags: + - level1 + - patch + - rule_1.6.1.1 + vars: + ansible_python_interpreter: /bin/python + +- name: "1.6.1.2 | L1 | PATCH | Ensure SELinux is not disabled in bootloader configuration" + replace: + dest: /etc/default/grub + regexp: '(selinux|enforcing)\s*=\s*0\s*' + register: selinux_grub_patch + ignore_errors: yes + notify: grub2cfg + when: + - not rhel7cis_selinux_disable + - rhel7cis_rule_1_6_1_2 + tags: + - level1 + - patch + - rule_1.6.1.2 + +- name: "1.6.1.3 | L1 | PATCH | Ensure SELinux policy is configured\n + 1.6.1.4 | L1 | PATCH | Ensure the SELinux state is enforcing or permissive" + selinux: + conf: /etc/selinux/config + policy: "{{ rhel7cis_selinux_pol }}" + state: "{{ rhel7cis_selinux_state }}" + when: + - not rhel7cis_selinux_disable + - rhel7cis_rule_1_6_1_3 + - rhel7cis_rule_1_6_1_4 + tags: + - level1 + - selinux + - patch + - rule_1.6.1.3 + - rule_1.6.1.4 + +- name: "1.6.1.5 | L2 | PATCH | Ensure the SELinux state is enforcing" + selinux: + conf: /etc/selinux/config + policy: "{{ rhel7cis_selinux_pol }}" + state: enforcing + when: + - not rhel7cis_selinux_disable + - not rhel7cis_selinux_state == "permissive" + - rhel7cis_rule_1_6_1_5 + tags: + - level2 + - selinux + - patch + - rule_1.6.1.5 + +- name: "1.6.1.6 | L1 | AUDIT | Ensure no unconfined daemons exist" + block: + - name: "1.6.1.6 | L1 | AUDIT | Ensure no unconfined daemons exist | Find the unconfined daemons" + shell: ps -eZ | egrep "initrc" | egrep -vw "tr|ps|egrep|bash|awk" | tr ':' ' ' | awk '{ print $NF }' + failed_when: false + changed_when: false + check_mode: false + register: rhelcis_1_6_1_6_unconf_daemons + + - name: "1.6.1.6 | L1 | AUDIT | Ensure no unconfined daemons exist | Message on no unconfined daemones" + debug: + msg: "Good News! There are no unconfined daemons found on your system" + when: rhelcis_1_6_1_6_unconf_daemons.stdout | length == 0 + + - name: "1.6.1.6 | L1 | AUDIT | Ensure no unconfined daemons exist | Message on unconfined daemones" + debug: + msg: "Warning! 
You have unconfined daemons: {{ rhelcis_1_6_1_6_unconf_daemons.stdout_lines }}" + when: rhelcis_1_6_1_6_unconf_daemons.stdout | length > 0 + when: + - rhel7cis_rule_1_6_1_6 + tags: + - level1 + - audit + - rule_1.6.1.6 + +- name: "1.6.1.7 | L1 | PATCH | Ensure SETroubleshoot is not installed" + package: + name: setroubleshoot + state: absent + when: + - rhel7cis_rule_1_6_1_7 + tags: + - level1 + - selinux + - patch + - rule_1.6.1.7 + vars: + ansible_python_interpreter: /bin/python + +- name: "1.6.1.8 | L1 | PATCH | Ensure the MCS Translation Service (mcstrans) is not installed" + package: + name: mcstrans + state: absent + when: + - rhel7cis_rule_1_6_1_8 + tags: + - level1 + - patch + - rule_1.6.1.8 + vars: + ansible_python_interpreter: /bin/python diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.7.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.7.x.yml new file mode 100644 index 0000000..25cd2e7 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.7.x.yml @@ -0,0 +1,91 @@ +--- + +- name: "1.7.1 | L1 | PATCH | Ensure message of the day is configured properly" + template: + src: etc/motd.j2 + dest: /etc/motd + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_1_7_1 + tags: + - level1 + - banner + - patch + - rule_1.7.1 + +- name: "1.7.2 | L1 | PATCH | Ensure local login warning banner is configured properly" + template: + src: etc/issue.j2 + dest: /etc/issue + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_1_7_2 + tags: + - level1 + - banner + - patch + - rule_1.7.2 + +- name: "1.7.3 | L1 | PATCH | Ensure remote login warning banner is configured properly" + template: + src: etc/issue.net.j2 + dest: /etc/issue.net + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_1_7_3 + tags: + - level1 + - banner + - patch + - rule_1.7.3 + +- name: "1.7.4 | L1 | PATCH | Ensure permissions on /etc/motd are configured" + file: + dest: /etc/motd + state: file + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_1_7_4 + tags: + - level1 + - perms + - patch + - rule_1.7.4 + +- name: "1.7.5 | L1 | PATCH | Ensure permissions on /etc/issue are configured" + file: + dest: /etc/issue + state: file + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_1_7_5 + tags: + - level1 + - perms + - patch + - rule_1.7.5 + +- name: "1.7.6 | L1 | PATCH | Ensure permissions on /etc/issue.net are configured" + file: + dest: /etc/issue.net + state: file + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_1_7_6 + tags: + - level1 + - perms + - patch + - rule_1.7.6 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.8.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.8.x.yml new file mode 100644 index 0000000..6b3427c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.8.x.yml @@ -0,0 +1,114 @@ +--- + +- name: 1.8.1 | L2 | PATCH | Ensure GNOME Display Manager is removed + block: + - name: 1.8.1 | L2 | AUDIT | Ensure GNOME Display Manager is removed | check runlevel + fail: + msg: "System is at runlevel 5 and GDM is installed this can only be removed at Lvl <= 3" + when: + - ansible_env.SHLVL > 3 + + - name: 1.8.1 | L2 | AUDIT | Ensure GNOME Display Manager is removed | Remove package + package: + name: gdm + state: absent + when: + - "'gdm' in ansible_facts.packages" + - not rhel7cis_gui + - rhel7cis_rule_1_8_1 + tags: + - rule_1.8.1 + - level_1 + +- name: 1.8.2 | L1 | PATCH | Ensure GDM login banner is configured + block: + - name: 
1.8.2 | L1 | PATCH | Ensure GDM login banner is configured | gdm profile + lineinfile: + path: /etc/dconf/profile/gdm + regexp: "^{{ item.regexp }}" + line: "{{ item.line }}" + state: present + create: true + mode: 0644 + owner: root + group: root + with_items: + - {regexp: 'user-db', line: 'user-db:user' } + - {regexp: 'system-db', line: 'system-db:gdm' } + - {regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults' } + + - name: 1.8.2 | L1 | PATCH | Ensure GDM login banner is configured | banner + lineinfile: + path: /etc/dconf/db/gdm.d/01-banner-message + regexp: "^{{ item.regexp }}" + line: "{{ item.line }}" + state: present + create: true + mode: 0644 + owner: root + group: root + with_items: + - { regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]' } + - { regexp: 'banner-message-enable', line: 'banner-message-enable=true' } + - { regexp: 'banner-message-text', line: "banner-message-text='{{ rhel7cis_warning_banner }}' " } + when: + - "'gdm' in ansible_facts.packages" + - not rhel7cis_gui + - rhel7cis_rule_1_8_2 + tags: + - rule_1.8.2 + - level_1 + +- name: 1.8.3 | L1 | PATCH | Ensure last logged in user display is disabled | banner + block: + - name: 1.8.3 | L1 | PATCH | Ensure last logged in user display is disabled | gdm profile + lineinfile: + path: /etc/dconf/profile/gdm + regexp: "^{{ item.regexp }}" + line: "{{ item.line }}" + state: present + create: true + mode: 0644 + owner: root + group: root + with_items: + - {regexp: 'user-db', line: 'user-db:user' } + - {regexp: 'system-db', line: 'system-db:gdm' } + - {regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults' } + + - name: 1.8.3 | L1 | PATCH | Ensure last logged in user display is disabled | login screen + lineinfile: + path: /etc/dconf/db/gdm.d/00-login-screen + regexp: "^{{ item.regexp }}" + line: "{{ item.line }}" + state: present + create: true + mode: 0644 + owner: root + group: root + with_items: + - { regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]' } + - { regexp: 'disable-user-list', line: 'disable-user-list=true' } + when: + - "'gdm' in ansible_facts.packages" + - not rhel7cis_gui + - rhel7cis_rule_1_8_3 + tags: + - rule_1.8.3 + - level_1 + +- name: 1.8.4 | L1 | PATCH | Ensure XDCMP is not enabled + lineinfile: + path: /etc/gdm/custom.conf + regexp: ^Enable=true + state: absent + create: true + owner: root + group: root + mode: 0644 + when: + - "'gdm' in ansible_facts.packages" + - not rhel7cis_gui + - rhel7cis_rule_1_8_4 + tags: + - rule_1.8.4 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.9.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.9.x.yml new file mode 100644 index 0000000..af3c6c1 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/cis_1.9.x.yml @@ -0,0 +1,16 @@ +--- + +- name: "1.9 | L1 | PATCH | Ensure updates, patches, and additional security software are installed" + package: + name: "*" + state: latest + when: + - rhel7cis_rule_1_9 + - not system_is_ec2 + tags: + - level1 + - patch + - rule_1.9 + - skip_ansible_lint + vars: + ansible_python_interpreter: /bin/python diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/main.yml new file mode 100644 index 0000000..aeeab9f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_1/main.yml @@ -0,0 +1,31 @@ +--- + +- name: "SECTION | 1.1 | FileSystem Configurations" + include: cis_1.1.1.x.yml +- include: cis_1.1.x.yml + +- name: "SECTION | 1.2 | 
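# The /etc/dconf keyfiles written for rules 1.8.2 and 1.8.3 are only read by GDM after the
# dconf database is rebuilt. A hedged follow-up task (an assumption, not part of the original
# role) could look like:
#   - name: Rebuild the dconf database after GDM keyfile changes
#     command: dconf update
#     changed_when: false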
Configure Software Updates" + include: cis_1.2.x.yml + +- name: "SECTION | 1.3 | Filesystem Integrity" + include: cis_1.3.x.yml + +- name: "SECTION | 1.4 | Secure Boot Settings" + include: cis_1.4.x.yml + when: rhel7cis_config_aide + +- name: "SECTION | 1.5 | Additional Process Hardening" + include: cis_1.5.x.yml + +- name: "SECTION | 1.6 | Mandatory Access Control" + include: cis_1.6.x.yml + when: not rhel7cis_selinux_disable + +- name: "SECTION | 1.7 | Warning Banners" + include: cis_1.7.x.yml + +- name: "SECTION | 1.8 | GDM Login" + include: cis_1.8.x.yml + +- name: "SECTION | 1.9 | Updated and Patches" + include: cis_1.9.x.yml diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.1.x.yml new file mode 100644 index 0000000..a6f723a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.1.x.yml @@ -0,0 +1,15 @@ +--- + +- name: "2.1.1 | L1 | PATCH | Ensure xinetd is not installed" + package: + name: xinetd + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - ("'xinetd' in ansible_facts.packages") and not rhel7cis_xinetd_required + - rhel7cis_rule_2_1_1 + tags: + - level1 + - patch + - rule_2.1.1 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.1.x.yml new file mode 100644 index 0000000..9c6d985 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.1.x.yml @@ -0,0 +1,116 @@ +--- + +- name: "2.2.1.1 | L1 | PATCH | Ensure time synchronization is in use | service install" + package: + name: "{{ rhel7cis_time_synchronization }}" + state: present + when: + - rhel7cis_rule_2_2_1_1 + - not rhel7cis_system_is_container + tags: + - level1 + - patch + - rule_2.2.1.1 + vars: + ansible_python_interpreter: /bin/python + +- name: "2.2.1.1 | L1 | PATCH | Ensure time synchronization is in use | service start" + service: + name: "{{ rhel7cis_time_synchronization }}d" + state: started + enabled: yes + when: + - rhel7cis_rule_2_2_1_1 + - not rhel7cis_system_is_container + tags: + - level1 + - patch + - rule_2.2.1.1 + +- name: "2.2.1.1 | L1 | PATCH | Ensure time synchronization is in use - service stop ntp" + service: + name: ntpd + state: stopped + enabled: no + when: + - "'ntp' in ansible_facts.packages" + - rhel7cis_time_synchronization == "chrony" + - rhel7cis_rule_2_2_1_1 + - not rhel7cis_system_is_container + tags: + - level1 + - patch + - rule_2.2.1.1 + +- name: "2.2.1.1 | L1 | PATCH | Ensure time synchronization is in use - service stop chrony" + service: + name: chronyd + state: stopped + enabled: no + when: + - rhel7cis_time_synchronization == "ntp" and "'chrony' in ansible_facts.packages" + - rhel7cis_rule_2_2_1_1 + - not rhel7cis_system_is_container + tags: + - level1 + - patch + - rule_2.2.1.1 + +- name: "2.2.1.2 | L1 | PATCH | Ensure chrony is configured" + block: + - name: "2.2.1.2 | L1 | PATCH | Ensure chrony is configured | modify /etc/chrony.conf" + template: + src: chrony.conf.j2 + dest: /etc/chrony.conf + owner: root + group: root + mode: 0644 + + - name: "2.2.1.2 | L1 | PATCH | Ensure chrony is configured | modify /etc/sysconfig/chronyd" + lineinfile: + dest: /etc/sysconfig/chronyd + regexp: "^(#)?OPTIONS" + line: "OPTIONS=\"-u chrony\"" + state: present + create: yes + owner: root + group: root + mode: 0640 + when: + - rhel7cis_time_synchronization == "chrony" + - rhel7cis_rule_2_2_1_2 + - not rhel7cis_system_is_container + tags: + - level1 + - patch + - 
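# Verification sketch for 2.2.1.1, assuming chrony is the selected
# rhel7cis_time_synchronization provider:
#   systemctl is-active chronyd
#   chronyc sources -v   # lists configured time sources and their reachability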
rule_2.2.1.2 + +- name: "2.2.1.3 | L1 | PATCH | Ensure ntp is configured" + block: + - name: "2.2.1.3 | L1 | PATCH | Ensure ntp is configured | modify /etc/ntp.conf" + template: + src: ntp.conf.j2 + dest: /etc/ntp.conf + owner: root + group: root + mode: 0644 + + - name: "2.2.1.3 | L1 | PATCH | Ensure ntp is configured | modify /etc/sysconfig/ntpd" + lineinfile: + dest: /etc/sysconfig/ntpd + regexp: "^(#)?OPTIONS" + line: "OPTIONS=\"-u ntp:ntp\"" + + - name: "2.2.1.3 | L1 | PATCH | Ensure ntp is configured | modify /usr/lib/systemd/system/ntpd.service" + lineinfile: + dest: /usr/lib/systemd/system/ntpd.service + regexp: "^(#)?ExecStart" + line: "ExecStart=/usr/sbin/ntpd -u ntp:ntp $OPTIONS" + when: + - rhel7cis_time_synchronization == "ntp" + - rhel7cis_rule_2_2_1_3 + - not rhel7cis_system_is_container + tags: + - level1 + - patch + - rule_2.2.1.3 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.x.yml new file mode 100644 index 0000000..507aa07 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.2.x.yml @@ -0,0 +1,356 @@ +--- + +- name: "2.2.2 | L1 | PATCH | Ensure X11 Server components are not installed" + package: + state: absent + name: + - "@X Window System" + - "xorg-x11*" + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_xwindows_required + - rhel7cis_rule_2_2_2 + tags: + - level1 + - scored + - xwindows + - patch + - rule_2.2.2 + +- name: "2.2.3 | L1 | PATCH | Ensure Avahi Server is not installed | disabled" + service: + name: avahi-daemon + state: stopped + enabled: no + when: + - not rhel7cis_avahi_server + - "'avahi' in ansible_facts.packages" + - rhel7cis_rule_2_2_3 + tags: + - level1 + - scored + - avahi + - services + - patch + - rule_2.2.3 + +- name: "2.2.4 | L1 | PATCH | Ensure CUPS is not installed" + package: + name: cups + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_cups_server + - "'cups' in ansible_facts.packages" + - rhel7cis_rule_2_2_4 + tags: + - level1 + - scored + - cups + - services + - patch + - rule_2.2.4 + +- name: "2.2.5 | L1 | PATCH | Ensure DHCP Server is not installed" + package: + name: dhcp + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_dhcp_server + - "'dhcp' in ansible_facts.packages" + - rhel7cis_rule_2_2_5 + tags: + - level1 + - scored + - dhcp + - services + - patch + - rule_2.2.5 + +- name: "2.2.6 | L1 | PATCH | Ensure LDAP server is not installed" + package: + name: openldap-servers + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_ldap_server + - "'openldap-servers' in ansible_facts.packages" + - rhel7cis_rule_2_2_6 + tags: + - level1 + - scored + - ldap + - services + - patch + - rule_2.2.6 + +- name: "2.2.7 | L1 | PATCH | Ensure DNS server is not installed" + package: + name: bind + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_named_server + - "'bind' in ansible_facts.packages" + - rhel7cis_rule_2_2_7 + tags: + - level1 + - scored + - dns + - services + - patch + - rule_2.2.7 + +- name: "2.2.8 | L1 | PATCH | Ensure FTP server is not installed" + package: + name: vsftpd + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_vsftpd_server + - "'vsftpd' in ansible_facts.packages" + - rhel7cis_rule_2_2_8 + tags: + - level1 + - scored + - ftp + - services + - patch + - rule_2.2.8 + +- name: "2.2.9 | L1 | PATCH | 
Ensure HTTP server is not installed" + package: + name: httpd + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_httpd_server + - "'httpd' in ansible_facts.packages" + - rhel7cis_rule_2_2_9 + tags: + - level1 + - scored + - ldap + - services + - patch + - rule_2.2.9 + +- name: "2.2.10 | L1 | PATCH | Ensure IMAP and POP3 server is not installed" + package: + name: dovecot + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_dovecot_server + - "'dovecot' in ansible_facts.packages" + - rhel7cis_rule_2_2_10 + tags: + - level1 + - scored + - dovecot + - services + - patch + - rule_2.2.10 + +- name: "2.2.11 | L1 | PATCH | Ensure Samba server is not installed" + package: + name: samba + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_smb_server + - "'samba' in ansible_facts.packages" + - rhel7cis_rule_2_2_11 + tags: + - level1 + - scored + - samba + - services + - patch + - rule_2.2.11 + +- name: "2.2.12 | L1 | PATCH | Ensure HTTP Proxy server is not installed" + package: + name: squid + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_squid_server + - "'squid' in ansible_facts.packages" + - rhel7cis_rule_2_2_12 + tags: + - level1 + - scored + - squid + - services + - patch + - rule_2.2.12 + +- name: "2.2.13 | L1 | PATCH | Ensure net-snmp is not installed" + package: + name: net-snmp + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_snmp_server + - "'net-snmp' in ansible_facts.packages" + - rhel7cis_rule_2_2_13 + tags: + - level1 + - scored + - squid + - services + - patch + - rule_2.2.13 + +- name: "2.2.14 | L1 | PATCH | Ensure NIS server is not installed" + package: + name: ypserv + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_nis_server + - "'ypserv' in ansible_facts.packages" + - rhel7cis_rule_2_2_14 + tags: + - level1 + - scored + - nis + - services + - patch + - rule_2.2.14 + +- name: "2.2.15 | L1 | PATCH | Ensure telnet-server is not installed" + package: + name: telnet-server + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_telnet_server + - "'telnet-server' in ansible_facts.packages" + - rhel7cis_rule_2.2_15 + tags: + - level1 + - scored + - telnet + - services + - patch + - rule_2.2.15 + +- name: "2.2.16 | L1 | PATCH | Ensure mail transfer agent is configured for local-only mode" + lineinfile: + dest: /etc/postfix/main.cf + regexp: "^(#)?inet_interfaces" + line: "inet_interfaces = loopback-only" + notify: restart postfix + when: + - not rhel7cis_is_mail_server + - "'postfix' in ansible_facts.packages" + - rhel7cis_rule_2_2_16 + tags: + - level1 + - patch + - rule_2.2.16 + +- name: "2.2.17 | L1 | PATCH | Ensure nfs-utils is not installed or the nfs-server service is masked" + block: + - name: "2.2.17 | L1 | PATCH | Ensure nfs-utils is not installed or the nfs-server service is masked | nfs-utils " + package: + name: nfs-utils + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_nfs_rpc_server and not rhel7cis_nfs_server + + - name: "2.2.17 | L1 | PATCH | Ensure nfs-utils is not installed or the nfs-server service is masked | nfs-server " + systemd: + name: nfs-server + masked: true + when: + - rhel7cis_nfs_rpc_server and not rhel7cis_nfs_server + when: + - "'nfs-utils' in ansible_facts.packages" + - rhel7cis_rule_2_2_17 + tags: + - level1 + - scored + - nfs + - services 
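# All of the "'<package>' in ansible_facts.packages" guards in section 2.2 assume package
# facts were collected earlier in the run (the role's prelim tasks normally handle this).
# A minimal stand-alone sketch:
#   - name: Gather the package facts used by the 2.2.x removal guards
#     package_facts:
#       manager: auto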
+ - patch + - rule_2.2.17 + +- name: "2.2.18 | L1 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked" + block: + - name: "2.2.18 | L1 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked | rpcbind" + package: + name: rpcbind + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_nfs_rpc_server and not rhel7cis_rpc_server + + - name: "2.2.18 | L1 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked | rpcbind-server" + systemd: + name: rpcbind + masked: true + when: + - rhel7cis_nfs_rpc_server and not rhel7cis_rpc_server + + - name: "2.2.18 | L1 | PATCH | Ensure rpcbind is not installed or the rpcbind services are masked | rpcbind-server" + systemd: + name: rpcbind.socket + masked: true + when: + - rhel7cis_nfs_rpc_server and not rhel7cis_rpc_server + when: + - "'rpcbind' in ansible_facts.packages" + - rhel7cis_rule_2_2_18 + tags: + - level1 + - scored + - rpc + - services + - patch + - rule_2.2.18 + +- name: "2.2.19 | L1 | PATCH | Ensure rsync is not installed or the rsyncd services are masked" + block: + - name: "2.2.19 | L1 | PATCH | Ensure rsync is not installed or the rsyncd services are masked | pkg removal" + package: + name: rsync + state: absent + vars: + ansible_python_interpreter: /bin/python + + - name: "2.2.19 | L1 | PATCH | Ensure rsync is not installed or the rsyncd services are masked | Mask " + systemd: + name: rsyncd + masked: true + + when: + - not rhel7cis_rsyncd_server + - "'rsync' in ansible_facts.packages" + - rhel7cis_rule_2_2_19 + tags: + - level1 + - scored + - rsync + - services + - patch + - rule_2.2.19 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.3.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.3.x.yml new file mode 100644 index 0000000..d7a55b2 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.3.x.yml @@ -0,0 +1,86 @@ +--- + +- name: "2.3.1 | L1 | PATCH | Ensure NIS client is not installed" + package: + name: ypbind + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_ypbind_required + - "'ypbind' in ansible_facts.packages" + - rhel7cis_rule_2_3_1 + tags: + - level1 + - scored + - nis + - patch + - rule_2.3.1 + +- name: "2.3.2 | L1 | PATCH | Ensure rsh client is not installed" + package: + name: rsh + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_rsh_required + - "'rsh' in ansible_facts.packages" + - rhel7cis_rule_2_3_2 + tags: + - level1 + - scored + - rsh + - patch + - rule_2.3.2 + +- name: "2.3.3 | L1 | PATCH | Ensure talk client is not installed" + package: + name: talk + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_talk_required + - "'talk' in ansible_facts.packages" + - rhel7cis_rule_2_3_3 + tags: + - level1 + - scored + - talk + - patch + - rule_2.3.3 + +- name: "2.3.4 | L1 | PATCH | Ensure telnet client is not installed" + package: + name: telnet + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - "'telnet' in ansible_facts.packages" + - not rhel7cis_telnet_required + - rhel7cis_rule_2_3_4 + tags: + - level1 + - scored + - telnet + - patch + - rule_2.3.4 + +- name: "2.3.5 | L1 | PATCH | Ensure LDAP client is not installed" + package: + name: openldap-client + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - not rhel7cis_openldap_clients_required + - "'openldap-client' in ansible_facts.packages" + - 
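# Spot-check sketch for 2.2.16 - 2.2.19 on a host where postfix is installed:
#   postconf inet_interfaces                         # expected: inet_interfaces = loopback-only
#   systemctl is-enabled nfs-server rpcbind rsyncd   # expected: masked (where the packages stay installed)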
rhel7cis_rule_2_3_5 + tags: + - level1 + - scored + - ldap_client + - patch + - rule_2.3.5 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.4.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.4.x.yml new file mode 100644 index 0000000..4eda3a8 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/cis_2.4.x.yml @@ -0,0 +1,11 @@ +--- + +- name: "2.4 | L1 | Ensure nonessential services are removed or masked (Manual)" + debug: + msg: "<----- not required" + when: + - rhel7cis_rule_2_4 + tags: + - level1 + - notscored + - rule_2.4 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/main.yml new file mode 100644 index 0000000..aa5ed4a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_2/main.yml @@ -0,0 +1,16 @@ +--- + +- name: "SECTION | 2.1 | inetd Services" + include: cis_2.1.x.yml + +- name: "SECTION | 2.2.1 | Time Synchronization" + include: cis_2.2.1.x.yml + +- name: "SECTION | 2.2 | Special Purpose Services" + include: cis_2.2.x.yml + +- name: "SECTION | 2.3 | Service Clients" + include: cis_2.3.x.yml + +- name: "SECTION | 2.4 | Nonessential Services" + include: cis_2.4.x.yml diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.1.x.yml new file mode 100644 index 0000000..c4eade0 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.1.x.yml @@ -0,0 +1,36 @@ +--- + +- name: "3.1.1 | L2 | PATCH | Disable IPv6 | grub" + replace: + dest: /etc/default/grub + regexp: '(^GRUB_CMDLINE_LINUX\s*\=\s*)(?:")(.+)(? 0 + + - name: "3.5.2.5 | L1 | AUDIT | Ensure an nftables table exists | Alert on no tables" + debug: + msg: + - "Warning! 
You currently have no nft tables, please review your setup" + - 'Use the command "nft create table inet <table name>" to create a new table' + when: + - rhel7cis_3_5_2_5_nft_tables.stdout | length == 0 + - not rhel7cis_nft_tables_autoNewTable + + - name: "3.5.2.5 | L1 | PATCH | Ensure an nftables table exists | Create table if needed" + command: nft create table inet "{{ rhel7cis_nft_tables_tableName }}" + failed_when: false + when: + - rhel7cis_nft_tables_autoNewTable + when: + - rhel7cis_rule_3_5_2_5 + tags: + - level1 + - patch + - rule_3.5.2.5 + +- name: "3.5.2.6 | L1 | PATCH | Ensure nftables base chains exist" + block: + - name: "3.5.2.6 | L1 | AUDIT | Ensure nftables base chains exist | Get current chains for INPUT" + shell: nft list ruleset | grep 'hook input' + register: rhel7cis_3_5_2_6_input_chains + + - name: "3.5.2.6 | L1 | AUDIT | Ensure nftables base chains exist | Get current chains for FORWARD" + shell: nft list ruleset | grep 'hook forward' + register: rhel7cis_3_5_2_6_forward_chains + + - name: "3.5.2.6 | L1 | AUDIT | Ensure nftables base chains exist | Get current chains for OUTPUT" + shell: nft list ruleset | grep 'hook output' + register: rhel7cis_3_5_2_6_output_chains + + - name: "3.5.2.6 | L1 | AUDIT | Ensure nftables base chains exist | Display chains for review" + debug: + msg: + - "Below are the current INPUT chains" + - "{{ rhel7cis_3_5_2_6_input_chains.stdout_lines }}" + - "Below are the current FORWARD chains" + - "{{ rhel7cis_3_5_2_6_forward_chains.stdout_lines }}" + - "Below are the current OUTPUT chains" + - "{{ rhel7cis_3_5_2_6_output_chains.stdout_lines }}" + when: + - not rhel7cis_nft_tables_autoChainCreate + + - name: "3.5.2.6 | L1 | PATCH | Ensure nftables base chains exist | Create chains if needed" + command: "{{ item }}" + failed_when: false + with_items: + - nft create chain inet "{{ rhel7cis_nft_tables_tableName }}" input { type filter hook input priority 0 \; } + - nft create chain inet "{{ rhel7cis_nft_tables_tableName }}" forward { type filter hook forward priority 0 \; } + - nft create chain inet "{{ rhel7cis_nft_tables_tableName }}" output { type filter hook output priority 0 \; } + when: + - rhel7cis_nft_tables_autoChainCreate + when: + - rhel7cis_rule_3_5_2_6 + tags: + - skip_ansible_lint + - level1 + - patch + - rule_3.5.2.6 + +- name: "3.5.2.7 | L1 | PATCH | Ensure nftables loopback traffic is configured" + block: + - name: "3.5.2.7 | L1 | AUDIT | Ensure nftables loopback traffic is configured | Gather iif lo accept existence" + shell: nft list ruleset | awk '/hook input/,/}/' | grep 'iif "lo" accept' + changed_when: false + failed_when: false + register: rhel7cis_3_5_2_7_iiflo + + - name: "3.5.2.7 | L1 | AUDIT | Ensure nftables loopback traffic is configured | Gather ip saddr existence" + shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip saddr' + changed_when: false + failed_when: false + register: rhel7cis_3_5_2_7_ipsaddr + + - name: "3.5.2.7 | L1 | AUDIT | Ensure nftables loopback traffic is configured | Gather ip6 saddr existence" + shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip6 saddr' + changed_when: false + failed_when: false + register: rhel7cis_3_5_2_7_ip6saddr + + - name: "3.5.2.7 | L1 | PATCH | Ensure nftables loopback traffic is configured | Set iif lo accept rule" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input iif lo accept + when: + - '"iif \"lo\" accept" not in rhel7cis_3_5_2_7_iiflo.stdout' + + - name: "3.5.2.7 | L1 | PATCH | Ensure nftables loopback traffic is configured | Set ip saddr 
rule" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input ip saddr 127.0.0.0/8 counter drop + when: + - '"ip saddr 127.0.0.0/8 counter packets 0 bytes 0 drop" not in rhel7cis_3_5_2_7_ipsaddr.stdout' + + - name: "3.5.2.7 | L1 | PATCH | Ensure nftables loopback traffic is configured | Set ip6 saddr rule" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input ip6 saddr ::1 counter drop + when: + - '"ip6 saddr ::1 counter packets 0 bytes 0 drop" not in rhel7cis_3_5_2_7_ip6saddr.stdout' + when: + - rhel7cis_rule_3.5.2.7 + tags: + - level1 + - patch + - rule_3.5.2.7 + +- name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured" + block: + - name: "3.5.2.8 | L1 | AUDIT | Ensure nftables outbound and established connections are configured | Gather incoming connection rules" + shell: nft list ruleset | awk '/hook input/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state' + changed_when: false + failed_when: false + register: rhel7cis_3_5_2_8_inconnectionrule + + - name: "3.5.2.8 | L1 | AUDIT | Ensure nftables outbound and established connections are configured | Gather outbound connection rules" + shell: nft list ruleset | awk '/hook output/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state' + changed_when: false + failed_when: false + register: rhel7cis_3_5_2_8_outconnectionrule + + - name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add input tcp established accpet policy" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input ip protocol tcp ct state established accept + become: true + when: + - '"ip protocol tcp ct state established accept" not in rhel7cis_3_5_2_8_inconnectionrule.stdout' + + - name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add input udp established accpet policy" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input ip protocol udp ct state established accept + become: true + when: + - '"ip protocol udp ct state established accept" not in rhel7cis_3_5_2_8_inconnectionrule.stdout' + + - name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add input icmp established accpet policy" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input ip protocol icmp ct state established accept + become: true + when: + - '"ip protocol icmp ct state established accept" not in rhel7cis_3_5_2_8_inconnectionrule.stdout' + + - name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add output tcp new, related, established accpet policy" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" output ip protocol tcp ct state new,related,established accept + become: true + when: + - '"ip protocol tcp ct state established,related,new accept" not in rhel7cis_3_5_2_8_outconnectionrule.stdout' + + - name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add output udp new, related, established accpet policy" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" output ip protocol udp ct state new,related,established accept + become: true + when: + - '"ip protocol udp ct state established,related,new accept" not in rhel7cis_3_5_2_8_outconnectionrule.stdout' + + - name: "3.5.2.8 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add output icmp new, related, established accpet 
policy" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" output ip protocol icmp ct state new,related,established accept + become: true + when: + - '"ip protocol icmp ct state established,related,new accept" not in rhel7cis_3_5_2_8_outconnectionrule.stdout' + when: + - rhel7cis_rule_3_5_2_8 + tags: + - level1 + - patch + - rule_3.5.2.8 + +- name: "3.5.2.9 | L1 | PATCH | Ensure nftables default deny firewall policy" + block: + - name: "3.5.2.9 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for hook input deny policy" + shell: nft list table inet "{{ rhel7cis_nft_tables_tableName }}" | grep 'hook input' + become: true + failed_when: false + changed_when: false + register: rhel7cis_3_5_2_9_inputpolicy + + - name: "3.5.2.9 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for hook forward deny policy" + shell: nft list table inet "{{ rhel7cis_nft_tables_tableName }}" | grep 'hook forward' + become: true + failed_when: false + changed_when: false + register: rhel7cis_3_5_2_9_forwardpolicy + + - name: "3.5.2.9 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for hook output deny policy" + shell: nft list table inet "{{ rhel7cis_nft_tables_tableName }}" | grep 'hook output' + become: true + failed_when: false + changed_when: false + register: rhel7cis_3_5_2_9_outputpolicy + + - name: "3.5.2.9 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for SSH allow" + shell: nft list table inet "{{ rhel7cis_nft_tables_tableName }}" | grep 'ssh' + become: true + failed_when: false + changed_when: false + register: rhel7cis_3_5_2_9_sshallowcheck + + - name: "3.5.2.9 | L1 | PATCH | Ensure nftables default deny firewall policy | Enable SSH traffic" + command: nft add rule inet "{{ rhel7cis_nft_tables_tableName }}" input tcp dport ssh accept + become: true + when: + - '"tcp dport ssh accept" not in rhel7cis_3_5_2_9_sshallowcheck.stdout' + + - name: "3.5.2.9 | L1 | PATCH | Ensure nftables default deny firewall policy | Set hook input deny policy" + command: nft chain inet "{{ rhel7cis_nft_tables_tableName }}" input { policy drop \; } + become: true + when: + - '"type filter hook input priority 0; policy drop;" not in rhel7cis_3_5_2_9_inputpolicy.stdout' + + - name: "3.5.2.9 | L1 | PATCH | EEnsure nftables default deny firewall policy | Create hook forward deny policy" + command: nft chain inet "{{ rhel7cis_nft_tables_tableName }}" forward { policy drop \; } + become: true + when: + - '"type filter hook forward priority 0; policy drop;" not in rhel7cis_3_5_2_9_forwardpolicy.stdout' + + - name: "3.5.2.9 | L1 | PATCH | Ensure nftables default deny firewall policy | Create hook output deny policy" + command: nft chain inet "{{ rhel7cis_nft_tables_tableName }}" output { policy drop \; } + become: true + when: + - '"type filter hook output priority 0; policy drop;" not in rhel7cis_3_5_2_9_outputpolicy.stdout' + when: + - rhel7cis_rule_3_5_2_9 + tags: + - level1 + - patch + - rule_3.5.2.9 + +- name: "3.5.2.10 | L1 | PATCH | Ensure nftables service is enabled" + systemd: + name: nftables + enabled: yes + become: true + when: + - rhel7cis_rule_3_5_2_10 + tags: + - level1 + - patch + - rule_3.5.2.10 + +- name: "3.5.2.11 | L1 | PATCH | Ensure nftables rules are permanent" + lineinfile: + path: /etc/sysconfig/nftables.conf + state: present + insertafter: EOF + line: include "/etc/nftables/inet-{{ rhel7cis_nft_tables_tableName }}" + become: true + when: + - rhel7cis_rule_3_5_2_11 + tags: + - level1 + - patch + - rule_3.5.2.11 diff 
--git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.5.3.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.5.3.1.x.yml new file mode 100644 index 0000000..5f96a11 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/cis_3.5.3.1.x.yml @@ -0,0 +1,46 @@ +--- + +- name: "3.5.3.1.1 | L1 | PATCH | Ensure iptables packages are installed" + package: + name: "{{ item }}" + state: present + loop: + - iptables + - iptables-services + vars: + ansible_python_interpreter: /bin/python + when: + - "'iptables' not in ansible_facts.packages" + - rhel7cis_rule_3_5_3_1_1 + tags: + - level1 + - patch + - rule_3.5.3.1 + +- name: "3.5.3.1.2 | L1 | PATCH | Ensure nftables is not installed with iptables" + package: + name: nftables + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - "'nftables' in ansible_facts.packages" + - rhel7cis_rule_3_5_3_1_2 + tags: + - level1 + - patch + - rule_3.5.3.2 + +- name: "3.5.3.1.3 | L1 | PATCH | Ensure firewalld is either not installed or masked with iptables" + package: + name: firewalld + state: absent + vars: + ansible_python_interpreter: /bin/python + when: + - "'firewalld' in ansible_facts.packages" + - rhel7cis_rule_3_5_3_1_3 + tags: + - level1 + - patch + - rule_3.5.3.3 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/main.yml new file mode 100644 index 0000000..4bcb22d --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_3/main.yml @@ -0,0 +1,28 @@ +--- + +- name: "SECTION | 3.1 | Disable unused network protocols and devices" + include: cis_3.1.x.yml + +- name: "SECTION | 3.2 | Network Parameters (Host Only)" + include: cis_3.2.x.yml + +- name: "SECTION | 3.3 | Network Parameters (Host and Router)" + include: cis_3.3.x.yml + +- name: "SECTION | 3.4 | Uncommon Network Protocols" + include: cis_3.4.x.yml + +- name: "SECTION | 3.5.1 | Configure firewalld" + include: cis_3.5.1.x.yml + when: + - rhel7cis_firewall == "firewalld" + +- name: "SECTION | 3.5.2 | Configure nftables" + include: cis_3.5.2.x.yml + when: + - rhel7cis_firewall == "nftables" + +- name: "SECTION | 3.5.3.1.x | Configure iptables" + include: cis_3.5.3.1.x.yml + when: + - rhel7cis_firewall == "iptables" diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.1.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.1.1.x.yml new file mode 100644 index 0000000..f4a648c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.1.1.x.yml @@ -0,0 +1,44 @@ +--- +- name: "4.1.1.1 | L2 | PATCH | Ensure auditd is installed" + package: + name: ['audit', 'audit-libs'] + state: present + vars: + ansible_python_interpreter: /bin/python + when: + - rhel7cis_rule_4_1_1_1 + notify: restart auditd + tags: + - level2 + - auditd + - rule_4.1.1.1 + +- name: "4.1.1.2 | L2 | PATCH | Ensure auditd service is enabled and running" + service: + name: auditd + state: started + enabled: yes + when: + - not rhel7cis_skip_for_travis + - rhel7cis_rule_4_1_1_2 + - ansible_connection != 'docker' + tags: + - level2 + - auditd + - patch + - rule_4.1.1.2 + +- name: "4.1.1.3 | L2 | PATCH | Ensure auditing for processes that start prior to auditd is enabled" + replace: + dest: /etc/default/grub + # regexp: '(^GRUB_CMDLINE_LINUX\s*\=\s*)(?:")(.+)(?/dev/null; done + register: priv_procs + changed_when: false + check_mode: false + + - name: "4.1.11 | L2 | PATCH | Ensure use of privileged commands is collected" + template: + src: audit/priv_commands.rules.j2 + 
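# After 4.1.1.3 the kernel command line should carry audit=1. A hedged check plus the
# regeneration step the grub2cfg handler is assumed to perform (BIOS layout shown):
#   grep GRUB_CMDLINE_LINUX /etc/default/grub
#   grub2-mkconfig -o /boot/grub2/grub.cfg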
dest: /etc/audit/rules.d/priv_commands.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_11 + tags: + - level2 + - auditd + - patch + - rule_4.1.11 + +- name: "4.1.12 | L2 | PATCH | Ensure successful file system mounts are collected" + template: + src: audit/mounts.rules.j2 + dest: /etc/audit/rules.d/mounts.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_12 + tags: + - level2 + - auditd + - patch + - automated + - rule_4.1.12 + +- name: "4.1.13 | L2 | PATCH | Ensure file deletion events by users are collected" + template: + src: audit/deletion.rules.j2 + dest: /etc/audit/rules.d/deletion.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_13 + tags: + - level2 + - auditd + - patch + - rule_4.1.13 + +- name: "4.1.14 | L2 | PATCH | Ensure changes to system administration scope (sudoers) is collected" + template: + src: audit/scope.rules.j2 + dest: /etc/audit/rules.d/scope.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_14 + tags: + - level2 + - auditd + - patch + - rule_4.1.14 + +- name: "4.1.15 | L2 | PATCH | Ensure system administrator command executions (sudo) are collected" + template: + src: audit/actions.rules.j2 + dest: /etc/audit/rules.d/actions.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_15 + tags: + - level2 + - auditd + - patch + - rule_4.1.15 + +- name: "4.1.16 | L2 | PATCH | Ensure kernel module loading and unloading is collected" + template: + src: audit/modules.rules.j2 + dest: /etc/audit/rules.d/modules.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_16 + tags: + - level2 + - auditd + - patch + - rule_4.1.16 + +- name: "4.1.17 | L2 | PATCH | Ensure the audit configuration is immutable" + template: + src: audit/99_finalize.rules.j2 + dest: /etc/audit/rules.d/99_finalize.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel7cis_rule_4_1_17 + tags: + - level2 + - auditd + - patch + - rule_4.1.17 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.1.x.yml new file mode 100644 index 0000000..42874d0 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.1.x.yml @@ -0,0 +1,171 @@ +--- + +- name: "4.2.1.1 | L1 | PATCH | Ensure rsyslog is installed" + package: + name: "{{ rhel7cis_syslog }}" + state: present + vars: + ansible_python_interpreter: /bin/python + when: + - rhel7cis_rule_4_2_1_1 + tags: + - level1 + - patch + - rule_4.2.1.1 + +- name: "4.2.1.2 | L1 | PATCH | Ensure rsyslog Service is enabled and running" + service: + name: rsyslog + enabled: yes + state: started + when: + - rhel7cis_rule_4_2_1_2 + - rhel7cis_syslog == 'rsyslog' + tags: + - level1 + - patch + - rsyslog + - rule_4.2.1.2 + +- name: "4.2.1.3 | L1 | PATCH | Ensure rsyslog default file permissions configured" + lineinfile: + dest: /etc/rsyslog.conf + regexp: '^\$FileCreateMode' + line: '$FileCreateMode 0640' + notify: restart rsyslog + when: + - rhel7cis_rule_4_2_1_3 + tags: + - level1 + - patch + - rule_4.2.1.3 + +- name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured" + block: + - name: "4.2.1.4 | L1 | AUDIT | Ensure logging is configured | rsyslog current config message out" + shell: cat /etc/rsyslog.conf + changed_when: false + failed_when: false 
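# The 99_finalize.rules.j2 template used by 4.1.17 is expected to render the auditd
# immutable flag (an assumption - the template lives elsewhere in the role):
#   -e 2
# After "augenrules --load", "auditctl -s" should report "enabled 2" and further rule
# changes require a reboot.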
+ register: rhel_07_4_2_1_4_audit + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | Debug out rsyslog settings" + debug: + msg: + - "These are the current logging configurations for rsyslog, please review:" + - "{{ rhel_07_4_2_1_4_audit.stdout_lines }}" + when: + - not rhel7cis_rsyslog_ansiblemanaged + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | Set logging settings lineinfile" + lineinfile: + path: /etc/rsyslog.conf + state: present + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: '^[# ]*{{ item.insertafter }}\s*=\s*' + with_items: + - { regexp: '^\*\.emerg(.*)$', line: '*.emerg :omusrmsg:*', insertafter: '# Everybody gets emergency messages' } + - { regexp: '^authpriv(.*)$', line: 'auth,authpriv.* /var/log/secure', insertafter: '# # The authpriv file has restricted access.' } + - { regexp: '^mail\.\*(.*)$', line: 'mail.* -/var/log/mail', insertafter: '# Log all the mail messages in one place.' } + - { regexp: '^mail.info(.*)$', line: 'mail.info -/var/log/mail.info', insertafter: '# Log all the mail messages in one place.' } + - { regexp: '^mail.warning(.*)$', line: 'mail.warning -/var/log/mail.warning', insertafter: '# Log all the mail messages in one place.' } + - { regexp: '^mail.err(.*)$', line: 'mail.err /var/log/mail.err', insertafter: '# Log all the mail messages in one place.' } + - { regexp: '^news.crit(.*)$', line: 'news.crit -/var/log/news/news.crit', insertafter: '# Save news errors of level crit and higher in a special file.' } + - { regexp: '^news.err(.*)$', line: 'news.err -/var/log/news/news.err', insertafter: '# Save news errors of level crit and higher in a special file.' } + - { regexp: '^news.notice(.*)$', line: 'news.notice -/var/log/news/news.notice', insertafter: '# Save news errors of level crit and higher in a special file.' } + loop_control: + label: "{{ item.regexp }}" + notify: restart rsyslog + when: + - rhel7cis_rsyslog_ansiblemanaged + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | Misc. log setting" + blockinfile: + path: /etc/rsyslog.conf + state: present + marker: "# {mark} MISC. LOG SETTINGS (ANSIBLE MANAGED)" + block: | + # misc. logging additions to meet CIS standards + *.=warning;*.=err -/var/log/warn + *.crit /var/log/warn + *.*;mail.none;news.none /var/log/messages + insertafter: '#### RULES ####' + notify: restart rsyslog + when: + - rhel7cis_rsyslog_ansiblemanaged + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | Local log settings" + blockinfile: + path: /etc/rsyslog.conf + state: present + marker: "#{mark} LOCAL LOG SETTINGS (ANSIBLE MANAGED)" + block: | + # local log settings + local0,local1.* -/var/log/localmessages + local2,local3.* -/var/log/localmessages + local4,local5.* -/var/log/localmessages + local6,local7.* -/var/log/localmessages + insertafter: '#### RULES ####' + notify: restart rsyslog + when: + - rhel7cis_rule_4_2_1_4 + tags: + - level1 + - patch + - rsyslog + - rule_4.2.1.4 + +- name: "4.2.1.5 | L1 | PATCH | Ensure rsyslog is configured to send logs to a remote log host" + blockinfile: + path: /etc/rsyslog.conf + state: present + block: | + # remote host is: name/ip:port, e.g. 
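# Syntax-check sketch after the 4.2.1.x rsyslog edits (rsyslog 8 on RHEL 7):
#   rsyslogd -N1   # validates /etc/rsyslog.conf and exits non-zero on errors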
192.168.0.1:514, port optional + *.* @@{{ rhel7cis_remote_log_server }} + insertafter: EOF + notify: restart rsyslog + register: result + failed_when: + - result is failed + - result.rc != 257 + when: + - rhel7cis_rule_4_2_1_5 + - rhel7cis_remote_log_server is defined + tags: + - level1 + - patch + - rule_4.2.1.5 + - rsyslog + +- name: "4.2.1.6 | L1 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts." + block: + - name: "4.2.1.6 | L1 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts. | When not log host" + replace: + path: /etc/rsyslog.conf + regexp: '({{ item }})' + replace: '#\1' + with_items: + - '^(\$ModLoad imtcp)' + - '^(\$InputTCPServerRun)' + notify: restart rsyslog + when: + - not rhel7cis_system_is_log_server + + - name: "4.2.1.6 | L1 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts. | When log host" + replace: + path: /etc/rsyslog.conf + regexp: '^#(.*{{ item }}.*)' + replace: '\1' + with_items: + - 'ModLoad imtcp' + - 'InputTCPServerRun' + notify: restart rsyslog + when: + - rhel7cis_system_is_log_server + when: + - rhel7cis_rule_4_2_1_6 + tags: + - level1 + - patch + - rule_4.2.1.6 + - rsyslog diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.2.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.2.x.yml new file mode 100644 index 0000000..21867e9 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.2.x.yml @@ -0,0 +1,43 @@ +--- + +- name: "4.2.2.1 | L1 | PATCH | Ensure journald is configured to send logs to rsyslog" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^[F|f]orward[T|t]o[S|s]yslog=' + line: "ForwardToSyslog=yes" + when: + - rhel7cis_rule_4_2_2_1 + tags: + - level1 + - patch + - manual + - rule_4.2.2.1 + - journald + +- name: "4.2.2.2 | L1 | PATCH | Ensure journald is configured to compress large log files" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^[C|c]ompress=' + line: "Compress=yes" + when: + - rhel7cis_rule_4_2_2_2 + tags: + - level1 + - patch + - automated + - rule_4.2.2.2 + - journald + +- name: "4.2.2.3 | L1 | PATCH | Ensure journald is configured to write logfiles to persistent disk" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^[S|s]torage=' + line: "Storage=persistent" + when: + - rhel7cis_rule_4_2_2_3 + tags: + - level1 + - patch + - automated + - rule_4.2.2.3 + - journald diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.x.yml new file mode 100644 index 0000000..5e06700 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/cis_4.2.x.yml @@ -0,0 +1,35 @@ +--- + +- name: "4.2.3 | L1 | PATCH | Ensure permissions on all logfiles are configured" + command: find /var/log -type f -exec chmod g-wx,o-rwx "{}" + + changed_when: false + failed_when: false + when: + - rhel7cis_rule_4_2_3 + tags: + - level1 + - patch + - automated + - rule_4.2.3 + +- name: "4.2.4 | L1 | AUDIT | Ensure logrotate is configured" + block: + - name: "4.2.4 | L1 | AUDIT | Ensure logrotate is configured" + find: + paths: /etc/logrotate.d/ + register: log_rotate + + - name: "4.2.4 | L1 | AUDIT | Ensure logrotate is configured | output list" + debug: + msg: + - WARNING!! 
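# The journald settings from 4.2.2.1 - 4.2.2.3 only apply after systemd-journald is
# restarted; the tasks above do not notify a handler for it. A hedged follow-up sketch:
#   - name: Apply journald configuration changes
#     systemd:
#       name: systemd-journald
#       state: restarted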
The following files need to confirm with site requirements + - /etc/logrotate.conf + - "{{ log_rotate.files | map(attribute='path') | list }}" + when: log_rotate.matched > 0 + when: + - rhel7cis_rule_4_2_4 + tags: + - level1 + - audit + - manual + - rule_4.2.4 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/main.yml new file mode 100644 index 0000000..7147bbb --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_4/main.yml @@ -0,0 +1,16 @@ +--- + +- name: "SECTION | 4.1| Configure System Accounting (auditd)" + include: cis_4.1.1.x.yml + +- name: "SECTION | 4.1.2.x| Configure Data Retention" + include: cis_4.1.2.x.yml + +- name: "SECTION | 4.2.x| Configure Logging" + include: cis_4.2.1.x.yml + +- name: "SECTION | 4.2.2.x| Configure journald" + include: cis_4.2.2.x.yml + +- name: "SECTION | 4.2.x | logfile configuration" + include: cis_4.2.x.yml diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.1.x.yml new file mode 100644 index 0000000..dd13f34 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.1.x.yml @@ -0,0 +1,148 @@ +--- + +- name: "5.1.1 | L1 | PATCH | Ensure cron daemon is enabled and running" + service: + name: crond + enabled: yes + state: started + when: + - rhel7cis_rule_5_1_1 + tags: + - level1 + - patch + - rule_5.1.1 + +- name: "5.1.2 | L1 | PATCH | Ensure permissions on /etc/crontab are configured" + file: + dest: /etc/crontab + owner: root + group: root + mode: 0600 + when: + - rhel7cis_rule_5_1_2 + tags: + - level1 + - patch + - rule_5.1.2 + +- name: "5.1.3 | L1 | PATCH | Ensure permissions on /etc/cron.hourly are configured" + file: + dest: /etc/cron.hourly + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel7cis_rule_5_1_3 + tags: + - level1 + - patch + - rule_5.1.3 + +- name: "5.1.4 | L1 | PATCH | Ensure permissions on /etc/cron.daily are configured" + file: + dest: /etc/cron.daily + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel7cis_rule_5_1_4 + tags: + - level1 + - patch + - rule_5.1.4 + +- name: "5.1.5 | L1 | PATCH | Ensure permissions on /etc/cron.weekly are configured" + file: + dest: /etc/cron.weekly + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel7cis_rule_5_1_5 + tags: + - level1 + - patch + - rule_5.1.5 + +- name: "5.1.6 | L1 | PATCH | Ensure permissions on /etc/cron.monthly are configured" + file: + dest: /etc/cron.monthly + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel7cis_rule_5_1_6 + tags: + - level1 + - patch + - rule_5.1.6 + +- name: "5.1.7 | L1 | PATCH | Ensure permissions on /etc/cron.d are configured" + file: + dest: /etc/cron.d + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel7cis_rule_5_1_7 + tags: + - level1 + - patch + - rule_5.1.7 + +- name: "5.1.8 | L1 | PATCH | Ensure cron is restricted to authorized users" + block: + - name: "5.1.8 | L1 | PATCH | Ensure cron is restricted to authorized users | Remove cron.deny" + file: + dest: /etc/cron.deny + state: absent + + - name: "5.1.8 | L1 | PATCH | Ensure cron is restricted to authorized users | Check if cron.allow exists" + stat: + path: "/etc/cron.allow" + register: p + + - name: "5.1.8 | L1 | PATCH | Ensure cron is restricted to authorized users | Ensure cron.allow is restricted to authorized users" + file: + dest: /etc/cron.allow + state: '{{ "file" if p.stat.exists else 
"touch" }}' + owner: root + group: root + mode: 0600 + when: + - rhel7cis_rule_5_1_8 + tags: + - level1 + - patch + - rule_5.1.8 + +- name: "5.1.9 | L1 | PATCH | Ensure at is restricted to authorized users" + block: + - name: "5.1.9 | L1 | PATCH | Ensure at is restricted to authorized users | Remove at.deny" + file: + dest: /etc/at.deny + state: absent + + - name: "5.1.9 | L1 | AUDIT | Ensure at is restricted to authorized users | Check if at.allow exists" + stat: + path: "/etc/at.allow" + register: p + + - name: "5.1.9 | L1 | PATCH | Ensure at is restricted to authorized users | Ensure at.allow is restricted to authorized users" + file: + dest: /etc/at.allow + state: '{{ "file" if p.stat.exists else "touch" }}' + owner: root + group: root + mode: 0600 + when: + - rhel7cis_rule_5_1_9 + tags: + - level1 + - patch + - rule_5.1.9 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.2.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.2.x.yml new file mode 100644 index 0000000..1fd6070 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.2.x.yml @@ -0,0 +1,46 @@ +--- + +- name: "5.2.1 | L1 | PATCH | Ensure sudo is installed" + package: + name: sudo + state: present + become: true + when: + - rhel7cis_rule_5_2_1 + tags: + - level1 + - sudo + - patch + - rule_5.2.1 + vars: + ansible_python_interpreter: /bin/python + +- name: "5.2.2 | L1 | AUDIT | Ensure sudo commands use pty" + lineinfile: + dest: /etc/sudoers + regexp: '^Defaults use_pty' + line: 'Defaults use_pty' + state: present + validate: '/usr/sbin/visudo -cf %s' + become: true + when: + - rhel7cis_rule_5_2_2 + tags: + - level1 + - patch + - rule_5.2.2 + +- name: "5.2.3 | L1 | PATCH | Ensure sudo log file exists" + lineinfile: + dest: /etc/sudoers + regexp: '^Defaults logfile="{{ rhel7cis_varlog_location }}"' + line: 'Defaults logfile="{{ rhel7cis_varlog_location }}"' + state: present + validate: '/usr/sbin/visudo -cf %s' + become: true + when: + - rhel7cis_rule_5_2_1 and rhel7cis_rule_5_2_3 + tags: + - level1 + - patch + - rule_5.2.3 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.3.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.3.x.yml new file mode 100644 index 0000000..a954c2e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.3.x.yml @@ -0,0 +1,458 @@ +--- + +- name: "5.3.1 | L1 | PATCH | Ensure permissions on /etc/ssh/sshd_config are configured" + file: + dest: /etc/ssh/sshd_config + state: file + owner: root + group: root + mode: 0600 + when: + - rhel7cis_rule_5_3_1 + tags: + - level1 + - patch + - ssh + - rule_5.3.1 + +- name: "5.3.2 | L1 | PATCH | Ensure permissions on SSH private host key files are configured" + block: + - name: "5.3.2 | L1 | AUDIT | Ensure permissions on SSH private host key files are configured" + find: + paths: + - /etc/ssh + patterns: 'ssh_host_*_key' + recurse: true + file_type: any + register: rhel_07_5_3_2_priv_results + + - name: "5.3.2 | L1 | AUDIT | Ensure permissions on SSH private host key files are configured" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0600 + loop: "{{ rhel_07_5_3_2_priv_results.files }}" + loop_control: + label: "{{ item.path }}" + when: + - rhel7cis_rule_5_3_2 + tags: + - level1 + - patch + - ssh + - rule_5.3.2 + +- name: "5.3.3 | L1 | PATCH | Ensure permissions on SSH public host key files are configured" + block: + - name: "5.3.3 | L1 | AUDIT | Ensure permissions on SSH public host key files are configured" + find: + paths: + - /etc/ssh + 
patterns: 'ssh_host_*_key' + recurse: true + file_type: any + register: rhel_07_5_3_3_pub_results + + - name: "5.3.3 | L1 | AUDIT | Ensure permissions on SSH public host key files are configured" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0600 + loop: "{{ rhel_07_5_3_3_pub_results.files }}" + loop_control: + label: "{{ item.path }}" + when: + - rhel7cis_rule_5_3_3 + tags: + - level1 + - patch + - ssh + - rule_5.3.3 + +- name: "5.3.4 | L1 | PATCH | Ensure SSH access is limited" + block: + - name: "5.3.4 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for allowusers" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^AllowUsers" + insertbefore: "# Accept locale-related environment variables" + line: AllowUsers {{ rhel7cis_sshd['allowusers'] }} + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - "rhel7cis_sshd['allowusers']|default('') | length > 0" + + - name: "5.3.4 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for allowgroups" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^AllowGroups" + insertbefore: "# Accept locale-related environment variables" + line: AllowGroups {{ rhel7cis_sshd['allowgroups'] }} + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - "rhel7cis_sshd['allowgroups']|default('') | length > 0" + + - name: "5.3.4 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for denyusers" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^DenyUsers" + insertbefore: "# Accept locale-related environment variables" + line: DenyUsers {{ rhel7cis_sshd['denyusers'] }} + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - "rhel7cis_sshd['denyusers']|default('') | length > 0" + + - name: "5.3.4 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for denygroups" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^DenyGroups" + insertbefore: "# Accept locale-related environment variables" + line: DenyGroups {{ rhel7cis_sshd['denygroups'] }} + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - "rhel7cis_sshd['denygroups']|default('') | length > 0" + when: + - rhel7cis_rule_5_3_4 + tags: + - level1 + - patch + - ssh + - rule_5.3.4 + +- name: "5.3.5 | L1 | PATCH | Ensure SSH LogLevel is appropriate" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#LogLevel|^LogLevel" + line: 'LogLevel {{ rhel7cis_ssh_loglevel }}' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_5 + tags: + - level1 + - patch + - ssh + - rule_5.3.5 + +- name: "5.3.6 | L2 | PATCH | Ensure SSH X11 forwarding is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#X11Forwarding|^X11Forwarding" + line: 'X11Forwarding no' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - 
rhel7cis_rule_5_3_6 + tags: + - level2 + - patch + - ssh + - rule_5.3.6 + +- name: "5.3.7 | L1 | PATCH | Ensure SSH MaxAuthTries is set to 4 or less" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^(#)?MaxAuthTries \d' + line: 'MaxAuthTries 4' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_7 + tags: + - level1 + - patch + - ssh + - rule_5.3.7 + +- name: "5.3.8 | L1 | PATCH | Ensure SSH IgnoreRhosts is enabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#IgnoreRhosts|^IgnoreRhosts" + line: 'IgnoreRhosts yes' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_8 + tags: + - level1 + - patch + - ssh + - rule_5.3.8 + +- name: "5.3.9 | L1 | PATCH | Ensure SSH HostbasedAuthentication is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: ^#HostbasedAuthentication|^HostbasedAuthentication" + line: 'HostbasedAuthentication no' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_9 + tags: + - level1 + - patch + - ssh + - rule_5.3.9 + +- name: "5.3.10 | L1 | PATCH | Ensure SSH root login is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#PermitRootLogin|^PermitRootLogin" + line: 'PermitRootLogin no' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_10 + tags: + - level1 + - patch + - ssh + - rule_5.3.10 + +- name: "5.3.11 | L1 | PATCH | Ensure SSH PermitEmptyPasswords is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#PermitEmptyPasswords|^PermitEmptyPasswords" + line: 'PermitEmptyPasswords no' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_11 + tags: + - level1 + - patch + - ssh + - rule_5.3.11 + +- name: "5.3.12 | L1 | PATCH | Ensure SSH PermitUserEnvironment is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#PermitUserEnvironment|^PermitUserEnvironment" + line: 'PermitUserEnvironment no' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_12 + tags: + - level1 + - patch + - ssh + - rule_5.3.12 + +- name: "5.3.13 | L1 | PATCH | Ensure only strong Ciphers are used" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^Ciphers' + insertbefore: "# Accept locale-related environment variables" + line: "Ciphers {{ rhel7cis_sshd['ciphers'] }}" + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_13 + tags: + - level1 + - patch + - ssh + - rule_5.3.13 + +- name: "5.3.14 | L1 | PATCH | Ensure only strong MAC algorithms are used" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^MACs' + insertbefore: "# Accept locale-related environment 
variables" + line: "MACs {{ rhel7cis_sshd['macs'] }}" + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_14 + tags: + - level1 + - patch + - ssh + - rule_5.3.14 + +- name: "5.3.15 | L1 | PATCH | Ensure only strong Key Exchange algorithms are used" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^KexAlgorithms' + line: "KexAlgorithms {{ rhel7cis_sshd['kex'] }}" + insertafter: '^MACs' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_15 + tags: + - level1 + - patch + - ssh + - rule_5.3.15 + +- name: "5.3.16 | L1 | PATCH | Ensure SSH Idle Timeout Interval is configured" + block: + - name: "5.3.16 | L1 | PATCH | Ensure SSH Idle Timeout Interval is configured | Add line in sshd_config for ClientAliveInterval" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#ClientAliveInterval|^ClientAliveInterval" + line: "ClientAliveInterval {{ rhel7cis_sshd['clientaliveinterval'] }}" + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + + - name: "5.3.16 | L1 | PATCH | Ensure SSH Idle Timeout Interval is configured | Ensure SSH ClientAliveCountMax set to <= 3" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#ClientAliveCountMax|^ClientAliveCountMax" + line: "ClientAliveCountMax {{ rhel7cis_sshd['clientalivecountmax'] }}" + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_16 + tags: + - level1 + - patch + - ssh + - rule_5.3.16 + +- name: "5.3.17 | L1 | PATCH | Ensure SSH LoginGraceTime is set to one minute or less" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#LoginGraceTime|^LoginGraceTime" + line: "LoginGraceTime {{ rhel7cis_sshd['logingracetime'] }}" + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_17 + tags: + - level1 + - patch + - ssh + - rule_5.3.17 + +- name: "5.3.18 | L1 | PATCH | Ensure SSH warning banner is configured" + lineinfile: + dest: /etc/ssh/sshd_config + regexp: '^Banner ((?!/etc/issue.net$)).*$' + line: "{{ item.line|default(omit) }}" + insertafter: "{{ item.after|default(omit)}}" + state: "{{ item.state }}" + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + with_items: + - { state: absent } + - { state: present, line: 'Banner /etc/issue.net', after: '#Banner none' } + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_18 + tags: + - level1 + - patch + - ssh + - rule_5.3.18 + +- name: "5.3.19 | L1 | PATCH | Ensure SSH PAM is enabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#UsePAM|^UsePAM" + line: 'UsePAM yes' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_19 + tags: + - level1 + - patch + - ssh + - rule_5.3.19 + +- name: "5.3.20 | L2 | PATCH | Ensure SSH AllowTcpForwarding is disabled" + 
lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#AllowTcpForwarding|^AllowTcpForwarding" + line: 'AllowTcpForwarding no' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_20 + tags: + - level2 + - patch + - ssh + - rule_5.3.20 + +- name: "5.3.21 | L1 | PATCH | Ensure SSH MaxStartups is configured" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#MaxStartups|^MaxStartups" + line: 'MaxStartups 10:30:60' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_21 + tags: + - level1 + - patch + - ssh + - rule_5.3.21 + +- name: "5.3.22 | L1 | PATCH | Ensure SSH MaxSessions is limited" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#MaxSessions|^MaxSessions" + line: 'MaxSessions {{ rhel7cis_ssh_maxsessions }}' + validate: /usr/sbin/sshd -T -C user=root -C host="$(hostname)" -C addr="$(grep $(hostname) /etc/hosts | awk '{print $1}')" -f %s + notify: + - restart sshd + when: + - rhel7cis_rule_5_3_22 + tags: + - level1 + - patch + - ssh + - rule_5.3.22 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.4.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.4.x.yml new file mode 100644 index 0000000..5ab523f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.4.x.yml @@ -0,0 +1,133 @@ +--- + +- name: "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured" + lineinfile: + state: present + dest: "/etc/security/pwquality.conf" + regexp: '^{{ item.key }}' + line: '{{ item.key }} = {{ item.value }}' + with_items: + - { key: 'minlen', value: '14' } + - { key: 'minclass', value: '4' } + when: + - rhel7cis_rule_5_4_1 + tags: + - level1 + - patch + - rule_5.4.1 + +- name: | + " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured + 5.4.3 | L1 | PATCH | Ensure password hashing algorithm is SHA-512 + 5.4.4 | L1 | PATCH | Ensure password reuse is limited" + block: + - name: " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Add deny count and unlock time for preauth" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "auth required pam_faillock.so preauth audit silent deny={{ rhel7cis_pam_faillock.attempts }}{{ (rhel7cis_pam_faillock.fail_for_root) | ternary(' even_deny_root ',' ') }}unlock_time={{ rhel7cis_pam_faillock.unlock_time }}" + insertafter: '^#?auth ?' + loop: + - "system-auth" + - "password-auth" + + - name: " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Add success and default settings to pam_unix.so" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "auth [success=1 default=bad] pam_unix.so" + insertafter: '^#?auth ?' + loop: + - "system-auth" + - "password-auth" + + - name: " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Add default, deny count, and unlock times for authfail" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "auth [default=die] pam_faillock.so authfail audit deny={{ rhel7cis_pam_faillock.attempts }}{{ (rhel7cis_pam_faillock.fail_for_root) | ternary(' even_deny_root ',' ') }}unlock_time={{ rhel7cis_pam_faillock.unlock_time }}" + insertafter: '^#?auth ?' 
+ loop: + - "system-auth" + - "password-auth" + + - name: " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Add deny count and unlock times to authsucc" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "auth sufficient pam_faillock.so authsucc audit deny={{ rhel7cis_pam_faillock.attempts }}{{ (rhel7cis_pam_faillock.fail_for_root) | ternary(' even_deny_root ',' ') }}unlock_time={{ rhel7cis_pam_faillock.unlock_time }}" + insertafter: '^#?auth ?' + loop: + - "system-auth" + - "password-auth" + + - name: "SCORED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Activate deny count and unlock times to failed password" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "account required pam_faillock.so" + firstmatch: yes + regexp: '^\s*account\s+required\s+pam_faillock.so\s*' + insertbefore: '^#?account ?' + loop: + - "system-auth" + - "password-auth" + + - name: " 5.4.3 | L1 | PATCH | Ensure password hashing algorithm is SHA-512 | add sha512 settings" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "password sufficient pam_unix.so {{ rhel7cis_pam_faillock.pwhash }} shadow nullok try_first_pass use_authtok" + insertafter: '^#?password ?' + loop: + - "system-auth" + - "password-auth" + + - name: " 5.4.4 | L1 | PATCH | Ensure password reuse is limited | add remember settings" + lineinfile: + dest: "/etc/pam.d/{{ item }}" + state: present + line: "password required pam_pwhistory.so remember={{ rhel7cis_pam_faillock.remember }}" + insertafter: '^#?password ?' + loop: + - "system-auth" + - "password-auth" + + # The two steps below were added to keep authconfig from overwritting the above configs. This follows steps from here: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/chap-hardening_your_system_with_tools_and_services + # With the steps below you will score five (5) points lower due to false positive results + + - name: | + " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Copy system/passowrd-auth to system/password-auth-local + 5.4.3 | L1 | PATCH | Ensure password hashing algorithm is SHA-512" + 5.4.4 | L1 | PATCH | Ensure password reuse is limited | Copy system/password-auth to system/password-auth-local" + copy: + src: "/etc/pam.d/{{ item }}" + dest: "/etc/pam.d/{{ item }}-local" + remote_src: yes + owner: root + group: root + mode: '0644' + loop: + - "system-auth" + - "password-auth" + + - name: | + " 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Setup symbolic link + 5.4.4 | L1 | PATCH | Ensure password reuse is limited | Setup symbolic link" + file: + src: "/etc/pam.d/{{ item }}-local" + dest: "/etc/pam.d/{{ item }}" + state: link + force: yes + loop: + - "system-auth" + - "password-auth" + when: + - rhel7cis_rule_5_4_2 + - rhel7cis_rule_5_4_3 + - rhel7cis_rule_5_4_4 + tags: + - level1 + - patch + - rule_5.4.2 + - rule_5.4.4 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.1.x.yml new file mode 100644 index 0000000..d76354f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.1.x.yml @@ -0,0 +1,109 @@ +--- + +- name: "5.5.1.1 | L1 | PATCH | Ensure password expiration is 365 days or less" + lineinfile: + state: present + dest: /etc/login.defs + regexp: '^PASS_MAX_DAYS' + line: "PASS_MAX_DAYS {{ rhel7cis_pass['max_days'] }}" + when: + - 
rhel7cis_rule_5_5_1_1 + tags: + - level1 + - patch + - rule_5.5.1.1 + +- name: "5.5.1.2 | L1 | PATCH | Ensure minimum days between password changes is configured" + lineinfile: + state: present + dest: /etc/login.defs + regexp: '^PASS_MIN_DAYS' + line: "PASS_MIN_DAYS {{ rhel7cis_pass['min_days'] }}" + when: + - rhel7cis_rule_5_5_1_2 + tags: + - level1 + - patch + - rule_5.5.1.2 + +- name: "5.5.1.3 | L1 | PATCH | Ensure password expiration warning days is 7 or more" + lineinfile: + state: present + dest: /etc/login.defs + regexp: '^PASS_WARN_AGE' + line: "PASS_WARN_AGE {{ rhel7cis_pass['warn_age'] }}" + when: + - rhel7cis_rule_5_5_1_3 + tags: + - level1 + - patch + - rule_5.5.1.3 + +- name: "5.5.1.4 | L1 | PATCH | Ensure inactive password lock is 30 days or less" + block: + - name: "5.5.1.4 | L1 | PATCH | Ensure inactive password lock is 30 days or less | Set default inactive setting" + lineinfile: + path: /etc/default/useradd + regexp: '^INACTIVE=' + line: "INACTIVE={{ rhel7cis_inactivelock['lock_days'] }}" + state: present + + - name: "5.5.1.4 | L1 | AUDIT | Ensure inactive password lock is 30 days or less | Getting user list" + # shell: 'egrep ^[^:]+:[^\!*] /etc/shadow | cut -d: -f1' + shell: "egrep ^[^:]+:[^\\!*] /etc/shadow | awk -F: '{print $1 \",\" $7}' | egrep -v ',\\d|,[1-2][0-9]|,30|{{ rhel7cis_inactive_whitelist | join('|') }}' | cut -d , -f1" + changed_when: false + check_mode: false + register: rhel_07_5_5_1_4_audit + + - name: "5.5.1.4 | L1 | PATCH | Ensure inactive password lock is 30 days or less | Apply Inactive setting to existing accounts" + shell: "chage --inactive {{ rhel7cis_inactivelock.lock_days }} {{ item }}" + with_items: + - "{{ rhel_07_5_5_1_4_audit.stdout_lines }}" + when: + - rhel_07_5_5_1_4_audit.stdout | length > 0 + when: + - rhel7cis_rule_5_5_1_4 + tags: + - level1 + - patch + - rule_5.5.1.4 + +- name: "5.5.1.5 | L1 | PATCH | Ensure all users last password change date is in the past" + block: + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Get current date in Unix Time" + shell: echo $(($(date --utc --date "$1" +%s)/86400)) + failed_when: false + changed_when: false + check_mode: false + register: rhel7cis_5_5_1_5_current_unix_time + + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Get list of users with last changed pw date in the future" + shell: "cat /etc/shadow | awk -F: '{if($3>{{ rhel7cis_5_5_1_5_current_unix_time.stdout }})print$1}'" + check_mode: false + register: rhel7cis_5_5_1_5_user_list + + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Alert no pw change in the future exist" + debug: + msg: "Good News! All accounts have PW change dates that are in the past" + when: rhel7cis_5_5_1_5_user_list.stdout | length == 0 + + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Alert on accounts with pw change in the future" + debug: + msg: "Warning! 
The following accounts have the last PW change date in the future: {{ rhel7cis_5_5_1_5_user_list.stdout_lines }}" + when: + - rhel7cis_5_5_1_5_user_list.stdout | length > 0 + - not rhel7cis_futurepwchgdate_autofix + + - name: "5.5.1.5 | L1 | PATCH | Ensure all users last password change date is in the past | Fix accounts with pw change in the future" + shell: passwd --expire {{ item }} + when: + - rhel7cis_5_5_1_5_user_list | length > 0 + - rhel7cis_futurepwchgdate_autofix + with_items: + - "{{ rhel7cis_5_5_1_5_user_list.stdout_lines }}" + when: + - rhel7cis_rule_5_5_1_5 + tags: + - level1 + - patch + - rule_5.5.1.5 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.x.yml new file mode 100644 index 0000000..b4cc7ac --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.5.x.yml @@ -0,0 +1,96 @@ +--- + +- name: "5.5.2 | L1 | PATCH | Ensure system accounts are secured" + block: + - name: "5.5.2 | L1 | PATCH | Ensure system accounts are secured | Set nologin" + user: + name: "{{ item.id }}" + shell: /usr/sbin/nologin + loop: "{{ rhel7cis_passwd }}" + loop_control: + label: "{{ item.id }}" + when: + - item.id != "root" + - item.id != "sync" + - item.id != "shutdown" + - item.id != "halt" + - item.gid < rhel7cis_int_gid + - item.shell != " /bin/false" + - item.shell != " /usr/sbin/nologin" + + - name: "5.5.2 | L1 | PATCH | Ensure system accounts are secured | Lock accounts" + user: + name: "{{ item.id }}" + password_lock: true + loop: "{{ rhel7cis_passwd }}" + loop_control: + label: "{{ item.id }}" + when: + - item.id != "halt" + - item.id != "shutdown" + - item.id != "sync" + - item.id != "root" + - item.gid < rhel7cis_int_gid + - item.shell != " /bin/false" + - item.shell != " /usr/sbin/nologin" + when: + - rhel7cis_rule_5_5_2 + tags: + - level1 + - patch + - rule_5.5.2 + +- name: "5.5.3 | L1 | PATCH | Ensure default group for the root account is GID 0" + shell: usermod -g 0 root + changed_when: false + failed_when: false + when: + - rhel7cis_rule_5_5_3 + tags: + - level1 + - patch + - rule_5.5.3 + +- name: "5.5.4 | L1 | PATCH | Ensure default user shell timeout is configured" + blockinfile: + create: yes + mode: 0644 + dest: "{{ item.dest }}" + state: "{{ item.state }}" + marker: "# {mark} ANSIBLE MANAGED" + block: | + # Set session timeout - CIS ID RHEL-07-5.5.4 + TMOUT={{ rhel7cis_shell_session_timeout.timeout }} + readonly TMOUT + export TMOUT + loop: + - dest: "{{ rhel7cis_shell_session_timeout.file }}" + state: present + - dest: /etc/profile + state: "{{ (rhel7cis_shell_session_timeout.file == '/etc/profile') | ternary('present', 'absent') }}" + when: + - rhel7cis_rule_5_5_4 + tags: + - level2 + - patch + - rule_5.5.4 + +- name: "5.5.5 | L1 | PATCH | Ensure default user umask is configured" + block: + - name: "5.5.5 | L1 | PATCH | Ensure default user umask is configured | Set umask for /etc/bashrc" + replace: + path: /etc/bashrc + regexp: '(^\s+umask) 0[012][0-6]' + replace: '\1 027' + + - name: "5.5.5 | L1 | PATCH | Ensure default user umask is configured | Set umask for /etc/profile" + replace: + path: /etc/profile + regexp: '(^\s+umask) 0[012][0-6]' + replace: '\1 027' + when: + - rhel7cis_rule_5_5_5 + tags: + - level1 + - patch + - rule_5.5.5 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.6.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.6.yml new file mode 100644 index 0000000..595bf07 --- /dev/null +++ 
b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.6.yml @@ -0,0 +1,21 @@ +--- + +- name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console " + block: + - name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console" + command: cat /etc/securetty + changed_when: false + check_mode: false + register: rhel_07_5_6_audit + + - name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console" + debug: + msg: + - "These are the consoles with root login access, please review:" + - "{{ rhel_07_5_6_audit.stdout_lines }}" + when: + - rhel7cis_rule_5_6 + tags: + - level1 + - audit + - rule_5.6 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.7.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.7.yml new file mode 100644 index 0000000..5b75ec8 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/cis_5.7.yml @@ -0,0 +1,28 @@ +--- + +- name: "5.7 | L1 | PATCH | Ensure access to the su command is restricted" + block: + - name: "5.7 | L1 | PATCH | Ensure access to the su command is restricted | Setting pam_wheel to use_uid" + lineinfile: + state: present + dest: /etc/pam.d/su + regexp: '^(#)?auth\s+required\s+pam_wheel\.so' + line: 'auth required pam_wheel.so use_uid {% if rhel7cis_sugroup is defined %}group={{ rhel7cis_sugroup }}{% endif %}' + when: + - rhel7cis_rule_5_7 + tags: + - level1 + - patch + - rule_5.7 + + - name: "5.7 | L1 | PATCH | Ensure access to the su command is restricted | wheel group contains root" + group: + name: root + groups: "{{ rhel7cis_sugroup }}" + when: + - rhel7cis_rule_5_7 + - rhel7cis_sugroup is defined + tags: + - level1 + - patch + - rule_5.7 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/main.yml new file mode 100644 index 0000000..4eb80f0 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_5/main.yml @@ -0,0 +1,22 @@ +--- + +- name: "SECTION | 5.1 | Configure time-based job schedulers" + include: cis_5.1.x.yml + +- name: "SECTION | 5.2 | Configure Sudo" + include: cis_5.2.x.yml + +- name: "SECTION | 5.3 | Configure SSH Server" + include: cis_5.3.x.yml + +- name: "SECTION | 5.4 | Configure PAM" + include: cis_5.4.x.yml + +- name: "SECTION | 5.5.1 | Set Shadow Password Suite Parameters" + include: cis_5.5.1.x.yml + +- name: "SECTION | 5.5 | User Accounts and Environment" + include: cis_5.5.x.yml + +- include: cis_5.6.yml +- include: cis_5.7.yml diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.1.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.1.x.yml new file mode 100644 index 0000000..f6b068f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.1.x.yml @@ -0,0 +1,290 @@ +--- +- name: "6.1.1 | L2 | PATCH | Audit system file permissions" + block: + - name: "6.1.1 | AUDIT | Audit system file permissions | Audit the packages" + shell: rpm -Va --nomtime --nosize --nomd5 --nolinkto + changed_when: false + failed_when: false + check_mode: false + register: rhel7cis_6_1_1_packages_rpm + tags: + - skip_ansible_lint + + - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Create list and warning" + block: + - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Add file discrepancy list to system" + copy: + dest: "{{ rhel7cis_rpm_audit_file }}" + content: "{{ rhel7cis_6_1_1_packages_rpm.stdout }}" + changed_when: rhel7cis_6_1_1_packages_rpm.stdout|length > 0 + + - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | 
Message out alert for package descrepancies" + debug: + msg: | + "Warning! You have some package descrepancies issues. + The file list can be found in {{ rhel7cis_rpm_audit_file }}" + changed_when: rhel7cis_6_1_1_packages_rpm.stdout|length > 0 + when: rhel7cis_6_1_1_packages_rpm.stdout|length > 0 + + - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Message out no package descrepancies" + debug: + msg: "Good News! There are no package descrepancies" + when: rhel7cis_6_1_1_packages_rpm.stdout|length == 0 + when: + - rhel7cis_rule_6_1_1 + tags: + - level2 + - patch + - rule_6.1.1 + +- name: "6.1.2 | L1 | PATCH | Ensure permissions on /etc/passwd are configured" + file: + dest: /etc/passwd + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_6_1_2 + tags: + - level1 + - patch + - rule_6.1.2 + +- name: "6.1.3 | L1 | PATCH | Ensure permissions on /etc/passwd- are configured" + file: + dest: /etc/passwd- + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_6_1_3 + tags: + - level1 + - patch + - rule_6.1.3 + +- name: "6.1.4 | L1 | PATCH | Ensure permissions on /etc/shadow are configured" + file: + dest: /etc/shadow + owner: root + group: root + mode: 0000 + when: + - rhel7cis_rule_6_1_4 + tags: + - level1 + - patch + - rule_6.1.4 + +- name: "6.1.5 | L1 | PATCH | Ensure permissions on /etc/shadow- are configured" + file: + dest: /etc/shadow- + owner: root + group: root + mode: 0000 + when: + - rhel7cis_rule_6_1_5 + tags: + - level1 + - patch + - rule_6.1.5 + +- name: "6.1.6 | L1 | PATCH | Ensure permissions on /etc/gshadow- are configured" + file: + dest: /etc/gshadow- + owner: root + group: root + mode: 0000 + when: + - rhel7cis_rule_6_1_6 + tags: + - level1 + - patch + - rule_6.1.6 + +- name: "6.1.7 | L1 | PATCH | Ensure permissions on /etc/gshadow are configured" + file: + dest: /etc/gshadow + owner: root + group: root + mode: 0000 + when: + - rhel7cis_rule_6_1_7 + tags: + - level1 + - patch + - rule_6.1.7 + +- name: "6.1.8 | L1 | PATCH | Ensure permissions on /etc/group are configured" + file: + dest: /etc/group + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_6_1_8 + tags: + - level1 + - patch + - rule_6.1.8 + +- name: "6.1.9 | L1 | PATCH | Ensure permissions on /etc/group- are configured" + file: + dest: /etc/group- + owner: root + group: root + mode: 0644 + when: + - rhel7cis_rule_6_1_9 + tags: + - level1 + - patch + - rule_6.1.9 + +- name: "6.1.10 | L1 | PATCH | Ensure no world writable files exist" + block: + - name: "6.1.10 | L1 | AUDIT | Ensure no world writable files exist | Get list of world-writable files" + shell: df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type f -perm -0002 + failed_when: false + changed_when: false + register: rhel_07_6_1_10_perms_results + + - name: "6.1.10 | L1 | AUDIT | Ensure no world writable files exist | Alert no world-writable files exist" + debug: + msg: "Good news! 
We have not found any world-writable files on your system" + failed_when: false + changed_when: false + when: + - rhel_07_6_1_10_perms_results.stdout is not defined + + - name: "6.1.10 | L1 | PATCH | Ensure no world writable files exist | Adjust world-writable files if they exist (Configurable)" + file: + path: '{{ item }}' + mode: o-w + state: touch + with_items: "{{ rhel_07_6_1_10_perms_results.stdout_lines }}" + when: + - rhel_07_6_1_10_perms_results.stdout_lines is defined + - rhel7cis_no_world_write_adjust + when: + - rhel7cis_rule_6_1_10 + tags: + - level1 + - patch + - rule_6.1.10 + +- name: "6.1.11 | L1 | PATCH | Ensure no unowned files or directories exist" + block: + - name: "6.1.11 | L1 | AUDIT | Ensure no unowned files or directories exist | Finding all unowned files or directories" + command: find "{{ item.mount }}" -xdev -nouser + check_mode: false + register: rhel_07_6_1_11_audit + failed_when: false + changed_when: false + when: item['device'].startswith('/dev') and not 'bind' in item['options'] + with_items: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + + - name: "6.1.11 | L1 | AUDIT | Ensure no unowned files or directories exist | Displaying any unowned files or directories" + debug: + msg: "Warning - Manual intervention is required -- missing owner on items in {{ item.item.mount }}: {{ item.stdout_lines | join(', ') }}" + when: + - item.stdout_lines is defined + - item.stdout_lines | length > 0 + with_items: "{{ rhel_07_6_1_11_audit.results }}" + when: + - rhel7cis_rule_6_1_11 + tags: + - level1 + - patch + - rule_6.1.11 + +- name: "6.1.12 | L1 | PATCH | Ensure no ungrouped files or directories exist" + block: + - name: "6.1.12 | L1 | AUDIT | Ensure no ungrouped files or directories exist | Finding all ungrouped files or directories" + command: find "{{ item.mount }}" -xdev -nogroup + check_mode: false + register: rhel_07_6_1_12_audit + failed_when: false + changed_when: false + when: item['device'].startswith('/dev') and not 'bind' in item['options'] + with_items: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + + - name: "6.1.12 | L1 | AUDIT | Ensure no ungrouped files or directories exist | Displaying all ungrouped files or directories" + debug: + msg: "Warning - Manual intervention is required -- missing group on items in {{ item }}!" + changed_when: item | length > 0 + with_items: "{{ rhel_07_6_1_12_audit.results |map(attribute='stdout_lines') }}" + when: + - rhel7cis_rule_6_1_12 + tags: + - level1 + - patch + - rule_6.1.12 + +- name: "6.1.13 | L1 | PATCH | Audit SUID executables" + block: + - name: "6.1.13 | L1 | AUDIT | Audit SUID executables | Find all SUID executables" + shell: df {{ item.mount }} -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type f -perm -4000 + failed_when: false + changed_when: false + register: rhel_07_6_1_13_perms_results + with_items: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + + - name: "6.1.13 | L1 | AUDIT | Audit SUID executables | Alert no SUID executables exist" + debug: + msg: "Good news! 
We have not found any SUID executable files on your system" + failed_when: false + changed_when: false + when: + - rhel_07_6_1_13_perms_results.stdout is not defined + + - name: "6.1.13 | L1 | AUDIT | Audit SUID executables | Alert SUID executables exist" + debug: + msg: "Warning - Manual intervention is required -- SUID set on items in {{ item.item.mount }}: {{ item.stout_lines | join(', ') }}" + with_items: "{{ rhel_07_6_1_13_perms_results.stdout_lines }}" + when: + - rhel_07_6_1_13_perms_results.stdout is defined + when: + - rhel7cis_rule_6_1_13 + tags: + - level1 + - patch + - rule_6.1.13 + +- name: "6.1.14 | L1 | PATCH | Audit SGID executables" + block: + - name: "6.1.14 | L1 | AUDIT | Audit SGID executables | Find all SGID executables" + shell: df {{ item.mount }} -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type f -perm -2000 + failed_when: false + changed_when: false + register: rhel_07_6_1_14_perms_results + with_items: "{{ ansible_mounts }}" + loop_control: + label: "{{ item.mount }}" + + - name: "6.1.14 | L1 | AUDIT | Audit SGID executables | Alert no SGID executables exist" + debug: + msg: "Good news! We have not found any SGID executable files on your system" + failed_when: false + changed_when: false + when: + - rhel_07_6_1_14_perms_results.stdout is not defined + + - name: "6.1.14 | L1 | AUDIT | Audit SGID executables | Alert SGID executables exist" + debug: + msg: "Warning - Manual intervention is required -- SGID set on items in {{ item.item.mount }}: {{ item.stout_lines | join(', ') }}" + with_items: "{{ rhel_07_6_1_14_perms_results.stdout_lines }}" + when: + - rhel_07_6_1_14_perms_results.stdout is defined + when: + - rhel7cis_rule_6_1_14 + tags: + - level1 + - patch + - rule_6.1.14 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.2.x.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.2.x.yml new file mode 100644 index 0000000..4462d16 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/cis_6.2.x.yml @@ -0,0 +1,426 @@ +--- + +- name: "6.2.1 | L1 | PATCH | Ensure accounts in /etc/passwd use shadow passwords" + block: + - name: "6.2.1 | L1 | PATCH | Ensure accounts in /etc/passwd use shadow passwords" + shell: sed -e 's/^\([a-zA-Z0-9_]*\):[^:]*:/\1:x:/' -i /etc/passwd + loop: "{{ rhel7cis_passwd | selectattr('password', '!=', 'x') | list }}" + args: + warn: false + register: rhel7_6_2_1_shadow + when: + - rhel7cis_passwd | selectattr('password', '!=', 'x') + + - name: "6.2.1 | L1 | PATCH | Ensure accounts in /etc/passwd use shadow passwords | Good News" + debug: + msg: "Good News!! 
No Unshadowed passwords have been found"
+ when: rhel7_6_2_1_shadow is not changed
+ when:
+ - rhel7cis_rule_6_2_1
+ tags:
+ - level1
+ - patch
+ - rule_6.2.1
+
+- name: "6.2.2 | L1 | PATCH | Ensure /etc/shadow password fields are not empty"
+ command: passwd -l {{ item }}
+ changed_when: false
+ failed_when: false
+ loop: "{{ empty_password_accounts.stdout_lines }}"
+ when:
+ - empty_password_accounts.rc
+ - rhel7cis_rule_6_2_2
+ tags:
+ - level1
+ - patch
+ - rule_6.2.2
+
+- name: "6.2.3 | L1 | PATCH | Ensure all groups in /etc/passwd exist in /etc/group"
+ block:
+ - name: "6.2.3 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Check /etc/passwd entries"
+ shell: pwck -r | grep 'no group' | awk '{ gsub("[:\47]",""); print $2}'
+ changed_when: false
+ failed_when: false
+ check_mode: false
+ register: passwd_gid_check
+
+ - name: "6.2.3 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print message that all groups match between passwd and group files"
+ debug:
+ msg: "Good News! There are no users that have non-existent GIDs (Groups)"
+ when: passwd_gid_check.stdout is not defined
+
+ - name: "6.2.3 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print warning about users with invalid GIDs missing GID entries in /etc/group"
+ debug:
+ msg: "WARNING: The following users have non-existent GIDs (Groups): {{ passwd_gid_check.stdout_lines | join (', ') }}"
+ changed_when: false
+ when: passwd_gid_check.stdout is defined
+ when:
+ - rhel7cis_rule_6_2_3
+ tags:
+ - level1
+ - patch
+ - rule_6.2.3
+
+- name: "6.2.4 | L1 | AUDIT | Ensure shadow group is empty"
+ block:
+ - name: "6.2.4 | L1 | AUDIT | Ensure shadow group is empty"
+ shell: "grep ^shadow:[^:]*:[^:]*:[^:]+ /etc/group"
+ changed_when: false
+ failed_when: false
+ register: users_in_shadow_group
+
+ - name: "6.2.4 | L1 | AUDIT | Ensure shadow group is empty | Print no user in shadow group"
+ debug:
+ msg: "Good News! There are no users in the shadow group"
+ when: users_in_shadow_group.stdout is undefined
+
+ - name: "6.2.4 | L1 | AUDIT | Ensure shadow group is empty | Print warning about users in shadow group"
+ debug:
+ msg: "Warning: The following users are in the shadow group: {{ users_in_shadow_group.stdout_lines }}"
+ when: users_in_shadow_group.stdout is defined
+ when:
+ - rhel7cis_rule_6_2_4
+ tags:
+ - level1
+ - patch
+ - rule_6.2.4
+
+- name: "6.2.5 | L1 | PATCH | Ensure no duplicate user names exist"
+ block:
+ - name: "6.2.5 | L1 | AUDIT | Ensure no duplicate user names exist | Check for duplicate User Names"
+ shell: "pwck -r | awk -F: '{if ($1 in users) print $1 ; else users[$1]}' /etc/passwd"
+ changed_when: false
+ failed_when: false
+ register: user_username_check
+
+ - name: "6.2.5 | L1 | AUDIT | Ensure no duplicate user names exist | Print message that no duplicate user names exist"
+ debug:
+ msg: "Good News! 
There are no duplicate user names in the system"
+ when: user_username_check.stdout is not defined
+
+ - name: "6.2.5 | L1 | AUDIT | Ensure no duplicate user names exist | Print warning about users with duplicate User Names"
+ debug:
+ msg: "Warning: The following user names are duplicates: {{ user_username_check.stdout_lines }}"
+ when: user_username_check.stdout is defined
+ when:
+ - rhel7cis_rule_6_2_5
+ tags:
+ - level1
+ - patch
+ - rule_6.2.5
+
+- name: "6.2.6 | L1 | PATCH | Ensure no duplicate group names exist"
+ block:
+ - name: "6.2.6 | L1 | AUDIT | Ensure no duplicate group names exist | Check for duplicate group names"
+ shell: 'getent group | cut -d: -f1 | sort -n | uniq -d'
+ changed_when: false
+ failed_when: false
+ check_mode: false
+ register: group_group_check
+
+ - name: "6.2.6 | L1 | AUDIT | Ensure no duplicate group names exist | Print message that no duplicate groups exist"
+ debug:
+ msg: "Good News! There are no duplicate group names in the system"
+ when: group_group_check.stdout is not defined
+
+ - name: "6.2.6 | L1 | AUDIT | Ensure no duplicate group names exist | Print warning about duplicate group names"
+ debug:
+ msg: "Warning: The following group names are duplicates: {{ group_group_check.stdout_lines }}"
+ when: group_group_check.stdout is defined
+ when:
+ - rhel7cis_rule_6_2_6
+ tags:
+ - level1
+ - patch
+ - rule_6.2.6
+
+- name: "6.2.7 | L1 | PATCH | Ensure no duplicate UIDs exist"
+ block:
+ - name: "6.2.7 | L1 | AUDIT | Ensure no duplicate UIDs exist | Check for duplicate UIDs"
+ shell: "pwck -r | awk -F: '{if ($3 in uid) print $1 ; else uid[$3]}' /etc/passwd"
+ changed_when: false
+ failed_when: false
+ register: user_uid_check
+
+ - name: "6.2.7 | L1 | AUDIT | Ensure no duplicate UIDs exist | Print message that no duplicate UIDs exist"
+ debug:
+ msg: "Good News! There are no duplicate UIDs in the system"
+ when: user_uid_check.stdout is not defined
+
+ - name: "6.2.7 | L1 | AUDIT | Ensure no duplicate UIDs exist | Print warning about users with duplicate UIDs"
+ debug:
+ msg: "Warning: The following users have UIDs that are duplicates: {{ user_uid_check.stdout_lines }}"
+ when: user_uid_check.stdout is defined
+ when:
+ - rhel7cis_rule_6_2_7
+ tags:
+ - level1
+ - patch
+ - rule_6.2.7
+
+
+- name: "6.2.8 | L1 | PATCH | Ensure no duplicate GIDs exist"
+ block:
+ - name: "6.2.8 | L1 | AUDIT | Ensure no duplicate GIDs exist | Check for duplicate GIDs"
+ shell: "pwck -r | awk -F: '{if ($3 in users) print $1 ; else users[$3]}' /etc/group"
+ changed_when: false
+ failed_when: false
+ register: user_user_check
+
+ - name: "6.2.8 | L1 | AUDIT | Ensure no duplicate GIDs exist | Print message that no duplicate GIDs exist"
+ debug:
+ msg: "Good News! 
There are no duplicate GIDs in the system" + when: user_user_check.stdout is not defined + + - name: "6.2.8 | L1 | AUDIT | Ensure no duplicate GIDs exist | Print warning about users with duplicate GIDs" + debug: + msg: "Warning: The following groups have duplicate GIDs: {{ user_user_check.stdout_lines }}" + when: user_user_check.stdout is defined + when: + - rhel7cis_rule_6_2_8 + tags: + - level1 + - patch + - rule_6.2.8 + +- name: "6.2.9 | L1 | PATCH | Ensure root is the only UID 0 account" + command: passwd -l {{ item }} + changed_when: false + failed_when: false + loop: "{{ uid_zero_accounts_except_root.stdout_lines }}" + when: + - uid_zero_accounts_except_root.rc + - rhel7cis_rule_6_2_9 + tags: + - level1 + - patch + - rule_6.2.9 + +- name: "6.2.10 | L1 | PATCH | Ensure root PATH Integrity" + block: + - name: "6.2.10 | L1 | AUDIT | Ensure root PATH Integrity | Determine empty value" + shell: 'echo $PATH | grep ::' + changed_when: false + failed_when: path_colon.rc == 0 + check_mode: false + register: path_colon + + - name: "6.2.10 | L1 | AUDIT | Ensure root PATH Integrity | Determin colon end" + shell: 'echo $PATH | grep :$' + changed_when: false + failed_when: path_colon_end.rc == 0 + check_mode: false + register: path_colon_end + + - name: "6.2.10 | L1 | AUDIT | Ensure root PATH Integrity | Determine dot in path" + shell: "/bin/bash --login -c 'env | grep ^PATH=' | grep ^PATH | sed -e 's/PATH=//' -e 's/::/:/' -e 's/:$//' -e 's/:/\\n/g'" + changed_when: false + failed_when: '"." in dot_in_path.stdout_lines' + check_mode: false + register: dot_in_path + + - name: "6.2.10 | L1 | AUDIT | Ensure root PATH Integrity | Alert on empty value, colon end, and dot in path" + debug: + msg: + - "The following paths have an empty value: {{ path_colon.stdout_lines }}" + - "The following paths have colon end: {{ path_colon_end.stdout_lines }}" + - "The following paths have a dot in the path: {{ dot_in_path.stdout_lines }}" + + - name: "6.2.10 | L1 | PATCH | Ensure root PATH Integrity | Determine rights and owner" + file: > + path='{{ item }}' + follow=yes + state=directory + owner=root + mode='o-w,g-w' + loop: "{{ dot_in_path.stdout_lines }}" + when: + - rhel7cis_rule_6_2_10 + tags: + - level1 + - patch + - rule_6.2.10 + +- name: "6.2.11 | L1 | PATCH | Ensure all users' home directories exist" + block: + - name: "6.2.11 | L1 | AUDIT | Ensure all users' home directories exist | Find users missing home directories" + shell: set -o pipefail ; pwck -r | grep -P {{ ld_regex | quote }} + changed_when: rhel_07_6_2_11_audit | length > 0 + # failed_when: 0: success, 1: no grep match, 2: pwck found something + failed_when: rhel7cis_users_missing_home.rc not in [0,1,2] + check_mode: false + register: rhel7cis_users_missing_home + + - name: "6.2.11 | L1 | AUDIT | Ensure all users' home directories exist | set fact missinghome dirs" + set_fact: + missing_home_dirs: "{{ rhel_07_6_2_11_audit | map(attribute='id') | list }}" + + - name: "6.2.11 | L1 | PATCH | Ensure all users' home directories exist | create missing home dirs" + shell: "mkhomedir_helper {{ item }}" + with_items: + - "{{ missing_home_dirs }}" + when: rhel7cis_users_missing_home is changed + + - name: "6.2.11 | L1 | Audit| Ensure all users' home directories exist | Warning" + debug: + msg: "WARNING! 
{{ item }} user home directory has been created please ensure any SELINUX settings are applied" + with_items: + - "{{ missing_home_dirs }}" + when: rhel7cis_users_missing_home is changed + + vars: + ld_regex: >- + ^user '(?P.*)': directory '(?P.*)' does not exist$ + ld_users: "{{ rhel7cis_users_missing_home.stdout_lines | map('regex_replace', ld_regex, '\\g') | list }}" + rhel_07_6_2_11_audit: "{{ rhel7cis_passwd | selectattr('uid', '>=', 1000) | selectattr('id', 'in', ld_users) | list }}" + when: + - rhel7cis_rule_6_2_11 + tags: + - level1 + - patch + - rule_6.2.11 + +- name: "6.2.12 | L1 | PATCH | Ensure users own their home directories" + file: + path: "{{ item.dir }}" + owner: "{{ item.id }}" + state: directory + with_items: "{{ rhel7cis_passwd | selectattr('uid', '>=', 1000) | selectattr('uid', '!=', 65534) | list }}" + loop_control: + label: "{{ rhel7cis_passwd_label }}" + when: + - rhel7cis_rule_6_2_12 + tags: + - skip_ansible_lint + - level1 + - patch + - rule_6.2.12 + +- name: "6.2.13 | L1 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + block: + - name: "6.2.13 | L1 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive" + stat: + path: "{{ item }}" + with_items: "{{ rhel7cis_passwd | selectattr('uid', '>=', 1000) | selectattr('uid', '!=', 65534) | map(attribute='dir') | list }}" + register: rhel_07_6_2_13_audit + + - name: "6.2.13 | L1 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive" + command: find -H {{ item.0 | quote }} -not -type l -perm /027 + check_mode: false + register: rhel_07_6_2_13_patch_audit + changed_when: rhel_07_6_2_13_patch_audit.stdout != "" + when: + - ansible_check_mode + - item.1.exists + with_together: + - "{{ rhel_07_6_2_13_audit.results | map(attribute='item') | list }}" + - "{{ rhel_07_6_2_13_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + - name: "6.2.13 | L1 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive" + file: + path: "{{ item.0 }}" + recurse: yes + mode: a-st,g-w,o-rwx + register: rhel_07_6_2_13_patch + when: + - not ansible_check_mode + - item.1.exists + with_together: + - "{{ rhel_07_6_2_13_audit.results | map(attribute='item') | list }}" + - "{{ rhel_07_6_2_13_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + # set default ACLs so the homedir has an effective umask of 0027 + - name: "6.2.13 | L1 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + acl: + path: "{{ item.0 }}" + default: yes + state: present + recursive: yes + etype: "{{ item.1.etype }}" + permissions: "{{ item.1.mode }}" + when: not rhel7cis_system_is_container + with_nested: + - "{{ (ansible_check_mode | ternary(rhel_07_6_2_13_patch_audit, rhel_07_6_2_13_patch)).results | + rejectattr('skipped', 'defined') | map(attribute='item') | map('first') | list }}" + - + - etype: group + mode: rx + - etype: other + mode: '0' + when: + - rhel7cis_rule_6_2_13 + tags: + - level1 + - patch + - rule_6.2.13 + +- name: "6.2.14 | L1 | PATCH | Ensure users' dot files are not group or world-writable" + block: + - name: "6.2.14 | L1 | AUDIT | Ensure users' dot files are not group or world-writable | Check for files" + shell: find /home/ -name "\.*" -perm /g+w,o+w + changed_when: false + failed_when: false + register: rhel7cis_6_2_14_audit + + - name: "6.2.14 | L1 | AUDIT | Ensure users' dot files are not group or world-writable | Alert on files found" + 
debug: + msg: "Good news! We have not found any group or world-writable dot files on your sytem" + failed_when: false + changed_when: false + when: + - rhel7cis_6_2_14_audit.stdout is not defined + + - name: "6.2.14 | L1 | PATCH | Ensure users' dot files are not group or world-writable | Changes files if configured" + file: + path: '{{ item }}' + mode: go-w + with_items: "{{ rhel7cis_6_2_14_audit.stdout_lines }}" + when: + - rhel7cis_6_2_14_audit.stdout is defined + - rhel7cis_dotperm_ansiblemanaged + when: + - rhel7cis_rule_6_2_14 + tags: + - level1 + - patch + - rule_6.2.14 + +- name: "6.2.15 | L1 | PATCH | Ensure no users have .forward files" + file: + state: absent + dest: "~{{ item }}/.forward" + loop: "{{ users.stdout_lines }}" + when: + - rhel7cis_rule_6_2_15 + tags: + - level1 + - patch + - rule_6.2.15 + +- name: "6.2.16 | L1 | PATCH | Ensure no users have .netrc files" + file: + state: absent + dest: "~{{ item }}/.netrc" + with_items: "{{ users.stdout_lines }}" + when: + - rhel7cis_rule_6_2_16 + tags: + - level1 + - patch + - rule_6.2.16 + +- name: "6.2.17 | L1 | PATCH | Ensure no users have .rhosts files" + file: + state: absent + dest: "~{{ item }}/.rhosts" + with_items: "{{ users.stdout_lines }}" + when: + - rhel7cis_rule_6_2_17 + tags: + - level1 + - patch + - rule_6.2.17 \ No newline at end of file diff --git a/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/main.yml new file mode 100644 index 0000000..bf6943a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/tasks/section_6/main.yml @@ -0,0 +1,7 @@ +--- + +- name: "SECTION | 6.1 | System File Permissions" + include: cis_6.1.x.yml + +- name: "SECTION | 6.2 | User and Group Settings" + include: cis_6.2.x.yml diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/ansible_vars_goss.yml.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/ansible_vars_goss.yml.j2 new file mode 100644 index 0000000..8b775d6 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/ansible_vars_goss.yml.j2 @@ -0,0 +1,455 @@ +--- +## metadata for Audit benchmark +benchmark_version: '3.1.1' + +# Set if genuine RHEL (subscription manager check) not for derivatives e.g. CentOS +is_redhat_os: {% if ansible_distribution == "RedHat" %}true{% else %}false{% endif %} + +# timeout for each command to run where set - default = 10seconds/10000ms +timeout_ms: {{ audit_cmd_timeout }} + +# Taken from LE rhel7-cis +rhel7cis_notauto: {{ rhel7cis_notauto }} +rhel7cis_section1: {{ rhel7cis_section1 }} +rhel7cis_section2: {{ rhel7cis_section2 }} +rhel7cis_section3: {{ rhel7cis_section3 }} +rhel7cis_section4: {{ rhel7cis_section4 }} +rhel7cis_section5: {{ rhel7cis_section5 }} +rhel7cis_section6: {{ rhel7cis_section6 }} + +rhel7cis_selinux_disable: {{ rhel7cis_selinux_disable }} +rhel7cis_selinux_state: {{ rhel7cis_selinux_state }} + + +rhel7cis_level1: true +rhel7cis_level2: true +# to enable rules that may have IO impact on a system e.g. full filesystem scans or CPU heavy +run_heavy_tests: true + +rhel7cis_legacy_boot: {{ rhel7cis_legacy_boot }} + +# These variables correspond with the CIS rule IDs or paragraph numbers defined in +# the CIS benchmark documents. +# PLEASE NOTE: These work in coordination with the section # group variables and tags. +# You must enable an entire section in order for the variables below to take effect. 
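+#
+# Illustration only (these comment lines are not role defaults): because the rule
+# and section switches below are rendered straight from the role variables,
+# overriding them in inventory disables both the remediation task and the matching
+# goss audit check. For example, in group_vars one might set:
+#   rhel7cis_section6: false          # skip all of section 6
+#   rhel7cis_rule_1_1_1_1: false      # skip a single rule
+#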
+# Section 1 rules +rhel7cis_rule_1_1_1_1: {{ rhel7cis_rule_1_1_1_1 }} +rhel7cis_rule_1_1_1_2: {{ rhel7cis_rule_1_1_1_2 }} +rhel7cis_rule_1_1_1_3: {{ rhel7cis_rule_1_1_1_3 }} +rhel7cis_rule_1_1_2: {{ rhel7cis_rule_1_1_2 }} +rhel7cis_rule_1_1_3: {{ rhel7cis_rule_1_1_3 }} +rhel7cis_rule_1_1_4: {{ rhel7cis_rule_1_1_4 }} +rhel7cis_rule_1_1_5: {{ rhel7cis_rule_1_1_5 }} +rhel7cis_rule_1_1_6: {{ rhel7cis_rule_1_1_6 }} +rhel7cis_rule_1_1_7: {{ rhel7cis_rule_1_1_7 }} +rhel7cis_rule_1_1_8: {{ rhel7cis_rule_1_1_8 }} +rhel7cis_rule_1_1_9: {{ rhel7cis_rule_1_1_9 }} +rhel7cis_rule_1_1_10: {{ rhel7cis_rule_1_1_10 }} +rhel7cis_rule_1_1_11: {{ rhel7cis_rule_1_1_11 }} +rhel7cis_rule_1_1_12: {{ rhel7cis_rule_1_1_12 }} +rhel7cis_rule_1_1_13: {{ rhel7cis_rule_1_1_13 }} +rhel7cis_rule_1_1_14: {{ rhel7cis_rule_1_1_14 }} +rhel7cis_rule_1_1_15: {{ rhel7cis_rule_1_1_15 }} +rhel7cis_rule_1_1_16: {{ rhel7cis_rule_1_1_16 }} +rhel7cis_rule_1_1_17: {{ rhel7cis_rule_1_1_17 }} +rhel7cis_rule_1_1_18: {{ rhel7cis_rule_1_1_18 }} +rhel7cis_rule_1_1_19: {{ rhel7cis_rule_1_1_19 }} +rhel7cis_rule_1_1_20: {{ rhel7cis_rule_1_1_20 }} +rhel7cis_rule_1_1_21: {{ rhel7cis_rule_1_1_21 }} +rhel7cis_rule_1_1_22: {{ rhel7cis_rule_1_1_22 }} +rhel7cis_rule_1_1_23: {{ rhel7cis_rule_1_1_23 }} +rhel7cis_rule_1_1_24: {{ rhel7cis_rule_1_1_24 }} +rhel7cis_rule_1_2_1: {{ rhel7cis_rule_1_2_1 }} +rhel7cis_rule_1_2_2: {{ rhel7cis_rule_1_2_2 }} +rhel7cis_rule_1_2_3: {{ rhel7cis_rule_1_2_3 }} +rhel7cis_rule_1_2_4: {{ rhel7cis_rule_1_2_4 }} +rhel7cis_rule_1_2_5: {{ rhel7cis_rule_1_2_5 }} +rhel7cis_rule_1_3_1: {{ rhel7cis_rule_1_3_1 }} +rhel7cis_rule_1_3_2: {{ rhel7cis_rule_1_3_2 }} +rhel7cis_rule_1_4_1: {{ rhel7cis_rule_1_4_1 }} +rhel7cis_rule_1_4_2: {{ rhel7cis_rule_1_4_2 }} +rhel7cis_rule_1_4_3: {{ rhel7cis_rule_1_4_3 }} +rhel7cis_rule_1_5_1: {{ rhel7cis_rule_1_5_1 }} +rhel7cis_rule_1_5_2: {{ rhel7cis_rule_1_5_2 }} +rhel7cis_rule_1_5_3: {{ rhel7cis_rule_1_5_3 }} +rhel7cis_rule_1_5_4: {{ rhel7cis_rule_1_5_4 }} +rhel7cis_rule_1_6_1_1: {{ rhel7cis_rule_1_6_1_1 }} +rhel7cis_rule_1_6_1_2: {{ rhel7cis_rule_1_6_1_2 }} +rhel7cis_rule_1_6_1_3: {{ rhel7cis_rule_1_6_1_3 }} +rhel7cis_rule_1_6_1_4: {{ rhel7cis_rule_1_6_1_4 }} +rhel7cis_rule_1_6_1_5: {{ rhel7cis_rule_1_6_1_5 }} +rhel7cis_rule_1_6_1_6: {{ rhel7cis_rule_1_6_1_6 }} +rhel7cis_rule_1_6_1_7: {{ rhel7cis_rule_1_6_1_7 }} +rhel7cis_rule_1_6_1_8: {{ rhel7cis_rule_1_6_1_8 }} +rhel7cis_rule_1_7_1: {{ rhel7cis_rule_1_7_1 }} +rhel7cis_rule_1_7_2: {{ rhel7cis_rule_1_7_2 }} +rhel7cis_rule_1_7_3: {{ rhel7cis_rule_1_7_3 }} +rhel7cis_rule_1_7_4: {{ rhel7cis_rule_1_7_4 }} +rhel7cis_rule_1_7_5: {{ rhel7cis_rule_1_7_5 }} +rhel7cis_rule_1_7_6: {{ rhel7cis_rule_1_7_6 }} +rhel7cis_rule_1_8_1: {{ rhel7cis_rule_1_8_1 }} +rhel7cis_rule_1_8_2: {{ rhel7cis_rule_1_8_2 }} +rhel7cis_rule_1_8_3: {{ rhel7cis_rule_1_8_3 }} +rhel7cis_rule_1_8_4: {{ rhel7cis_rule_1_8_4 }} +rhel7cis_rule_1_9: {{ rhel7cis_rule_1_9 }} + +# section 2 rules +rhel7cis_rule_2_1_1: {{ rhel7cis_rule_2_1_1 }} +rhel7cis_rule_2_2_1_1: {{ rhel7cis_rule_2_2_1_1 }} +rhel7cis_rule_2_2_1_2: {{ rhel7cis_rule_2_2_1_2 }} +rhel7cis_rule_2_2_1_3: {{ rhel7cis_rule_2_2_1_3 }} +rhel7cis_rule_2_2_2: {{ rhel7cis_rule_2_2_2 }} +rhel7cis_rule_2_2_3: {{ rhel7cis_rule_2_2_3 }} +rhel7cis_rule_2_2_4: {{ rhel7cis_rule_2_2_4 }} +rhel7cis_rule_2_2_5: {{ rhel7cis_rule_2_2_5 }} +rhel7cis_rule_2_2_6: {{ rhel7cis_rule_2_2_6 }} +rhel7cis_rule_2_2_7: {{ rhel7cis_rule_2_2_7 }} +rhel7cis_rule_2_2_8: {{ rhel7cis_rule_2_2_8 }} +rhel7cis_rule_2_2_9: {{ rhel7cis_rule_2_2_9 }} +rhel7cis_rule_2_2_10: 
{{ rhel7cis_rule_2_2_10 }} +rhel7cis_rule_2_2_11: {{ rhel7cis_rule_2_2_11 }} +rhel7cis_rule_2_2_12: {{ rhel7cis_rule_2_2_12 }} +rhel7cis_rule_2_2_13: {{ rhel7cis_rule_2_2_13 }} +rhel7cis_rule_2_2_14: {{ rhel7cis_rule_2_2_14 }} +rhel7cis_rule_2_2_15: {{ rhel7cis_rule_2_2_15 }} +rhel7cis_rule_2_2_16: {{ rhel7cis_rule_2_2_16 }} +rhel7cis_rule_2_2_17: {{ rhel7cis_rule_2_2_17 }} +rhel7cis_rule_2_2_18: {{ rhel7cis_rule_2_2_18 }} +rhel7cis_rule_2_2_19: {{ rhel7cis_rule_2_2_19 }} +rhel7cis_rule_2_3_1: {{ rhel7cis_rule_2_3_1 }} +rhel7cis_rule_2_3_2: {{ rhel7cis_rule_2_3_2 }} +rhel7cis_rule_2_3_3: {{ rhel7cis_rule_2_3_3 }} +rhel7cis_rule_2_3_4: {{ rhel7cis_rule_2_3_4 }} +rhel7cis_rule_2_3_5: {{ rhel7cis_rule_2_3_5 }} + +# Section 3 rules +rhel7cis_rule_3_1_1: {{ rhel7cis_rule_3_1_1 }} +rhel7cis_rule_3_1_2: {{ rhel7cis_rule_3_1_2 }} +rhel7cis_rule_3_2_1: {{ rhel7cis_rule_3_2_1 }} +rhel7cis_rule_3_2_2: {{ rhel7cis_rule_3_2_2 }} +rhel7cis_rule_3_3_1: {{ rhel7cis_rule_3_3_1 }} +rhel7cis_rule_3_3_2: {{ rhel7cis_rule_3_3_2 }} +rhel7cis_rule_3_3_3: {{ rhel7cis_rule_3_3_3 }} +rhel7cis_rule_3_3_4: {{ rhel7cis_rule_3_3_4 }} +rhel7cis_rule_3_3_5: {{ rhel7cis_rule_3_3_5 }} +rhel7cis_rule_3_3_6: {{ rhel7cis_rule_3_3_6 }} +rhel7cis_rule_3_3_7: {{ rhel7cis_rule_3_3_7 }} +rhel7cis_rule_3_3_8: {{ rhel7cis_rule_3_3_8 }} +rhel7cis_rule_3_3_9: {{ rhel7cis_rule_3_3_9 }} +rhel7cis_rule_3_4_1: {{ rhel7cis_rule_3_4_1 }} +rhel7cis_rule_3_4_2: {{ rhel7cis_rule_3_4_2 }} +rhel7cis_rule_3_5_1_1: {{ rhel7cis_rule_3_5_1_1 }} +rhel7cis_rule_3_5_1_2: {{ rhel7cis_rule_3_5_1_2 }} +rhel7cis_rule_3_5_1_3: {{ rhel7cis_rule_3_5_1_3 }} +rhel7cis_rule_3_5_1_4: {{ rhel7cis_rule_3_5_1_4 }} +rhel7cis_rule_3_5_1_5: {{ rhel7cis_rule_3_5_1_5 }} +rhel7cis_rule_3_5_1_6: {{ rhel7cis_rule_3_5_1_6 }} +rhel7cis_rule_3_5_1_7: {{ rhel7cis_rule_3_5_1_7 }} +rhel7cis_rule_3_5_2_1: {{ rhel7cis_rule_3_5_2_1 }} +rhel7cis_rule_3_5_2_2: {{ rhel7cis_rule_3_5_2_2 }} +rhel7cis_rule_3_5_2_3: {{ rhel7cis_rule_3_5_2_3 }} +rhel7cis_rule_3_5_2_4: {{ rhel7cis_rule_3_5_2_4 }} +rhel7cis_rule_3_5_2_5: {{ rhel7cis_rule_3_5_2_5 }} +rhel7cis_rule_3_5_2_6: {{ rhel7cis_rule_3_5_2_6 }} +rhel7cis_rule_3_5_2_7: {{ rhel7cis_rule_3_5_2_7 }} +rhel7cis_rule_3_5_2_8: {{ rhel7cis_rule_3_5_2_8 }} +rhel7cis_rule_3_5_2_9: {{ rhel7cis_rule_3_5_2_9 }} +rhel7cis_rule_3_5_2_10: {{ rhel7cis_rule_3_5_2_10 }} +rhel7cis_rule_3_5_2_11: {{ rhel7cis_rule_3_5_2_11 }} +rhel7cis_rule_3_5_3_1_1: {{ rhel7cis_rule_3_5_3_1_1 }} +rhel7cis_rule_3_5_3_1_2: {{ rhel7cis_rule_3_5_3_1_2 }} +rhel7cis_rule_3_5_3_1_3: {{ rhel7cis_rule_3_5_3_1_3 }} + +# Section 4 rules +rhel7cis_rule_4_1_1_1: {{ rhel7cis_rule_4_1_1_1 }} +rhel7cis_rule_4_1_1_2: {{ rhel7cis_rule_4_1_1_2 }} +rhel7cis_rule_4_1_1_3: {{ rhel7cis_rule_4_1_1_3 }} +rhel7cis_rule_4_1_2_1: {{ rhel7cis_rule_4_1_2_1 }} +rhel7cis_rule_4_1_2_2: {{ rhel7cis_rule_4_1_2_2 }} +rhel7cis_rule_4_1_2_3: {{ rhel7cis_rule_4_1_2_3 }} +rhel7cis_rule_4_1_2_4: {{ rhel7cis_rule_4_1_2_4 }} +rhel7cis_rule_4_1_3: {{ rhel7cis_rule_4_1_3 }} +rhel7cis_rule_4_1_4: {{ rhel7cis_rule_4_1_4 }} +rhel7cis_rule_4_1_5: {{ rhel7cis_rule_4_1_5 }} +rhel7cis_rule_4_1_6: {{ rhel7cis_rule_4_1_6 }} +rhel7cis_rule_4_1_7: {{ rhel7cis_rule_4_1_7 }} +rhel7cis_rule_4_1_8: {{ rhel7cis_rule_4_1_8 }} +rhel7cis_rule_4_1_9: {{ rhel7cis_rule_4_1_9 }} +rhel7cis_rule_4_1_10: {{ rhel7cis_rule_4_1_10 }} +rhel7cis_rule_4_1_11: {{ rhel7cis_rule_4_1_11 }} +rhel7cis_rule_4_1_12: {{ rhel7cis_rule_4_1_12 }} +rhel7cis_rule_4_1_13: {{ rhel7cis_rule_4_1_13 }} +rhel7cis_rule_4_1_14: {{ rhel7cis_rule_4_1_14 }} 
+rhel7cis_rule_4_1_15: {{ rhel7cis_rule_4_1_15 }} +rhel7cis_rule_4_1_16: {{ rhel7cis_rule_4_1_16 }} +rhel7cis_rule_4_1_17: {{ rhel7cis_rule_4_1_17 }} +rhel7cis_rule_4_2_1_1: {{ rhel7cis_rule_4_2_1_1 }} +rhel7cis_rule_4_2_1_2: {{ rhel7cis_rule_4_2_1_2 }} +rhel7cis_rule_4_2_1_3: {{ rhel7cis_rule_4_2_1_3 }} +rhel7cis_rule_4_2_1_4: {{ rhel7cis_rule_4_2_1_4 }} +rhel7cis_rule_4_2_1_5: {{ rhel7cis_rule_4_2_1_5 }} +rhel7cis_rule_4_2_2_1: {{ rhel7cis_rule_4_2_2_1 }} +rhel7cis_rule_4_2_2_2: {{ rhel7cis_rule_4_2_2_2 }} +rhel7cis_rule_4_2_2_3: {{ rhel7cis_rule_4_2_2_3 }} +rhel7cis_rule_4_2_3: {{ rhel7cis_rule_4_2_3 }} + +# Section 5 +rhel7cis_rule_5_1_1: {{ rhel7cis_rule_5_1_1 }} +rhel7cis_rule_5_1_2: {{ rhel7cis_rule_5_1_2 }} +rhel7cis_rule_5_1_3: {{ rhel7cis_rule_5_1_3 }} +rhel7cis_rule_5_1_4: {{ rhel7cis_rule_5_1_4 }} +rhel7cis_rule_5_1_5: {{ rhel7cis_rule_5_1_5 }} +rhel7cis_rule_5_1_6: {{ rhel7cis_rule_5_1_6 }} +rhel7cis_rule_5_1_7: {{ rhel7cis_rule_5_1_7 }} +rhel7cis_rule_5_1_8: {{ rhel7cis_rule_5_1_8 }} +rhel7cis_rule_5_1_9: {{ rhel7cis_rule_5_1_9 }} +rhel7cis_rule_5_2_1: {{ rhel7cis_rule_5_2_1 }} +rhel7cis_rule_5_2_2: {{ rhel7cis_rule_5_2_2 }} +rhel7cis_rule_5_2_3: {{ rhel7cis_rule_5_2_3 }} +rhel7cis_rule_5_3_1: {{ rhel7cis_rule_5_3_1 }} +rhel7cis_rule_5_3_2: {{ rhel7cis_rule_5_3_2 }} +rhel7cis_rule_5_3_3: {{ rhel7cis_rule_5_3_3 }} +rhel7cis_rule_5_3_4: {{ rhel7cis_rule_5_3_4 }} +rhel7cis_rule_5_3_5: {{ rhel7cis_rule_5_3_5 }} +rhel7cis_rule_5_3_6: {{ rhel7cis_rule_5_3_6 }} +rhel7cis_rule_5_3_7: {{ rhel7cis_rule_5_3_7 }} +rhel7cis_rule_5_3_8: {{ rhel7cis_rule_5_3_8 }} +rhel7cis_rule_5_3_9: {{ rhel7cis_rule_5_3_9 }} +rhel7cis_rule_5_3_10: {{ rhel7cis_rule_5_3_10 }} +rhel7cis_rule_5_3_11: {{ rhel7cis_rule_5_3_11 }} +rhel7cis_rule_5_3_12: {{ rhel7cis_rule_5_3_12 }} +rhel7cis_rule_5_3_13: {{ rhel7cis_rule_5_3_13 }} +rhel7cis_rule_5_3_14: {{ rhel7cis_rule_5_3_14 }} +rhel7cis_rule_5_3_15: {{ rhel7cis_rule_5_3_15 }} +rhel7cis_rule_5_3_16: {{ rhel7cis_rule_5_3_16 }} +rhel7cis_rule_5_3_17: {{ rhel7cis_rule_5_3_17 }} +rhel7cis_rule_5_3_18: {{ rhel7cis_rule_5_3_18 }} +rhel7cis_rule_5_3_19: {{ rhel7cis_rule_5_3_19 }} +rhel7cis_rule_5_3_20: {{ rhel7cis_rule_5_3_20 }} +rhel7cis_rule_5_3_21: {{ rhel7cis_rule_5_3_21 }} +rhel7cis_rule_5_3_22: {{ rhel7cis_rule_5_3_22 }} +rhel7cis_rule_5_4_1: {{ rhel7cis_rule_5_4_1 }} +rhel7cis_rule_5_4_2: {{ rhel7cis_rule_5_4_2 }} +rhel7cis_rule_5_4_3: {{ rhel7cis_rule_5_4_3 }} +rhel7cis_rule_5_4_4: {{ rhel7cis_rule_5_4_4 }} +rhel7cis_rule_5_5_1_1: {{ rhel7cis_rule_5_5_1_1 }} +rhel7cis_rule_5_5_1_2: {{ rhel7cis_rule_5_5_1_2 }} +rhel7cis_rule_5_5_1_3: {{ rhel7cis_rule_5_5_1_3 }} +rhel7cis_rule_5_5_1_4: {{ rhel7cis_rule_5_5_1_4 }} +rhel7cis_rule_5_5_1_5: {{ rhel7cis_rule_5_5_1_5 }} +rhel7cis_rule_5_5_2: {{ rhel7cis_rule_5_5_2 }} +rhel7cis_rule_5_5_3: {{ rhel7cis_rule_5_5_3 }} +rhel7cis_rule_5_5_4: {{ rhel7cis_rule_5_5_4 }} +rhel7cis_rule_5_5_5: {{ rhel7cis_rule_5_5_5 }} +rhel7cis_rule_5_7: {{ rhel7cis_rule_5_7 }} + +# Section 6 +rhel7cis_rule_6_1_1: {{ rhel7cis_rule_6_1_1 }} +rhel7cis_rule_6_1_2: {{ rhel7cis_rule_6_1_2 }} +rhel7cis_rule_6_1_3: {{ rhel7cis_rule_6_1_3 }} +rhel7cis_rule_6_1_4: {{ rhel7cis_rule_6_1_4 }} +rhel7cis_rule_6_1_5: {{ rhel7cis_rule_6_1_5 }} +rhel7cis_rule_6_1_6: {{ rhel7cis_rule_6_1_6 }} +rhel7cis_rule_6_1_7: {{ rhel7cis_rule_6_1_7 }} +rhel7cis_rule_6_1_8: {{ rhel7cis_rule_6_1_8 }} +rhel7cis_rule_6_1_9: {{ rhel7cis_rule_6_1_9 }} +rhel7cis_rule_6_1_10: {{ rhel7cis_rule_6_1_10 }} +rhel7cis_rule_6_1_11: {{ rhel7cis_rule_6_1_11 }} +rhel7cis_rule_6_1_12: {{ 
rhel7cis_rule_6_1_12 }} +rhel7cis_rule_6_1_13: {{ rhel7cis_rule_6_1_13 }} +rhel7cis_rule_6_1_14: {{ rhel7cis_rule_6_1_14 }} + +rhel7cis_rule_6_2_1: {{ rhel7cis_rule_6_2_1 }} +rhel7cis_rule_6_2_2: {{ rhel7cis_rule_6_2_2 }} +rhel7cis_rule_6_2_3: {{ rhel7cis_rule_6_2_3 }} +rhel7cis_rule_6_2_4: {{ rhel7cis_rule_6_2_4 }} +rhel7cis_rule_6_2_5: {{ rhel7cis_rule_6_2_5 }} +rhel7cis_rule_6_2_6: {{ rhel7cis_rule_6_2_6 }} +rhel7cis_rule_6_2_7: {{ rhel7cis_rule_6_2_7 }} +rhel7cis_rule_6_2_8: {{ rhel7cis_rule_6_2_8 }} +rhel7cis_rule_6_2_9: {{ rhel7cis_rule_6_2_9 }} +rhel7cis_rule_6_2_10: {{ rhel7cis_rule_6_2_10 }} +rhel7cis_rule_6_2_11: {{ rhel7cis_rule_6_2_11 }} +rhel7cis_rule_6_2_12: {{ rhel7cis_rule_6_2_12 }} +rhel7cis_rule_6_2_13: {{ rhel7cis_rule_6_2_13 }} +rhel7cis_rule_6_2_14: {{ rhel7cis_rule_6_2_14 }} +rhel7cis_rule_6_2_15: {{ rhel7cis_rule_6_2_15 }} +rhel7cis_rule_6_2_16: {{ rhel7cis_rule_6_2_16 }} +rhel7cis_rule_6_2_17: {{ rhel7cis_rule_6_2_17 }} + + +# Service configuration booleans set true to keep service +rhel7cis_avahi_server: {{ rhel7cis_avahi_server }} +rhel7cis_cups_server: {{ rhel7cis_cups_server }} +rhel7cis_dhcp_server: {{ rhel7cis_dhcp_server }} +rhel7cis_ldap_server: {{ rhel7cis_ldap_server }} +rhel7cis_telnet_server: {{ rhel7cis_telnet_server }} +rhel7cis_nfs_server: {{ rhel7cis_nfs_server }} +rhel7cis_rpc_server: {{ rhel7cis_rpc_server }} +rhel7cis_ntalk_server: {{ rhel7cis_ntalk_server }} +rhel7cis_rsyncd_server: {{ rhel7cis_rsyncd_server }} +rhel7cis_tftp_server: {{ rhel7cis_tftp_server }} +rhel7cis_rsh_server: {{ rhel7cis_rsh_server }} +rhel7cis_nis_server: {{ rhel7cis_nis_server }} +rhel7cis_snmp_server: {{ rhel7cis_snmp_server }} +rhel7cis_squid_server: {{ rhel7cis_squid_server }} +rhel7cis_smb_server: {{ rhel7cis_smb_server }} +rhel7cis_dovecot_server: {{ rhel7cis_dovecot_server }} +rhel7cis_httpd_server: {{ rhel7cis_httpd_server }} +rhel7cis_vsftpd_server: {{ rhel7cis_vsftpd_server }} +rhel7cis_named_server: {{ rhel7cis_named_server }} +rhel7cis_nfs_rpc_server: {{ rhel7cis_nfs_rpc_server }} +rhel7cis_is_mail_server: {{ rhel7cis_is_mail_server }} +rhel7cis_bind: {{ rhel7cis_bind }} +rhel7cis_vsftpd: {{ rhel7cis_vsftpd }} +rhel7cis_httpd: {{ rhel7cis_httpd }} +rhel7cis_dovecot: {{ rhel7cis_dovecot }} +rhel7cis_samba: {{ rhel7cis_samba }} +rhel7cis_squid: {{ rhel7cis_squid }} +rhel7cis_net_snmp: {{ rhel7cis_net_snmp}} +rhel7cis_allow_autofs: {{ rhel7cis_allow_autofs }} + +# client services +rhel7cis_openldap_clients_required: {{ rhel7cis_openldap_clients_required }} +rhel7cis_telnet_required: {{ rhel7cis_telnet_required }} +rhel7cis_talk_required: {{ rhel7cis_talk_required }} +rhel7cis_rsh_required: {{ rhel7cis_rsh_required }} +rhel7cis_ypbind_required: {{ rhel7cis_ypbind_required }} + +# AIDE +rhel7cis_config_aide: {{ rhel7cis_config_aide }} +# aide setup via - cron, timer +rhel7_aide_scan: cron + +# AIDE cron settings +rhel7cis_aide_cron: + cron_user: {{ rhel7cis_aide_cron.cron_user }} + cron_file: '{{ rhel7cis_aide_cron.cron_file }}' + aide_job: ' {{ rhel7cis_aide_cron.aide_job }}' + aide_minute: '{{ rhel7cis_aide_cron.aide_minute }}' + aide_hour: '{{ rhel7cis_aide_cron.aide_hour }}' + aide_day: '{{ rhel7cis_aide_cron.aide_day }}' + aide_month: '{{ rhel7cis_aide_cron.aide_month }}' + aide_weekday: '{{ rhel7cis_aide_cron.aide_weekday }}' + +# 1.4.1 Bootloader password +rhel7cis_bootloader_file: {{ rhel7cis_bootloader_path }}grub.cfg +rhel7cis_bootloader_user: {{ rhel7cis_bootloader_path }}user.cfg +rhel7cis_bootloader_password: {{ rhel7cis_bootloader_password_hash }} 
+rhel7cis_set_boot_pass: {{ rhel7cis_set_boot_pass }} + +# Warning Banner Content (issue, issue.net, motd) +rhel7cis_warning_banner: {{ rhel7cis_warning_banner }} +# End Banner + +# Set to 'true' if X Windows is needed in your environment +rhel7cis_xwindows_required: {{ rhel7cis_xwindows_required }} + +# Whether or not to run tasks related to auditing/patching the desktop environment +rhel7cis_gui: {{ rhel7cis_gui }} + +# xinetd required +rhel7cis_xinetd_required: {{ rhel7cis_xinetd_required }} + +# IPv6 required +rhel7cis_ipv6_required: {{ rhel7cis_ipv6_required }} + +# System network parameters (host only OR host and router) +rhel7cis_is_router: {{ rhel7cis_is_router }} + +# Time Synchronization +rhel7cis_time_synchronization: {{ rhel7cis_time_synchronization }} + +rhel7cis_firewall: {{ rhel7cis_firewall }} +# rhel7cis_firewall: iptables +rhel7cis_default_firewall_zone: {{ rhel7cis_default_zone }} +rhel7cis_firewall_interface: +- ['ens224'] +- ['ens192'] +rhel7cis_firewall_services: {{ rhel7cis_firewall_services }} + +# Section 4 +# auditd settings +rhel7cis_auditd: + space_left_action: {{ rhel7cis_auditd.space_left_action }} + action_mail_acct: {{ rhel7cis_auditd.action_mail_acct }} + admin_space_left_action: {{ rhel7cis_auditd.admin_space_left_action }} + max_log_file_action: {{ rhel7cis_auditd.max_log_file_action }} + auditd_backlog_limit: {{ rhel7cis_audit_backlog_limit }} + +# syslog +{% if rhel7cis_syslog == "rsyslog" %} +rhel7cis_rsyslog: true +{% endif %} + +# Section 5 +rhel7cis_sshd_limited: false +#Note the following to understand precedence and layout +rhel7cis_sshd_access: +- AllowUser +- AllowGroup +- DenyUser +- DenyGroup + +rhel7cis_ssh_strong_ciphers: Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr +rhel7cis_ssh_weak_ciphers: +- 3des-cbc +- aes128-cbc +- aes192-cbc +- aes256-cbc +- arcfour +- arcfour128 +- arcfour256 +- blowfish-cbc +- cast128-cbc +- rijndael-cbc@lysator.liu.se + +rhel7cis_ssh_strong_macs: MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256 +rhel7cis_ssh_weak_macs: +- hmac-md5 +- hmac-md5-96 +- hmac-ripemd160 +- hmac-sha1 +- hmac-sha1-96 +- umac-64@openssh.com +- umac-128@openssh.com +- hmac-md5-etm@openssh.com +- hmac-md5-96-etm@openssh.com +- hmac-ripemd160-etm@openssh.com +- hmac-sha1-etm@openssh.com +- hmac-sha1-96-etm@openssh.com +- umac-64-etm@openssh.com +- umac-128-etm@openssh.com + +rhel7cis_ssh_strong_kex: KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256 +rhel7cis_ssh_weak_kex: +- diffie-hellman-group1-sha1 +- diffie-hellman-group14-sha1 +- diffie-hellman-group-exchange-sha1 + +rhel7cis_ssh_aliveinterval: "300" +rhel7cis_ssh_countmax: "3" + +rhel7cis_sugroup: {{ rhel7cis_sugroup | default('wheel') }} + + +## PAM +rhel7cis_pam_password: + - minclass = 4 +rhel7cis_pam_passwd_retry: "3" +# faillock or tally2 +rhel7cis_accountlock: faillock + +## note this is to skip tests +skip_rhel7cis_pam_passwd_auth: true +skip_rhel7cis_pam_system_auth: true + +# choose one of below +rhel7cis_pwhistory_so: "14" +rhel7cis_unix_so: false +rhel7cis_passwd_remember: "{{ rhel7cis_pam_faillock.remember }}" + +# logins.def password settings +rhel7cis_pass_max_days: {{ rhel7cis_pass.max_days }} +rhel7cis_pass_min_days: {{ rhel7cis_pass.min_days }}
+rhel7cis_pass_warn_age: {{ rhel7cis_pass.warn_age }} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/99_finalize.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/99_finalize.rules.j2 new file mode 100644 index 0000000..bc95eba --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/99_finalize.rules.j2 @@ -0,0 +1 @@ +-e 2 diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/MAC_policy.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/MAC_policy.rules.j2 new file mode 100644 index 0000000..640c21a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/MAC_policy.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/selinux/ -p wa -k MAC-policy +-w /usr/share/selinux/ -p wa -k MAC-policy diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/access.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/access.rules.j2 new file mode 100644 index 0000000..d877a3b --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/access.rules.j2 @@ -0,0 +1,4 @@ +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/actions.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/actions.rules.j2 new file mode 100644 index 0000000..9eafbd3 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/actions.rules.j2 @@ -0,0 +1,2 @@ +-a always,exit -F arch=b64 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions +-a always,exit -F arch=b32 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/deletion.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/deletion.rules.j2 new file mode 100644 index 0000000..39fedff --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/deletion.rules.j2 @@ -0,0 +1,2 @@ +-a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete +-a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/identity.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/identity.rules.j2 new file mode 100644 index 0000000..358f999 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/identity.rules.j2 @@ -0,0 +1,5 @@ +-w /etc/group -p wa -k identity +-w /etc/passwd -p wa -k identity +-w /etc/gshadow -p wa -k identity +-w /etc/shadow -p wa -k identity +-w /etc/security/opasswd -p wa -k identity diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/logins.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/logins.rules.j2 new file mode 100644 index 0000000..c47bf6e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/logins.rules.j2 @@ -0,0 +1,3 @@ +-w /var/log/faillog -p wa -k logins +-w /var/log/lastlog -p wa -k logins +-w /var/run/faillock/ -p wa -k logins diff --git 
a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/modules.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/modules.rules.j2 new file mode 100644 index 0000000..5fae54e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/modules.rules.j2 @@ -0,0 +1,4 @@ +-w /sbin/insmod -p x -k modules +-w /sbin/rmmod -p x -k modules +-w /sbin/modprobe -p x -k modules +-a always,exit -F arch=b64 -S init_module -S delete_module -k modules diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/mounts.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/mounts.rules.j2 new file mode 100644 index 0000000..c70add1 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/mounts.rules.j2 @@ -0,0 +1,2 @@ +-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts +-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/perm_mod.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/perm_mod.rules.j2 new file mode 100644 index 0000000..2bab6dd --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/perm_mod.rules.j2 @@ -0,0 +1,6 @@ +-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/priv_commands.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/priv_commands.rules.j2 new file mode 100644 index 0000000..92eb78e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/priv_commands.rules.j2 @@ -0,0 +1,4 @@ +{% for proc in priv_procs.stdout_lines -%} +-a always,exit -F path={{ proc }} -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged + +{% endfor %} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/scope.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/scope.rules.j2 new file mode 100644 index 0000000..0ae21fd --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/scope.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/sudoers -p wa -k scope +-w /etc/sudoers.d/ -p wa -k scope diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/session.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/session.rules.j2 new file mode 100644 index 0000000..ea5c489 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/session.rules.j2 @@ -0,0 +1,3 @@ +-w /var/run/utmp -p wa -k session +-w /var/log/wtmp -p wa -k logins +-w /var/log/btmp -p wa -k logins diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/system_local.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/system_local.rules.j2 new file mode 100644 index 0000000..32fb308 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/system_local.rules.j2 @@ -0,0 +1,6 @@ +-a always,exit -F arch=b64 -S sethostname -S setdomainname -k 
system-locale +-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale +-w /etc/issue -p wa -k system-locale +-w /etc/issue.net -p wa -k system-locale +-w /etc/hosts -p wa -k system-locale +-w /etc/sysconfig/network -p wa -k system-locale diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/time_change.rules.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/time_change.rules.j2 new file mode 100644 index 0000000..625a117 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/audit/time_change.rules.j2 @@ -0,0 +1,7 @@ +-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change +-a always,exit -F arch=b32 -S clock_settime -k time-change +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change +-a always,exit -F arch=b64 -S clock_settime -k time-change +{% endif %} +-w /etc/localtime -p wa -k time-change diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/chrony.conf.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/chrony.conf.j2 new file mode 100644 index 0000000..f86b20e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/chrony.conf.j2 @@ -0,0 +1,93 @@ +# This the default chrony.conf file for the Debian chrony package. After +# editing this file use the command 'invoke-rc.d chrony restart' to make +# your changes take effect. John Hasler 1998-2008 + +# See www.pool.ntp.org for an explanation of these servers. Please +# consider joining the project if possible. If you can't or don't want to +# use these servers I suggest that you try your ISP's nameservers. We mark +# the servers 'offline' so that chronyd won't try to connect when the link +# is down. Scripts in /etc/ppp/ip-up.d and /etc/ppp/ip-down.d use chronyc +# commands to switch it on when a dialup link comes up and off when it goes +# down. Code in /etc/init.d/chrony attempts to determine whether or not +# the link is up at boot time and set the online status accordingly. If +# you have an always-on connection such as cable omit the 'offline' +# directive and chronyd will default to online. +# +# Note that if Chrony tries to go "online" and dns lookup of the servers +# fails they will be discarded. Thus under some circumstances it is +# better to use IP numbers than host names. + +{% for server in rhel7cis_time_synchronization_servers -%} +server {{ server }} {{ rhel7cis_chrony_server_options }} +{% endfor %} + +# Look here for the admin password needed for chronyc. The initial +# password is generated by a random process at install time. You may +# change it if you wish. + +keyfile /etc/chrony/chrony.keys + +# Set runtime command key. Note that if you change the key (not the +# password) to anything other than 1 you will need to edit +# /etc/ppp/ip-up.d/chrony, /etc/ppp/ip-down.d/chrony, /etc/init.d/chrony +# and /etc/cron.weekly/chrony as these scripts use it to get the password. + +commandkey 1 + +# I moved the driftfile to /var/lib/chrony to comply with the Debian +# filesystem standard. + +driftfile /var/lib/chrony/chrony.drift + +# Comment this line out to turn off logging. + +log tracking measurements statistics +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. + +maxupdateskew 100.0 + +# Dump measurements when daemon exits. + +dumponexit + +# Specify directory for dumping measurements. + +dumpdir /var/lib/chrony + +# Let computer be a server when it is unsynchronised. + +local stratum 10 + +# Allow computers on the unrouted nets to use the server. 
+ +#allow 10/8 +#allow 192.168/16 +#allow 172.16/12 + +# This directive forces `chronyd' to send a message to syslog if it +# makes a system clock adjustment larger than a threshold value in seconds. + +logchange 0.5 + +# This directive defines an email address to which mail should be sent +# if chronyd applies a correction exceeding a particular threshold to the +# system clock. + +# mailonchange root@localhost 0.5 + +# This directive tells chrony to regulate the real-time clock and tells it +# Where to store related data. It may not work on some newer motherboards +# that use the HPET real-time clock. It requires enhanced real-time +# support in the kernel. I've commented it out because with certain +# combinations of motherboard and kernel it is reported to cause lockups. + +# rtcfile /var/lib/chrony/chrony.rtc + +# If the last line of this file reads 'rtconutc' chrony will assume that +# the CMOS clock is on UTC (GMT). If it reads '# rtconutc' or is absent +# chrony will assume local time. The line (if any) was written by the +# chrony postinst based on what it found in /etc/default/rcS. You may +# change it if necessary. +rtconutc diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.j2 new file mode 100644 index 0000000..ca6c531 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.j2 @@ -0,0 +1 @@ +{{ rhel7cis_warning_banner }} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.net.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.net.j2 new file mode 100644 index 0000000..ca6c531 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/issue.net.j2 @@ -0,0 +1 @@ +{{ rhel7cis_warning_banner }} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/motd.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/motd.j2 new file mode 100644 index 0000000..ca6c531 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/motd.j2 @@ -0,0 +1 @@ +{{ rhel7cis_warning_banner }} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/tmp_mount.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/tmp_mount.j2 new file mode 100644 index 0000000..e216054 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/etc/tmp_mount.j2 @@ -0,0 +1,25 @@ +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. 
+ +[Unit] +Description=Temporary Directory +Documentation=man:hier(7) +Documentation=http://www.freedesktop.org/wiki/Software/systemd/APIFileSystems +ConditionPathIsSymbolicLink=!/tmp +DefaultDependencies=no +Conflicts=umount.target +Before=local-fs.target umount.target + +[Mount] +What=tmpfs +Where=/tmp +Type=tmpfs +Options=mode=1777,strictatime,{% if rhel7cis_rule_1_1_3 %}noexec,{% endif %}{% if rhel7cis_rule_1_1_4 %}nodev,{% endif %}{% if rhel7cis_rule_1_1_5 %}nosuid{% endif %} + +# Make 'systemctl enable tmp.mount' work: +[Install] +WantedBy=local-fs.target diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/hosts.allow.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/hosts.allow.j2 new file mode 100644 index 0000000..9055481 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/hosts.allow.j2 @@ -0,0 +1,11 @@ +# +# hosts.allow This file contains access rules which are used to +# allow or deny connections to network services that +# either use the tcp_wrappers library or that have been +# started through a tcp_wrappers-enabled xinetd. +# +# See 'man 5 hosts_options' and 'man 5 hosts_access' +# for information on rule syntax. +# See 'man tcpd' for information on tcp_wrappers +# +ALL: {% for iprange in rhel7cis_host_allow -%}{{ iprange }}{% if not loop.last %}, {% endif %}{% endfor %} diff --git a/Linux/ansible-lockdown/RHEL7-CIS/templates/ntp.conf.j2 b/Linux/ansible-lockdown/RHEL7-CIS/templates/ntp.conf.j2 new file mode 100644 index 0000000..6e11739 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/templates/ntp.conf.j2 @@ -0,0 +1,59 @@ +# For more information about this file, see the man pages +# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). + +driftfile /var/lib/ntp/drift + +# Permit time synchronization with our time source, but do not +# permit the source to query or modify the service on this system. +#restrict default nomodify notrap nopeer noquery +restrict -4 default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Permit all access over the loopback interface. This could +# be tightened as well, but to do so would effect some of +# the administrative functions. +restrict 127.0.0.1 +restrict ::1 + +# Hosts on local network are less restricted. +#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap + +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for server in rhel7cis_time_synchronization_servers -%} +server {{ server }} {{ rhel7cis_ntp_server_options }} +{% endfor %} + +#broadcast 192.168.1.255 autokey # broadcast server +#broadcastclient # broadcast client +#broadcast 224.0.1.1 autokey # multicast server +#multicastclient 224.0.1.1 # multicast client +#manycastserver 239.255.254.254 # manycast server +#manycastclient 239.255.254.254 autokey # manycast client + +# Enable public key cryptography. +#crypto + +includefile /etc/ntp/crypto/pw + +# Key file containing the keys and key identifiers used when operating +# with symmetric key cryptography. +keys /etc/ntp/keys + +# Specify the key identifiers which are trusted. +#trustedkey 4 8 42 + +# Specify the key identifier to use with the ntpdc utility. +#requestkey 8 + +# Specify the key identifier to use with the ntpq utility. +#controlkey 8 + +# Enable writing of statistics records. 
+#statistics clockstats cryptostats loopstats peerstats + +# Disable the monitoring facility to prevent amplification attacks using ntpdc +# monlist command when default restrict does not include the noquery flag. See +# CVE-2013-5211 for more details. +# Note: Monitoring will not be disabled with the limited restriction flag. +disable monitor diff --git a/Linux/ansible-lockdown/RHEL7-CIS/vars/main.yml b/Linux/ansible-lockdown/RHEL7-CIS/vars/main.yml new file mode 100644 index 0000000..77594b3 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL7-CIS/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for RHEL7-CIS diff --git a/Linux/ansible-lockdown/RHEL8-CIS/.ansible-lint b/Linux/ansible-lockdown/RHEL8-CIS/.ansible-lint new file mode 100644 index 0000000..f2a7e7c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/.ansible-lint @@ -0,0 +1,11 @@ +parseable: true +quiet: true +skip_list: + - '204' + - '305' + - '303' + - '403' + - '306' + - '602' +use_default_rules: true +verbosity: 0 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/.gitattributes b/Linux/ansible-lockdown/RHEL8-CIS/.gitattributes new file mode 100644 index 0000000..b2daffb --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/.gitattributes @@ -0,0 +1,6 @@ +# adding github settings to show correct language +*.sh linguist-detectable=true +*.yml linguist-detectable=true +*.ps1 linguist-detectable=true +*.j2 linguist-detectable=true +*.md linguist-documentation diff --git a/Linux/ansible-lockdown/RHEL8-CIS/.yamllint b/Linux/ansible-lockdown/RHEL8-CIS/.yamllint new file mode 100644 index 0000000..fdea629 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/.yamllint @@ -0,0 +1,23 @@ +--- +ignore: | + tests/ + molecule/ + .gitlab-ci.yml + *molecule.yml + +extends: default + +rules: + indentation: + # Requiring 4 space indentation + spaces: 4 + # Requiring consistent indentation within a file, either indented or not + indent-sequences: consistent + truthy: disable + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/Linux/ansible-lockdown/RHEL8-CIS/CONTRIBUTING.rst b/Linux/ansible-lockdown/RHEL8-CIS/CONTRIBUTING.rst new file mode 100644 index 0000000..23ce2fb --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/CONTRIBUTING.rst @@ -0,0 +1,67 @@ +Contributing to MindPoint Group Projects +======================================== + +Rules +----- +1) All commits must be GPG signed (details in Signing section) +2) All commits must have Signed-off-by (Signed-off-by: Joan Doe ) in the commit message (details in Signing section) +3) All work is done in your own branch +4) All pull requests go into the devel branch. There are automated checks for signed commits, signoff in commit message, and functional testing) +5) Be open and nice to eachother + +Workflow +-------- +- Your work is done in your own individual branch. Make sure to to Signed-off and GPG sign all commits you intend to merge +- All community Pull Requests are into the devel branch. There are automated checks for GPG signed, Signed-off in commits, and functional tests before being approved. If your pull request comes in from outside of our repo, the pull request will go into a staging branch. There is info needed from our repo for our CI/CD testing. 
+- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release + +Signing your contribution +------------------------- + +We've chosen to use the Developer's Certificate of Origin (DCO) method +that is employed by the Linux Kernel Project, which provides a simple +way to contribute to MindPoint Group projects. + +The process is to certify the below DCO 1.1 text +:: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +:: + +Then, when it comes time to submit a contribution, include the +following text in your contribution commit message: + +:: + + Signed-off-by: Joan Doe + +:: + + +This message can be entered manually, or if you have configured git +with the correct `user.name` and `user.email`, you can use the `-s` +option to `git commit` to automatically include the signoff message. 
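For contributors unfamiliar with the sign-off and GPG requirements described above, a minimal sketch of the workflow (assumed example values - substitute your own name, email address and key ID) looks like this:

::

    # one-time git setup (hypothetical values)
    git config --global user.name "Joan Doe"
    git config --global user.email "joan.doe@example.com"
    git config --global user.signingkey ABCDEF1234567890

    # -s adds the Signed-off-by trailer, -S produces a GPG-signed commit
    git commit -s -S -m "describe your change"

::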
diff --git a/Linux/ansible-lockdown/RHEL8-CIS/Changelog.md b/Linux/ansible-lockdown/RHEL8-CIS/Changelog.md new file mode 100644 index 0000000..de3186a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/Changelog.md @@ -0,0 +1,93 @@ +# Changes to rhel8CIS + +## 1.3.3 + +- update to audit script + - variable for audit OS agnostic + - removal of included library module (not required) + +- Issues included + - #135 - running levels - upadted tags + - #138 - auditd immutable + - #139 - 5.2.13 valus updated + - #140 + - #141 - check mode update + - #142 + - #143 - labels added + - #144 + - #146 - undefined variable added + - #147 - removed warn statement + - #149 - shell timeout + +## 1.3.2 + +- issues with crypto policies on ec2 - added skip for rules if system_is_ec2 variable + - cis_1.10 ## Change crypto breaks installing products + - cis_1.11 ## Change crypto breaks installing products + +## 1.3.1 + +- CIS 1.0.1 updates +- Added Issue and PR templates +- Added better reboot logic +- Added options to ensure idempotence +- Enhanced flush handlers +- Typo fixes +- mount check improvements +- Linting fixes +- Added systemd tmp mount +- Added systemd tmpfs block +- #110 tmp.mount support + - thanks to @erpadmin + +## 1.3 + +- extentions to LE audit capability +- more lint and layout changes +- sugroup assertion added 5.7 +- added extra logic variable to authselect/config section 5.3 related +- AlmaLinux and Rocky tested (comments in readme - also rsyslog installed at build or will fail) +- section 1.1 mount work has been rewritten and systemd tmp mount options added + +## 1.2.3 + +- #117 sugroup enhancements + - thanks to @ihotz +- #112 use of dnf module not shell + - thanks to @wolskie + +## 1.2.2 + +- #33 mkgrub missing variable issues - efi and bios path resolution + - thanks to @mrampant & @mickey1928geo +- #102 2.2.2 xorg pkg removal extended + - thanks to @RosarioVinoth +- #104 5.4.1 pwquality logic + - thanks to @RosarioVinoth +- #107 Idempotence improvement for 4.1.1.3 and 4.1.1.4 + - thanks to @andreyzher +- lint changes and updates to sync with ansible-galaxy + +## v1.2.1 + +- bootloader and default variables +- empty strings lint updates +- #87 +- rule 6.1.1 - audit only - outputs file discrepancies to {{ rhel8cis_rpm_audit_file }} +- #88 +- checkmode_improvements added to relevant tasks +- PR #96 +- crypto policy idempotency + +## v1.2.0 + +- #86 +- Adding on the goss auditing tool +- remove deprecated warnings +- format and layout +- general improvements +- readme updates +- use ansible package_facts +- #90 +- cis fix - nfs-server not nfs + - Thanks to danderemer diff --git a/Linux/ansible-lockdown/RHEL8-CIS/LICENSE b/Linux/ansible-lockdown/RHEL8-CIS/LICENSE new file mode 100644 index 0000000..5b33d4a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Mindpoint Group / Lockdown Enterprise / Lockdown Enterprise Releases + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Linux/ansible-lockdown/RHEL8-CIS/README.md b/Linux/ansible-lockdown/RHEL8-CIS/README.md new file mode 100644 index 0000000..e654fe1 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/README.md @@ -0,0 +1,129 @@ +RHEL 8 CIS +================ + +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/RHEL8-CIS/CommunityToDevel?label=Devel%20Build%20Status&style=plastic) +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/RHEL8-CIS/DevelToMain?label=Main%20Build%20Status&style=plastic) +![Release](https://img.shields.io/github/v/release/ansible-lockdown/RHEL8-CIS?style=plastic) + +Configure a RHEL/CentOS 8 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) compliant + +Based on [CIS RedHat Enterprise Linux 8 Benchmark v1.0.1 - 05-19-2021](https://www.cisecurity.org/cis-benchmarks/) + +Caution(s) +------- + +This role **will make changes to the system** which may have unintended consequences. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted. + +This role was developed against a clean install of the Operating System. If you are implementing it on an existing system, please review this role for any site-specific changes that are needed. + +To use the release version, please point to the main branch + +Documentation +------------- + +- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown) +- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise) +- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration) +- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise) +- [Wiki](https://github.com/ansible-lockdown/RHEL8-CIS/wiki) +- [Repo GitHub Page](https://ansible-lockdown.github.io/RHEL8-CIS/) + +Auditing (new) +-------------- + +This can be turned on or off within the defaults/main.yml file with the variable rhel8cis_run_audit. The value is false by default; please refer to the wiki for more details. + +This is a much quicker, very lightweight check of (where possible) config compliance and live/running settings. + +A new form of auditing has been developed, using a small (12MB) Go binary called [goss](https://github.com/aelsabbahy/goss) along with the relevant configurations to check, without the need for extra infrastructure or other tooling. +This audit will not only check that the config has the correct setting but also aims to capture whether the system is running with that configuration, trying to remove [false positives](https://www.mindpointgroup.com/blog/is-compliance-scanning-still-relevant/) in the process. + +Refer to [RHEL8-CIS-Audit](https://github.com/ansible-lockdown/RHEL8-CIS-Audit). + +Requirements +------------ + +RHEL/AlmaLinux/CentOS/Rocky 8 - Other versions are not supported.
+ +- AlmaLinux/Rocky has been tested on 8.4 (enabling crypto (sections 1.10 & 1.11) breaks updating or installs, 01 Jul 2021) +- Access to download or add the goss binary and content to the system if using auditing (other options are available for how to get the content to the system) + +**General:** + +- Basic knowledge of Ansible; below are some links to the Ansible documentation to help get started if you are unfamiliar with Ansible + - [Main Ansible documentation page](https://docs.ansible.com) + - [Ansible Getting Started](https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html) + - [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html) + - [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html) +- Functioning Ansible and/or Tower installed, configured, and running. This includes all of the base Ansible/Tower configurations, needed packages installed, and infrastructure setup. +- Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consequences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file or the [Main Variables Wiki Page](https://github.com/ansible-lockdown/RHEL8-CIS/wiki/Main-Variables). + +Dependencies +------------ + +- Python3 +- Ansible 2.9+ +- python-def (should be included in RHEL/CentOS 8) +- libselinux-python + +Role Variables +-------------- + +This role is designed so that the end user should not have to edit the tasks themselves. All customizing should be done via the defaults/main.yml file or with extra vars within the project, job, workflow, etc. These variables can be found [here](https://github.com/ansible-lockdown/RHEL8-CIS/wiki/Main-Variables) in the Main Variables Wiki page. All variables are listed there along with descriptions. + +Tags +---- + +There are many tags available for added control precision. Each control has its own set of tags noting what level it applies to, whether it is scored or not scored, what OS element it relates to, whether it is a patch or audit task, and the rule number. + +Below is an example of the tag section from a control within this role. Using this example, if you set your run to skip all controls with the tag services, this task will be skipped. The opposite can also happen where you run only controls tagged with services. + +```txt + tags: + - level1-server + - level1-workstation + - scored + - avahi + - services + - patch + - rule_2.2.4 +``` + +Example Audit Summary +--------------------- + +This is based on a vagrant image with selections enabled, e.g. no GUI or firewall. +Note: More tests are run during the audit as we check both config and running state. + +```txt + +ok: [default] => { + "msg": [ + "The pre remediation results are: ['Total Duration: 5.454s', 'Count: 338, Failed: 47, Skipped: 5'].", + "The post remediation results are: ['Total Duration: 5.007s', 'Count: 338, Failed: 46, Skipped: 5'].", + "Full breakdown can be found in /var/tmp", + "" + ] +} + +PLAY RECAP ******************************************************************************************************************************************* +default                    : ok=270  changed=23   unreachable=0    failed=0    skipped=140  rescued=0    ignored=0 +``` + +Branches +------- + +- devel - This is the default branch and the working development branch.
Community pull requests will pull into this branch +- main - This is the release branch +- reports - This is a protected branch for our scoring reports, no code should ever go here +- all other branches** - Individual community member branches + +Community Contribution +---------------------- + +We encourage you (the community) to contribute to this role. Please read the rules below. + +- Your work is done in your own individual branch. Make sure to Signed-off and GPG sign all commits you intend to merge. +- All community Pull Requests are pulled into the devel branch +- Pull Requests into devel will confirm your commits have a GPG signature, Signed-off, and a functional test before being approved +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release diff --git a/Linux/ansible-lockdown/RHEL8-CIS/ansible.cfg b/Linux/ansible-lockdown/RHEL8-CIS/ansible.cfg new file mode 100644 index 0000000..831f01d --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/ansible.cfg @@ -0,0 +1,24 @@ +[defaults] +host_key_checking=False +display_skipped_hosts=True +system_warnings=False +command_warnings=False +nocows=1 +retry_files_save_path=/dev/null +library=~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules:./library + +[privilege_escalation] + +[paramiko_connection] +record_host_keys=False + +[ssh_connection] +transfer_method=scp + +[accelerate] + +[selinux] + +[colors] + +[diff] diff --git a/Linux/ansible-lockdown/RHEL8-CIS/defaults/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/defaults/main.yml new file mode 100644 index 0000000..1ee8d95 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/defaults/main.yml @@ -0,0 +1,622 @@ +--- +# defaults file for rhel8-cis + +rhel8cis_skip_for_travis: false +rhel8cis_system_is_container: false +# rhel8cis is left off the front of this var for consistency in testing pipeline +# system_is_ec2 toggle will disable tasks that fail on Amazon EC2 instances. 
Set true to skip and false to run tasks +system_is_ec2: false + +rhel8cis_notauto: false +rhel8cis_section1: true +rhel8cis_section2: true +rhel8cis_section3: true +rhel8cis_section4: true +rhel8cis_section5: true +rhel8cis_section6: true + +rhel8cis_level_1: true +rhel8cis_level_2: true + +rhel8cis_selinux_disable: false +rhel8cis_legacy_boot: false + +## Python Binary +## This is used for python3 installations where python2 OS modules are used in Ansible +python2_bin: /bin/python2.7 + +## Benchmark name used by auditing control role +# The audit variable found at the base +benchmark: RHEL8-CIS + +# Whether to skip the reboot +rhel8cis_skip_reboot: true + +#### Basic external goss audit enablement settings #### +#### Precise details - per setting can be found at the bottom of this file #### + +### Goss is required on the remote host +setup_audit: false +# How to retrieve goss +# Options are copy or download - detailed settings at the bottom of this file +# you will need access to either GitHub or the file already downloaded +get_goss_file: download + +# how to get audit files onto the host - options +# options are git/copy/get_url - use local if already available to the host (adjust paths accordingly) +audit_content: git + +# enable audits to run - this runs the audit and gets the latest content +run_audit: false + +# Timeout for those cmds that take longer to run where a timeout is set +audit_cmd_timeout: 30000 + +### End Goss enablements #### +#### Detailed settings found at the end of this document #### + +# These variables correspond with the CIS rule IDs or paragraph numbers defined in +# the CIS benchmark documents. +# PLEASE NOTE: These work in coordination with the section # group variables and tags. +# You must enable an entire section in order for the variables below to take effect.
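As the comments above note, the per-rule toggles only take effect while their parent section stays enabled. As a minimal illustrative sketch (the inventory name is hypothetical; the variable names come from this defaults file), individual controls or whole sections can be switched off at run time instead of editing the role:

```txt
# skip a single control while leaving its section enabled
ansible-playbook -i inventory site.yml --extra-vars '{"rhel8cis_rule_1_1_1_1": false}'

# disable all of section 6; the rule toggles inside it then have no effect
ansible-playbook -i inventory site.yml --extra-vars '{"rhel8cis_section6": false}'
```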
+# Section 1 rules +rhel8cis_rule_1_1_1_1: true +rhel8cis_rule_1_1_1_2: true +rhel8cis_rule_1_1_1_3: true +rhel8cis_rule_1_1_1_4: true +rhel8cis_rule_1_1_1_5: true +rhel8cis_rule_1_1_2: true +rhel8cis_rule_1_1_3: true +rhel8cis_rule_1_1_4: true +rhel8cis_rule_1_1_5: true +rhel8cis_rule_1_1_6: true +rhel8cis_rule_1_1_7: true +rhel8cis_rule_1_1_8: true +rhel8cis_rule_1_1_9: true +rhel8cis_rule_1_1_10: true +rhel8cis_rule_1_1_11: true +rhel8cis_rule_1_1_12: true +rhel8cis_rule_1_1_13: true +rhel8cis_rule_1_1_14: true +rhel8cis_rule_1_1_15: true +rhel8cis_rule_1_1_16: true +rhel8cis_rule_1_1_17: true +rhel8cis_rule_1_1_18: true +rhel8cis_rule_1_1_19: true +rhel8cis_rule_1_1_20: true +rhel8cis_rule_1_1_21: true +rhel8cis_rule_1_1_22: true +rhel8cis_rule_1_1_23: true +rhel8cis_rule_1_2_1: true +rhel8cis_rule_1_2_2: true +rhel8cis_rule_1_2_3: true +rhel8cis_rule_1_2_4: true +rhel8cis_rule_1_2_5: true +rhel8cis_rule_1_3_1: true +rhel8cis_rule_1_3_2: true +rhel8cis_rule_1_3_3: true +rhel8cis_rule_1_4_1: true +rhel8cis_rule_1_4_2: true +rhel8cis_rule_1_5_1: true +rhel8cis_rule_1_5_2: true +rhel8cis_rule_1_5_3: true +rhel8cis_rule_1_6_1: true +rhel8cis_rule_1_6_2: true +rhel8cis_rule_1_7_1_1: true +rhel8cis_rule_1_7_1_2: true +rhel8cis_rule_1_7_1_3: true +rhel8cis_rule_1_7_1_4: true +rhel8cis_rule_1_7_1_5: true +rhel8cis_rule_1_7_1_6: true +rhel8cis_rule_1_7_1_7: true +rhel8cis_rule_1_8_1_1: true +rhel8cis_rule_1_8_1_2: true +rhel8cis_rule_1_8_1_3: true +rhel8cis_rule_1_8_1_4: true +rhel8cis_rule_1_8_1_5: true +rhel8cis_rule_1_8_1_6: true +rhel8cis_rule_1_8_2: true +rhel8cis_rule_1_9: true +rhel8cis_rule_1_10: true +rhel8cis_rule_1_11: true + +# Section 2 rules +rhel8cis_rule_2_1_1: true +rhel8cis_rule_2_1_2: true +rhel8cis_rule_2_1_3: true +rhel8cis_rule_2_1_4: true +rhel8cis_rule_2_1_5: true +rhel8cis_rule_2_1_6: true +rhel8cis_rule_2_1_7: true +rhel8cis_rule_2_2_1_1: true +rhel8cis_rule_2_2_1_2: true +rhel8cis_rule_2_2_1_3: true +rhel8cis_rule_2_2_2: true +rhel8cis_rule_2_2_3: true +rhel8cis_rule_2_2_4: true +rhel8cis_rule_2_2_5: true +rhel8cis_rule_2_2_6: true +rhel8cis_rule_2_2_7: true +rhel8cis_rule_2_2_8: true +rhel8cis_rule_2_2_9: true +rhel8cis_rule_2_2_10: true +rhel8cis_rule_2_2_11: true +rhel8cis_rule_2_2_12: true +rhel8cis_rule_2_2_13: true +rhel8cis_rule_2_2_14: true +rhel8cis_rule_2_2_15: true +rhel8cis_rule_2_2_16: true +rhel8cis_rule_2_2_17: true +rhel8cis_rule_2_2_18: true +rhel8cis_rule_2_3_1: true +rhel8cis_rule_2_3_2: true +rhel8cis_rule_2_3_3: true + +# Section 3 rules +rhel8cis_rule_3_1_1: true +rhel8cis_rule_3_1_2: true +rhel8cis_rule_3_2_1: true +rhel8cis_rule_3_2_2: true +rhel8cis_rule_3_2_3: true +rhel8cis_rule_3_2_4: true +rhel8cis_rule_3_2_5: true +rhel8cis_rule_3_2_6: true +rhel8cis_rule_3_2_7: true +rhel8cis_rule_3_2_8: true +rhel8cis_rule_3_2_9: true +rhel8cis_rule_3_3_1: true +rhel8cis_rule_3_3_2: true +rhel8cis_rule_3_3_3: true +rhel8cis_rule_3_3_4: true +rhel8cis_rule_3_4_1_1: true +rhel8cis_rule_3_4_2_1: true +rhel8cis_rule_3_4_2_2: true +rhel8cis_rule_3_4_2_3: true +rhel8cis_rule_3_4_2_4: true +rhel8cis_rule_3_4_2_5: true +rhel8cis_rule_3_4_2_6: true +rhel8cis_rule_3_4_3_1: true +rhel8cis_rule_3_4_3_2: true +rhel8cis_rule_3_4_3_3: true +rhel8cis_rule_3_4_3_4: true +rhel8cis_rule_3_4_3_5: true +rhel8cis_rule_3_4_3_6: true +rhel8cis_rule_3_4_3_7: true +rhel8cis_rule_3_4_3_8: true +rhel8cis_rule_3_4_4_1_1: true +rhel8cis_rule_3_4_4_1_2: true +rhel8cis_rule_3_4_4_1_3: true +rhel8cis_rule_3_4_4_1_4: true +rhel8cis_rule_3_4_4_1_5: true +rhel8cis_rule_3_4_4_2_1: true 
+rhel8cis_rule_3_4_4_2_2: true +rhel8cis_rule_3_4_4_2_3: true +rhel8cis_rule_3_4_4_2_4: true +rhel8cis_rule_3_4_4_2_5: true +rhel8cis_rule_3_5: true +rhel8cis_rule_3_6: true + +# Section 4 rules +rhel8cis_rule_4_1_1_1: true +rhel8cis_rule_4_1_1_2: true +rhel8cis_rule_4_1_1_3: true +rhel8cis_rule_4_1_1_4: true +rhel8cis_rule_4_1_2_1: true +rhel8cis_rule_4_1_2_2: true +rhel8cis_rule_4_1_2_3: true +rhel8cis_rule_4_1_3: true +rhel8cis_rule_4_1_4: true +rhel8cis_rule_4_1_5: true +rhel8cis_rule_4_1_6: true +rhel8cis_rule_4_1_7: true +rhel8cis_rule_4_1_8: true +rhel8cis_rule_4_1_9: true +rhel8cis_rule_4_1_10: true +rhel8cis_rule_4_1_11: true +rhel8cis_rule_4_1_12: true +rhel8cis_rule_4_1_13: true +rhel8cis_rule_4_1_14: true +rhel8cis_rule_4_1_15: true +rhel8cis_rule_4_1_16: true +rhel8cis_rule_4_1_17: true +rhel8cis_rule_4_2_1_1: true +rhel8cis_rule_4_2_1_2: true +rhel8cis_rule_4_2_1_3: true +rhel8cis_rule_4_2_1_4: true +rhel8cis_rule_4_2_1_5: true +rhel8cis_rule_4_2_1_6: true +rhel8cis_rule_4_2_2_1: true +rhel8cis_rule_4_2_2_2: true +rhel8cis_rule_4_2_2_3: true +rhel8cis_rule_4_2_3: true +rhel8cis_rule_4_3: true + +# Section 5 rules +rhel8cis_rule_5_1_1: true +rhel8cis_rule_5_1_2: true +rhel8cis_rule_5_1_3: true +rhel8cis_rule_5_1_4: true +rhel8cis_rule_5_1_5: true +rhel8cis_rule_5_1_6: true +rhel8cis_rule_5_1_7: true +rhel8cis_rule_5_1_8: true +rhel8cis_rule_5_2_1: true +rhel8cis_rule_5_2_2: true +rhel8cis_rule_5_2_3: true +rhel8cis_rule_5_2_4: true +rhel8cis_rule_5_2_5: true +rhel8cis_rule_5_2_6: true +rhel8cis_rule_5_2_7: true +rhel8cis_rule_5_2_8: true +rhel8cis_rule_5_2_9: true +rhel8cis_rule_5_2_10: true +rhel8cis_rule_5_2_12: true +rhel8cis_rule_5_2_11: true +rhel8cis_rule_5_2_13: true +rhel8cis_rule_5_2_14: true +rhel8cis_rule_5_2_15: true +rhel8cis_rule_5_2_16: true +rhel8cis_rule_5_2_17: true +rhel8cis_rule_5_2_18: true +rhel8cis_rule_5_2_19: true +rhel8cis_rule_5_2_20: true +rhel8cis_rule_5_3_1: true +rhel8cis_rule_5_3_2: true +rhel8cis_rule_5_3_3: true +rhel8cis_rule_5_4_1: true +rhel8cis_rule_5_4_2: true +rhel8cis_rule_5_4_3: true +rhel8cis_rule_5_4_4: true +rhel8cis_rule_5_5_1_1: true +rhel8cis_rule_5_5_1_2: true +rhel8cis_rule_5_5_1_3: true +rhel8cis_rule_5_5_1_4: true +rhel8cis_rule_5_5_1_5: true +rhel8cis_rule_5_5_2: true +rhel8cis_rule_5_5_3: true +rhel8cis_rule_5_5_4: true +rhel8cis_rule_5_5_5: true +rhel8cis_rule_5_6: true +rhel8cis_rule_5_7: true + +# Section 6 rules +rhel8cis_rule_6_1_1: true +rhel8cis_rule_6_1_2: true +rhel8cis_rule_6_1_3: true +rhel8cis_rule_6_1_4: true +rhel8cis_rule_6_1_5: true +rhel8cis_rule_6_1_6: true +rhel8cis_rule_6_1_7: true +rhel8cis_rule_6_1_8: true +rhel8cis_rule_6_1_9: true +rhel8cis_rule_6_1_10: true +rhel8cis_rule_6_1_11: true +rhel8cis_rule_6_1_12: true +rhel8cis_rule_6_1_13: true +rhel8cis_rule_6_1_14: true +rhel8cis_rule_6_2_1: true +rhel8cis_rule_6_2_2: true +rhel8cis_rule_6_2_3: true +rhel8cis_rule_6_2_4: true +rhel8cis_rule_6_2_5: true +rhel8cis_rule_6_2_6: true +rhel8cis_rule_6_2_7: true +rhel8cis_rule_6_2_8: false +rhel8cis_rule_6_2_9: true +rhel8cis_rule_6_2_10: true +rhel8cis_rule_6_2_11: true +rhel8cis_rule_6_2_12: true +rhel8cis_rule_6_2_13: true +rhel8cis_rule_6_2_14: true +rhel8cis_rule_6_2_15: true +rhel8cis_rule_6_2_16: true +rhel8cis_rule_6_2_17: true +rhel8cis_rule_6_2_18: true +rhel8cis_rule_6_2_19: true +rhel8cis_rule_6_2_20: true + +# Service configuration booleans set true to keep service +rhel8cis_avahi_server: false +rhel8cis_cups_server: false +rhel8cis_dhcp_server: false +rhel8cis_ldap_server: false 
+rhel8cis_telnet_server: false +rhel8cis_nfs_server: false +rhel8cis_rpc_server: false +rhel8cis_ntalk_server: false +rhel8cis_rsyncd_server: false +rhel8cis_tftp_server: false +rhel8cis_rsh_server: false +rhel8cis_nis_server: false +rhel8cis_snmp_server: false +rhel8cis_squid_server: false +rhel8cis_smb_server: false +rhel8cis_dovecot_server: false +rhel8cis_httpd_server: false +rhel8cis_vsftpd_server: false +rhel8cis_named_server: false +rhel8cis_nfs_rpc_server: false +rhel8cis_is_mail_server: false +rhel8cis_bind: false +rhel8cis_vsftpd: false +rhel8cis_httpd: false +rhel8cis_dovecot: false +rhel8cis_samba: false +rhel8cis_squid: false +rhel8cis_net_snmp: false +rhel8cis_allow_autofs: false + +## Section 1 vars + +# 1.1.2 +# These settings go into the /etc/fstab file for the /tmp mount settings +# The value must contain nosuid,nodev,noexec to conform to CIS standards +# rhel8cis_tmp_tmpfs_settings: "defaults,rw,nosuid,nodev,noexec,relatime 0 0" +# If set true, uses the tmp.mount service; otherwise the fstab configuration is used +rhel8cis_tmp_svc: false + +# 1.2.1 +# This is the login information for your RedHat Subscription +# DO NOT USE PLAIN TEXT PASSWORDS!!!!! +# The intent here is to use a password utility like Ansible Vault +rhel8cis_rh_sub_user: user +rhel8cis_rh_sub_password: password + +# 1.2.2 +# Do you require rhnsd +# RedHat Satellite Subscription items +rhel8cis_rhnsd_required: false + +# 1.3.3 var log location variable +rhel8cis_varlog_location: "/var/log/sudo.log" + +# xinetd required +rhel8cis_xinetd_required: false + +# 1.4.2 Bootloader password +rhel8cis_bootloader_password_hash: 'grub.pbkdf2.sha512.changethispassword' +rhel8cis_bootloader_password: random +rhel8cis_set_boot_pass: false + +# 1.10/1.11 Set crypto policy (LEGACY, DEFAULT, FUTURE, FIPS) +# Control 1.10 states not to use LEGACY and control 1.11 says to use FUTURE or FIPS.
+rhel8cis_crypto_policy: "FUTURE" + +# System network parameters (host only OR host and router) +rhel8cis_is_router: false + +# IPv6 required +rhel8cis_ipv6_required: true + +# AIDE +rhel8cis_config_aide: true +# AIDE cron settings +rhel8cis_aide_cron: + cron_user: root + cron_file: /etc/crontab + aide_job: '/usr/sbin/aide --check' + aide_minute: 0 + aide_hour: 5 + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +# SELinux policy +rhel8cis_selinux_pol: targeted + +# Whether or not to run tasks related to auditing/patching the desktop environment +rhel8cis_gui: no + +# Set to 'true' if X Windows is needed in your environment +rhel8cis_xwindows_required: false + +rhel8cis_openldap_clients_required: false +rhel8cis_telnet_required: false +rhel8cis_talk_required: false +rhel8cis_rsh_required: false +rhel8cis_ypbind_required: false + +# 2.2.1.1 Time Synchronization - Either chrony or ntp +rhel8cis_time_synchronization: chrony + +# 2.2.1.2 Time Synchronization servers - used in template file chrony.conf.j2 +rhel8cis_time_synchronization_servers: + - 0.pool.ntp.org + - 1.pool.ntp.org + - 2.pool.ntp.org + - 3.pool.ntp.org + +rhel8cis_chrony_server_options: "minpoll 8" +rhel8cis_ntp_server_options: "iburst" + +## Section3 vars +# 3.4.2 | PATCH | Ensure /etc/hosts.allow is configured +rhel8cis_host_allow: + - "10.0.0.0/255.0.0.0" + - "172.16.0.0/255.240.0.0" + - "192.168.0.0/255.255.0.0" + +# Firewall Service - either firewalld, iptables, or nftables +rhel8cis_firewall: firewalld + +# 3.4.2.4 Default zone setting +rhel8cis_default_zone: public + +# 3.4.2.5 Zone and Interface setting +rhel8cis_int_zone: customezone +rhel8cis_interface: eth0 + +rhel8cis_firewall_services: + - ssh + - dhcpv6-client + +# 3.4.3.2 Set nftables new table create +rhel8cis_nft_tables_autonewtable: true +rhel8cis_nft_tables_tablename: filter + +# 3.4.3.3 Set nftables new chain create +rhel8cis_nft_tables_autochaincreate: true + +# Warning Banner Content (issue, issue.net, motd) +rhel8cis_warning_banner: | + Authorized uses only. All activity may be monitored and reported. +# End Banner + +## Section4 vars + +rhel8cis_auditd: + space_left_action: email + action_mail_acct: root + admin_space_left_action: halt + max_log_file_action: keep_logs + +rhel8cis_logrotate: "daily" + +# The audit_back_log_limit value should never be below 8192 +rhel8cis_audit_back_log_limit: 8192 + +# The max_log_file parameter should be based on your sites policy +rhel8cis_max_log_file_size: 10 + +# RHEL-08-4.2.1.4/4.2.1.5 remote and destation log server name +rhel8cis_remote_log_server: logagg.example.com + +# RHEL-08-4.2.1.5 +rhel8cis_system_is_log_server: false + +## Section5 vars + +rhel8cis_sshd: + clientalivecountmax: 0 + clientaliveinterval: 900 + ciphers: "aes256-ctr,aes192-ctr,aes128-ctr" + macs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com" + logingracetime: 60 + # WARNING: make sure you understand the precedence when working with these values!! + # allowusers: + # allowgroups: systems dba + # denyusers: + # denygroups: +rhel8cis_pam_faillock: + attempts: 5 + interval: 900 + unlock_time: 900 + fail_for_root: no + remember: 5 + pwhash: sha512 + +# 5.2.5 SSH LogLevel setting. Options are INFO or VERBOSE +rhel8cis_ssh_loglevel: INFO + +# 5.2.19 SSH MaxSessions setting. Must be 4 our less +rhel8cis_ssh_maxsessions: 4 +rhel8cis_inactivelock: + lock_days: 30 + +# 5.3.1/5.3.2 Custom authselect profile settings. 
Settings in place now will fail, they are place holders from the control example +# Due to the way many multiple options and ways to configure this control needs to be enabled and settings adjusted to minimise risk +rhel8cis_use_authconfig: false +rhel8cis_authselect: + custom_profile_name: custom-profile + default_file_to_copy: "sssd --symlink-meta" + options: with-sudo with-faillock without-nullok + +# 5.3.1 Enable automation to create custom profile settings, using the settings above +rhel8cis_authselect_custom_profile_create: false + +# 5.3.2 Enable automation to select custom profile options, using the settings above +rhel8cis_authselect_custom_profile_select: false + +rhel8cis_pass: + max_days: 365 + min_days: 7 + warn_age: 7 +# Syslog system - either rsyslog or syslog-ng +rhel8cis_syslog: rsyslog +rhel8cis_rsyslog_ansiblemanaged: true + +rhel8cis_vartmp: + source: /tmp + fstype: none + opts: "defaults,nodev,nosuid,noexec,bind" + enabled: no +## PAM +rhel8cis_pam_password: + minlen: "14" + minclass: "4" + +# Starting GID for interactive users +rhel8cis_int_gid: 1000 + +# RHEL-08-5.4.5 +# Session timeout setting file (TMOUT setting can be set in multiple files) +# Timeout value is in seconds. (60 seconds * 10 = 600) +rhel8cis_shell_session_timeout: + file: /etc/profile.d/tmout.sh + timeout: 600 +# RHEL-08-5.4.1.5 Allow ansible to expire password for account with a last changed date in the future. False will just display users in violation, true will expire those users passwords +rhel8cis_futurepwchgdate_autofix: true + +# 5.7 +# rhel8cis_sugroup: sugroup # change accordingly wheel is default + +# wheel users list +rhel8cis_sugroup_users: "root" + +## Section6 vars + +# RHEL-08_6.1.1 +rhel8cis_rpm_audit_file: /var/tmp/rpm_file_check + +# RHEL-08_6.1.10 Allow ansible to adjust world-writable files. False will just display world-writable files, True will remove world-writable +rhel8cis_no_world_write_adjust: true +rhel8cis_passwd_label: "{{ (this_item | default(item)).id }}: {{ (this_item | default(item)).dir }}" +# 6.2.9 +rhel8cis_dotperm_ansiblemanaged: true +#### Goss Configuration Settings #### + +### Goss binary settings ### +goss_version: + release: v0.3.16 + checksum: 'sha256:827e354b48f93bce933f5efcd1f00dc82569c42a179cf2d384b040d8a80bfbfb' +audit_bin_path: /usr/local/bin/ +audit_bin: "{{ audit_bin_path }}goss" +audit_format: json + +# if get_goss_file == download change accordingly +goss_url: "https://github.com/aelsabbahy/goss/releases/download/{{ goss_version.release }}/goss-linux-amd64" + +## if get_goss_file - copy the following needs to be updated for your environment +## it is expected that it will be copied from somewhere accessible to the control node +## e.g copy from ansible control node to remote host +copy_goss_from_path: /some/accessible/path + +### Goss Audit Benchmark file ### +## managed by the control audit_content +# git +audit_file_git: "https://github.com/ansible-lockdown/{{ benchmark }}-Audit.git" +audit_git_version: main + +# copy: +audit_local_copy: "some path to copy from" + +# get_url: +audit_files_url: "some url maybe s3?" 
+ +# Where the goss audit configuration will be stored +audit_files: "/var/tmp/{{ benchmark }}-Audit/" + +## Goss configuration information +# Where the goss configs and outputs are stored +audit_out_dir: '/var/tmp' +audit_conf_dir: "{{ audit_out_dir }}/{{ benchmark }}-Audit/" +pre_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_pre_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}" +post_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_post_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}" + +## The following should not need changing +goss_file: "{{ audit_conf_dir }}goss.yml" +audit_vars_path: "{{ audit_conf_dir }}/vars/{{ ansible_hostname }}.yml" +audit_results: | + The pre remediation results are: {{ pre_audit_summary }}. + The post remediation results are: {{ post_audit_summary }}. + Full breakdown can be found in {{ audit_out_dir }} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/files/etc/systemd/system/tmp.mount b/Linux/ansible-lockdown/RHEL8-CIS/files/etc/systemd/system/tmp.mount new file mode 100644 index 0000000..47ca662 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/files/etc/systemd/system/tmp.mount @@ -0,0 +1,25 @@ +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. + +[Unit] +Description=Temporary Directory +Documentation=man:hier(7) +Documentation=http://www.freedesktop.org/wiki/Software/systemd/APIFileSystems +ConditionPathIsSymbolicLink=!/tmp +DefaultDependencies=no +Conflicts=umount.target +Before=local-fs.target umount.target + +[Mount] +What=tmpfs +Where=/tmp +Type=tmpfs +Options=mode=1777,strictatime,noexec,nodev,nosuid + +# Make 'systemctl enable tmp.mount' work: +[Install] +WantedBy=local-fs.target diff --git a/Linux/ansible-lockdown/RHEL8-CIS/group_vars/docker b/Linux/ansible-lockdown/RHEL8-CIS/group_vars/docker new file mode 100644 index 0000000..59bc1b3 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/group_vars/docker @@ -0,0 +1,28 @@ +--- +ansible_user: root +# AIDE cron settings +rhel8cis_aide_cron: + cron_user: root + cron_file: /var/spool/cron/root + aide_job: '/usr/sbin/aide --check' + aide_minute: 0 + aide_hour: 5 + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +rhel8cis_sshd: + clientalivecountmax: 3 + clientaliveinterval: 300 + ciphers: "aes256-ctr,aes192-ctr,aes128-ctr" + macs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com" + logingracetime: 60 + # - make sure you understand the precedence when working with these values!! 
+ allowusers: vagrant + allowgroups: vagrant + denyusers: root + denygroups: root + +# Workarounds for Docker +rhel8cis_skip_for_travis: true +rhel8cis_selinux_disable: true diff --git a/Linux/ansible-lockdown/RHEL8-CIS/group_vars/vagrant b/Linux/ansible-lockdown/RHEL8-CIS/group_vars/vagrant new file mode 100644 index 0000000..899e6bd --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/group_vars/vagrant @@ -0,0 +1,28 @@ +--- +ansible_user: vagrant +# AIDE cron settings +rhel8cis_aide_cron: + cron_user: root + cron_file: /var/spool/cron/root + aide_job: '/usr/sbin/aide --check' + aide_minute: 0 + aide_hour: 5 + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +rhel8cis_sshd: + clientalivecountmax: 3 + clientaliveinterval: 300 + ciphers: 'aes256-ctr,aes192-ctr,aes128-ctr' + macs: 'hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,umac-128@openssh.com' + logingracetime: 60 + # - make sure you understand the precedence when working with these values!! + allowusers: vagrant + allowgroups: vagrant + denyusers: root + denygroups: root + +# Vagrant can touch code that Docker cannot +rhel8cis_skip_for_travis: false +rhel8cis_selinux_disable: false diff --git a/Linux/ansible-lockdown/RHEL8-CIS/handlers/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/handlers/main.yml new file mode 100644 index 0000000..fe7b5da --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/handlers/main.yml @@ -0,0 +1,106 @@ +--- +# handlers file for RHEL8-CIS + +- name: sysctl flush ipv4 route table + become: yes + sysctl: + name: net.ipv4.route.flush + value: '1' + sysctl_set: yes + ignore_errors: yes + when: ansible_virtualization_type != "docker" + tags: + - skip_ansible_lint + +- name: sysctl flush ipv6 route table + become: yes + sysctl: + name: net.ipv6.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: systemd restart tmp.mount + become: yes + systemd: + name: tmp.mount + daemon_reload: yes + enabled: yes + masked: no + state: reloaded + +- name: systemd restart var-tmp.mount + become: yes + systemd: + name: var-tmp.mount + daemon_reload: yes + enabled: yes + masked: no + state: reloaded + +- name: remount tmp + command: mount -o remount /tmp + args: + warn: false + +- name: restart firewalld + become: yes + service: + name: firewalld + state: restarted + +- name: restart xinetd + become: yes + service: + name: xinetd + state: restarted + +- name: restart sshd + become: yes + service: + name: sshd + state: restarted + +- name: restart postfix + become: yes + service: + name: postfix + state: restarted + +- name: reload dconf + become: yes + command: dconf update + +- name: restart auditd + command: /sbin/service auditd restart + changed_when: no + check_mode: no + failed_when: no + args: + warn: no + when: + - not rhel8cis_skip_for_travis + tags: + - skip_ansible_lint + +- name: grub2cfg + command: "grub2-mkconfig -o {{ grub_cfg.stat.lnk_source }}" + ignore_errors: True + tags: + - skip_ansible_lint + +- name: restart rsyslog + become: yes + service: + name: rsyslog + state: restarted + +- name: restart syslog-ng + become: yes + service: + name: syslog-ng + state: restarted + +- name: systemd_daemon_reload + systemd: + daemon-reload: yes diff --git a/Linux/ansible-lockdown/RHEL8-CIS/local.yml b/Linux/ansible-lockdown/RHEL8-CIS/local.yml new file mode 100644 index 0000000..8c2a9f4 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/local.yml @@ -0,0 +1,12 @@ +--- + +- hosts: localhost + connection: local 
+ become: true + vars: + is_container: false + + roles: + - role: "{{ playbook_dir }}" + rhel8cis_system_is_container: "{{ is_container | default(false) }}" + rhel8cis_skip_for_travis: false diff --git a/Linux/ansible-lockdown/RHEL8-CIS/meta/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/meta/main.yml new file mode 100644 index 0000000..797cd40 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/meta/main.yml @@ -0,0 +1,18 @@ +--- +galaxy_info: + author: "Sam Doran, Josh Springer, Daniel Shepherd, Bas Meijeri, James Cassell, Mike Renfro, DFed, George Nalen, Mark Bolwell" + description: "Apply the DISA RHEL 8 CIS" + company: "MindPoint Group" + license: MIT + role_name: rhel8_cis + min_ansible_version: 2.9.0 + platforms: + - name: EL + versions: + - 8 + galaxy_tags: + - system + - security + - cis + - hardening +dependencies: [] diff --git a/Linux/ansible-lockdown/RHEL8-CIS/site.yml b/Linux/ansible-lockdown/RHEL8-CIS/site.yml new file mode 100644 index 0000000..81bc465 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/site.yml @@ -0,0 +1,11 @@ +--- +- hosts: all + become: true + vars: + is_container: false + + roles: + + - role: "{{ playbook_dir }}" + rhel8cis_system_is_container: "{{ is_container | default(false) }}" + rhel8cis_skip_for_travis: false diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/LE_audit_setup.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/LE_audit_setup.yml new file mode 100644 index 0000000..61a4cdf --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/LE_audit_setup.yml @@ -0,0 +1,22 @@ +--- + +- name: Download audit binary + get_url: + url: "{{ goss_url }}" + dest: "{{ audit_bin }}" + owner: root + group: root + checksum: "{{ goss_version.checksum }}" + mode: 0555 + when: + - get_goss_file == 'download' + +- name: copy audit binary + copy: + src: + dest: "{{ audit_bin }}" + mode: 0555 + owner: root + group: root + when: + - get_goss_file == 'copy' diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/audit_homedirperms.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/audit_homedirperms.yml new file mode 100644 index 0000000..489cd52 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/audit_homedirperms.yml @@ -0,0 +1,46 @@ +--- +- name: "SCORED | 6.2.8 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + find: + paths: + - "{{ homedir }}" + recurse: true + file_type: any + register: rhel_08_6_2_8_results + when: + - rhel8cis_rule_6_2_8|bool + tags: + - level1 + - patch + - rule_6.2.8 + +- name: "SCORED | 6.2.8 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + file: + path: "{{ line_item.path }}" + mode: 0640 + loop: "{{ rhel_08_6_2_8_results.files }}" + loop_control: + label: "{{ line_item.path }}" + loop_var: line_item + when: + - rhel_08_6_2_8_results.files.isreg is defined + - rhel8cis_rule_6_2_8|bool + tags: + - level1 + - patch + - rule_6.2.8 + +- name: "SCORED | 6.2.8 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + file: + path: "{{ line_item.path }}" + mode: 0750 + loop: "{{ rhel_08_6_2_8_results.files }}" + loop_control: + label: "{{ line_item.path }}" + loop_var: line_item + when: + - rhel_08_6_2_8_results.files.isdir is defined + - rhel8cis_rule_6_2_8|bool + tags: + - level1 + - patch + - rule_6.2.8 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/check_prereqs.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/check_prereqs.yml new file mode 100644 index 0000000..5ce4ab4 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/check_prereqs.yml @@ 
-0,0 +1,36 @@ +--- + +- name: "PREREQ | Add the required packages | Python 3" + block: + - name: Check if python36-rpm package installed + command: rpm -q python36-rpm + failed_when: ( python36_rpm_present.rc not in [ 0, 1 ] ) + changed_when: false + args: + warn: false + register: python36_rpm_present + + - name: Add the EPEL repository required for the python36-rpm pkg + package: + name: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm + state: present + register: epel_installed + when: + - python36_rpm_present.rc != '0' + + - name: "PREREQ | Check required packages installed | Python3 " + package: + name: "{{ item }}" + state: present + register: python3reqs_installed + loop: + - python36-rpm + - libselinux-python3 + + - name: Disable Epel repo if installed earlier + command: yum-config-manager disable epel + when: epel_installed.changed + when: + - ( ansible_python.version.major == 3 and ansible_python.version.minor == 6 ) + vars: + ansible_python_interpreter: "{{ python2_bin }}" diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/main.yml new file mode 100644 index 0000000..6595b05 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/main.yml @@ -0,0 +1,111 @@ +--- +# tasks file for RHEL8-CIS +- name: Check OS version and family + fail: + msg: "This role can only be run against RHEL 8. {{ ansible_distribution }} {{ ansible_distribution_major_version }} is not supported." + when: + - ansible_os_family == 'RedHat' + - ansible_distribution_major_version is version_compare('8', '!=') + tags: + - always + +- name: Check ansible version + fail: + msg: You must use ansible 2.9 or greater + when: not ansible_version.full is version_compare('2.9', '>=') + tags: + - always + +- name: Check crypto-policy input + assert: + that: rhel8cis_crypto_policy in rhel8cis_allowed_crypto_policies + +- name: Check rhel8cis_bootloader_password_hash variable has been changed + assert: + that: rhel8cis_bootloader_password_hash != 'grub.pbkdf2.sha512.changethispassword' + msg: "This role will not be able to run single user password commands as rhel8cis_bootloader_password_hash variable has not been set" + when: + - rhel8cis_set_boot_pass + - rhel8cis_rule_1_5_2 + +- name: "check sugroup exists if used" + block: + - name: "Check su group exists if defined" + shell: grep -w "{{ rhel8cis_sugroup }}" /etc/group + register: sugroup_exists + changed_when: false + failed_when: sugroup_exists.rc >= 2 + tags: + - skip_ansible_lint + + - name: Check sugroup if defined exists before continuing + assert: + that: sugroup_exists.rc == 0 + msg: "The variable rhel8cis_sugroup is defined but does not exist please rectify" + when: + - rhel8cis_sugroup is defined + - rhel8cis_rule_5_7 + tags: + - rule_5.7 + +- include: prelim.yml + become: yes + tags: + - prelim_tasks + - always + +- import_tasks: pre_remediation_audit.yml + when: + - run_audit + +- name: Gather the package facts + package_facts: + manager: auto + tags: + - always + +- include: parse_etc_password.yml + become: yes + when: rhel8cis_section6 + +- include: section_1/main.yml + become: yes + when: rhel8cis_section1 + tags: + - rhel8cis_section1 + +- include: section_2/main.yml + become: yes + when: rhel8cis_section2 + +- include: section_3/main.yml + become: yes + when: rhel8cis_section3 + +- include: section_4/main.yml + become: yes + when: rhel8cis_section4 + +- include: section_5/main.yml + become: yes + when: rhel8cis_section5 + +- include: section_6/main.yml + become: yes + when: 
rhel8cis_section6 + +- include: post.yml + become: yes + tags: + - post_tasks + - always + +- import_tasks: post_remediation_audit.yml + when: + - run_audit + +- name: Show Audit Summary + debug: + msg: "{{ audit_results.split('\n') }}" + when: + - run_audit diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/parse_etc_password.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/parse_etc_password.yml new file mode 100644 index 0000000..e66123f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/parse_etc_password.yml @@ -0,0 +1,32 @@ +--- + +- name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Parse /etc/passwd" + block: + - name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Parse /etc/passwd" + command: cat /etc/passwd + changed_when: no + check_mode: no + register: rhel8cis_passwd_file_audit + + - name: "PRELIM | 5.5.2 | 6.2.7 | 6.2.8 | 6.2.20 | Split passwd entries" + set_fact: + rhel8cis_passwd: "{{ rhel8cis_passwd_file_audit.stdout_lines | map('regex_replace', ld_passwd_regex, ld_passwd_yaml) | map('from_yaml') | list }}" + with_items: "{{ rhel8cis_passwd_file_audit.stdout_lines }}" + vars: + ld_passwd_regex: >- + ^(?P<id>[^:]*):(?P<password>[^:]*):(?P<uid>[^:]*):(?P<gid>[^:]*):(?P<gecos>[^:]*):(?P<dir>[^:]*):(?P<shell>[^:]*) + ld_passwd_yaml: | + id: >-4 + \g<id> + password: >-4 + \g<password> + uid: \g<uid> + gid: \g<gid> + gecos: >-4 + \g<gecos> + dir: >-4 + \g<dir> + shell: >-4 + \g<shell> + tags: + - always diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/post.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/post.yml new file mode 100644 index 0000000..3c3ced3 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/post.yml @@ -0,0 +1,15 @@ +--- +# Post tasks + +- name: Perform DNF package cleanup + dnf: + autoremove: yes + changed_when: no + +- name: flush handlers + meta: flush_handlers + +- name: Reboot host + reboot: + when: + - not rhel8cis_skip_reboot diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/post_remediation_audit.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/post_remediation_audit.yml new file mode 100644 index 0000000..17ef3f8 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/post_remediation_audit.yml @@ -0,0 +1,43 @@ +--- + +- name: "Post Audit | Run post_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ post_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Post Audit | ensure audit files readable by users + file: + path: "{{ item }}" + mode: 0644 + state: file + loop: + - "{{ post_audit_outfile }}" + - "{{ pre_audit_outfile }}" + +- name: Post Audit | Capture audit data if json format + block: + - name: "capture data {{ post_audit_outfile }}" + command: "cat {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout | from_json | json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Post Audit | Capture audit data if documentation format + block: + - name: "Post Audit | capture data {{ post_audit_outfile }}" + command: "tail -2 {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Post Audit | Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/pre_remediation_audit.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/pre_remediation_audit.yml new file mode 100644 index 0000000..7872862 --- /dev/null +++ 
b/Linux/ansible-lockdown/RHEL8-CIS/tasks/pre_remediation_audit.yml @@ -0,0 +1,118 @@ +--- + +- name: Pre Audit | Setup the audit + include_tasks: LE_audit_setup.yml + when: + - setup_audit + tags: + - setup_audit + +- name: "Pre Audit | Ensure {{ audit_conf_dir }} exists" + file: + path: "{{ audit_conf_dir }}" + state: directory + mode: '0755' + +- name: Pre Audit | If using git for content set up + block: + - name: Pre Audit | Install git (rh8 python3) + package: + name: git + state: present + when: ansible_distribution_major_version == 8 + + - name: Pre Audit | Install git (rh7 python2) + package: + name: git + state: present + vars: + ansible_python_interpreter: "{{ python2_bin }}" + when: ansible_distribution_major_version == 7 + + - name: Pre Audit | retrieve audit content files from git + git: + repo: "{{ audit_file_git }}" + dest: "{{ audit_conf_dir }}" + version: "{{ audit_git_version }}" + when: + - audit_content == 'git' + +- name: Pre Audit | copy to audit content files to server + copy: + src: "{{ audit_local_copy }}" + dest: "{{ audit_conf_dir }}" + mode: 0644 + when: + - audit_content == 'copy' + +- name: Pre Audit | get audit content from url + get_url: + url: "{{ audit_files_url }}" + dest: "{{ audit_conf_dir }}" + when: + - audit_content == 'get_url' + +- name: Pre Audit | Check Goss is available + block: + - name: Pre Audit | Check for goss file + stat: + path: "{{ audit_bin }}" + register: goss_available + + - name: Pre Audit | If audit ensure goss is available + assert: + msg: "Audit has been selected: unable to find goss binary at {{ audit_bin }}" + when: + - not goss_available.stat.exists + when: + - run_audit + +- name: "Pre Audit | Check whether machine is UEFI-based" + stat: + path: /sys/firmware/efi + register: rhel8_efi_boot + tags: + - goss_template + +- name: Pre Audit | Copy ansible default vars values to test audit + template: + src: ansible_vars_goss.yml.j2 + dest: "{{ audit_vars_path }}" + mode: 0600 + when: + - run_audit + tags: + - goss_template + +- name: "Pre Audit | Run pre_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ pre_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Pre Audit | Capture audit data if json format + block: + - name: "Pre Audit | capture data {{ pre_audit_outfile }}" + command: "cat {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Pre Audit | Capture audit data if documentation format + block: + - name: "Pre Audit | capture data {{ pre_audit_outfile }}" + command: "tail -2 {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/prelim.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/prelim.yml new file mode 100644 index 0000000..885cd40 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/prelim.yml @@ -0,0 +1,110 @@ +--- +# Preliminary tasks that should always be run +# List users in order to look files inside each home directory +- name: "PRELIM | List users accounts" + command: "awk -F: '{print $1}' /etc/passwd" + args: + warn: no + changed_when: no + check_mode: no + register: 
users + +- name: "PRELIM | Gather accounts with empty password fields" + shell: "cat /etc/shadow | awk -F: '($2 == \"\" ) {j++;print $1; } END {exit j}'" + args: + warn: no + changed_when: no + check_mode: no + register: empty_password_accounts + +- name: "PRELIM | Gather UID 0 accounts other than root" + shell: "cat /etc/passwd | awk -F: '($3 == 0 && $1 != \"root\") {i++;print $1 } END {exit i}'" + args: + warn: no + changed_when: no + check_mode: no + register: uid_zero_accounts_except_root + +- name: "PRELIM | Gather system-wide crypto-policy" + shell: update-crypto-policies --show + args: + warn: no + changed_when: no + check_mode: no + register: system_wide_crypto_policy + +- name: "PRELIM | if systemd coredump" + stat: + path: /etc/systemd/coredump.conf + register: systemd_coredump + when: + - rhel8cis_rule_1_6_1 + +- name: "PRELIM | Section 1.1 | Create list of mount points" + set_fact: + mount_names: "{{ ansible_mounts | map(attribute='mount') | list }}" + +- name: "PRELIM | Section 4.1 | Configure System Accounting (auditd)" + package: + name: audit + state: present + when: rhel8cis_level_2 + +- name: "PRELIM | Section 5.1 | Configure cron" + package: + name: cronie + state: present + +- name: "PRELIM | Install authconfig" + package: + name: authconfig + state: present + when: + - rhel8cis_use_authconfig + - rhel8cis_rule_5_3_1 or + rhel8cis_rule_5_3_2 or + rhel8cis_rule_5_3_3 + +- name: "PRELIM | Set facts based on boot type" + block: + - name: "PRELIM | Check whether machine is UEFI-based" + stat: + path: /sys/firmware/efi + register: rhel_08_efi_boot + + - name: "PRELIM | AUDIT | set legacy boot and grub path | Bios" + set_fact: + rhel8cis_legacy_boot: true + grub2_path: /etc/grub2.cfg + when: not rhel_08_efi_boot.stat.exists + + - name: "PRELIM | set grub fact | UEFI" + set_fact: + grub2_path: /etc/grub2-efi.cfg + when: rhel_08_efi_boot.stat.exists + +# - name: debug legacy boot var +# debug: +# msg: | +# legacy_boot={{ rhel8cis_legacy_boot }} +# grub2_path={{ grub2_path }} + +- name: "PRELIM | AUDIT | Ensure permissions on bootloader config are configured | Get grub config file stats" + stat: + path: "{{ grub2_path }}" + changed_when: false + register: grub_cfg + +# - name: debug grub stat +# debug: +# var: grub_cfg.stat + +- name: "PRELIM | Check for rhnsd service" + shell: "systemctl show rhnsd | grep LoadState | cut -d = -f 2" + changed_when: false + check_mode: false + register: rhnsd_service_status + when: + - rhel8cis_rule_1_2_2 + tags: + - skip_ansible_lint diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.1.x.yml new file mode 100644 index 0000000..b6dffa9 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.1.x.yml @@ -0,0 +1,102 @@ +--- + +- name: "1.1.1.1 | L1 | PATCH | Ensure mounting of cramfs filesystems is disabled" + block: + - name: "1.1.1.1 | L1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install cramfs(\\s|$)" + line: "install cramfs /bin/true" + create: yes + mode: 0600 + + - name: "1.1.1.1 | L1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Disable cramfs" + modprobe: + name: cramfs + state: absent + when: ansible_connection != 'docker' + when: + - rhel8cis_rule_1_1_1_1 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.1.1.1 + - cramfs + +- name: "1.1.1.2 | L2 | PATCH | Ensure mounting of vFAT 
filesystems is limited" + block: + - name: "1.1.1.2 | L2 | PATCH | Ensure mounting of vFAT filesystems is limited | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install vfat(\\s|$)" + line: "install vfat /bin/true" + create: yes + mode: 0600 + + - name: "1.1.1.2 | L2 | PATCH | Ensure mounting of vFAT filesystems is limited | Disable vFAT" + modprobe: + name: vfat + state: absent + when: ansible_connection != 'docker' + when: + - rhel8cis_rule_1_1_1_2 + - rhel8cis_legacy_boot + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_1.1.1.2 + - vfat + +- name: "1.1.1.3 | L1 | PATCH | Ensure mounting of squashfs filesystems is disabled" + block: + - name: "1.1.1.3 | L1 | PATCH | Ensure mounting of squashfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install squashfs(\\s|$)" + line: "install squashfs /bin/true" + create: yes + mode: 0600 + + - name: "1.1.1.3 | L1 | PATCH | Ensure mounting of squashfs filesystems is disabled | Disable squashfs" + modprobe: + name: squashfs + state: absent + when: ansible_connection != 'docker' + when: + - rhel8cis_rule_1_1_1_3 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.1.1.3 + - squashfs + +- name: "1.1.1.4 | L1 | PATCH | Ensure mounting of udf filesystems is disabled" + block: + - name: "1.1.1.4 | L1 | PATCH | Ensure mounting of udf filesystems is disable | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install udf(\\s|$)" + line: "install udf /bin/true" + create: yes + mode: 0600 + + - name: "1.1.1.4 | L1 | PATCH | Ensure mounting of udf filesystems is disable | Disable udf" + modprobe: + name: udf + state: absent + when: ansible_connection != 'docker' + when: + - rhel8cis_rule_1_1_1_4 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.1.1.4 + - udf diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.x.yml new file mode 100644 index 0000000..d461800 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.1.x.yml @@ -0,0 +1,363 @@ +--- + +- name: | + "SCORED | 1.1.2 | PATCH | Ensure /tmp is configured" + "SCORED | 1.1.3 | PATCH | Ensure nodev option set on /tmp partition" + "SCORED | 1.1.4 | PATCH | Ensure nosuid option set on /tmp partition" + "SCORED | 1.1.5 | PATCH | Ensure noexec option set on /tmp partition" + "via fstab" + mount: + name: /tmp + src: "{{ item.device }}" + fstype: "{{ item.fstype }}" + state: present + opts: defaults,{% if rhel8cis_rule_1_1_3 %}noexec,{% endif %}{% if rhel8cis_rule_1_1_4 %}nodev,{% endif %}{% if rhel8cis_rule_1_1_5 %}nosuid{% endif %} + notify: remount tmp + loop: "{{ ansible_mounts }}" + when: + - item.mount == "/tmp" + - not rhel8cis_tmp_svc + - rhel8cis_rule_1_1_2 or + rhel8cis_rule_1_1_3 or + rhel8cis_rule_1_1_4 or + rhel8cis_rule_1_1_5 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.3 + - rule_1.1.4 + - rule_1.1.5 + +- name: | + "SCORED | 1.1.2 | PATCH | Ensure /tmp is configured" + "SCORED | 1.1.3 | PATCH | Ensure nodev option set on /tmp partition" + "SCORED | 1.1.4 | PATCH | Ensure nosuid option set on /tmp partition" + "SCORED | 1.1.5 | PATCH | Ensure noexec option set on /tmp partition" + "via systemd" + template: + src: etc/systemd/system/tmp.mount.j2 + dest: /etc/systemd/system/tmp.mount + owner: root + group: root + mode: 0644 + 
notify: systemd restart tmp.mount + when: + - rhel8cis_tmp_svc + - rhel8cis_rule_1_1_2 or + rhel8cis_rule_1_1_3 or + rhel8cis_rule_1_1_4 or + rhel8cis_rule_1_1_5 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.3 + - rule_1.1.4 + - rule_1.1.5 + +- name: "1.1.6 | L2 | AUDIT | Ensure separate partition exists for /var" + block: + - name: "1.1.6 | L2 | AUDIT | Ensure separate partition exists for /var | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_mount_absent + changed_when: var_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.6 | L2 | AUDIT | Ensure separate partition exists for /var | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + when: + - required_mount in mount_names + vars: + required_mount: '/var' + when: + - rhel8cis_rule_1_1_6 + tags: + - level2-server + - level2-workstation + - scored + - patch + - mounts + - rule_1.1.6 + +- name: "1.1.7 | L2 | AUDIT | Ensure separate partition exists for /var/tmp | skips if mount absent" + block: + - name: "1.1.7 | L2 | AUDIT | Ensure separate partition exists for /var/tmp | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_tmp_mount_absent + changed_when: var_tmp_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.7 | L2 | AUDIT | Ensure separate partition exists for /var/tmp | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + register: var_tmp_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/var/tmp' + when: + - rhel8cis_rule_1_1_7 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.7 + +- name: | + "1.1.8 | L1 | PATCH | Ensure nodev option set on /var/tmp partition | skips if mount absent" + "1.1.9 | L1 | PATCH | Ensure nosuid option set on /var/tmp partition | skips if mount absent" + "1.1.10 | L1 | PATCH | Ensure noexec option set on /var/tmp partition | skips if mount absent" + mount: + name: /var/tmp + src: "{{ item.device }}" + fstype: "{{ item.fstype }}" + state: present + opts: defaults,{% if rhel8cis_rule_1_1_10 %}noexec,{% endif %}{% if rhel8cis_rule_1_1_8 %}nodev,{% endif %}{% if rhel8cis_rule_1_1_9 %}nosuid{% endif %} + loop: "{{ ansible_mounts }}" + when: + - var_tmp_mount_present is defined + - item.mount == "/var/tmp" + - rhel8cis_rule_1_1_7 # This is required so the check takes place + - rhel8cis_rule_1_1_8 or + rhel8cis_rule_1_1_9 or + rhel8cis_rule_1_1_10 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - skip_ansible_lint + +- name: "1.1.11 | L2 | AUDIT | Ensure separate partition exists for /var/log" + block: + - name: "1.1.11 | L2 | AUDIT | Ensure separate partition exists for /var/log | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_log_mount_absent + changed_when: var_log_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.11 | L2 | AUDIT | Ensure separate partition exists for /var/log | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." 
+ when: + - required_mount in mount_names + vars: + required_mount: '/var/log' + when: + - rhel8cis_rule_1_1_11 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.11 + - skip_ansible_lint + +- name: "1.1.12 | L2 | AUDIT | Ensure separate partition exists for /var/log/audit" + block: + - name: "1.1.12 | L2 | AUDIT | Ensure separate partition exists for /var/log/audit | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: var_log_audit_mount_absent + changed_when: var_log_audit_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.12 | L2 | AUDIT | Ensure separate partition exists for /var/log/audit | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + when: + - required_mount in mount_names + vars: + required_mount: '/var/log/audit' + when: + - rhel8cis_rule_1_1_12 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.12 + + +- name: "1.1.13 | L2 | AUDIT | Ensure separate partition exists for /home" + block: + - name: "1.1.13 | L2 | AUDIT | Ensure separate partition exists for /home | Absent" + debug: + msg: "Warning! {{ required_mount }} doesn't exist. This is a manual task" + register: home_mount_absent + changed_when: home_mount_absent.skipped is undefined + when: + - required_mount not in mount_names + - name: "1.1.13 | L2 | AUDIT | Ensure separate partition exists for /home | Present" + debug: + msg: "Congratulations: {{ required_mount }} exists." + register: home_mount_present + when: + - required_mount in mount_names + vars: + required_mount: '/home' + when: + - rhel8cis_rule_1_1_13 + tags: + - level2-server + - level2-workstation + - scored + - audit + - mounts + - rule_1.1.13 + - skip_ansible_lint + +- name: "1.1.14 | L1 | PATCH | Ensure nodev option set on /home partition | skips if mount absent" + mount: + name: /home + src: "{{ item.device }}" + fstype: "{{ item.fstype }}" + state: present + opts: defaults,{% if rhel8cis_rule_1_1_14 %}nodev{% endif %} + loop: "{{ ansible_mounts }}" + when: + - home_mount_present is defined + - item.mount == "/home" + - rhel8cis_rule_1_1_14 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.13 + - skip_ansible_lint + +- name: | + "1.1.15 | L1 | PATCH | Ensure nodev option set on /dev/shm partition | skips if mount absent + 1.1.16 | L1 | PATCH | Ensure nosuid option set on /dev/shm partition | skips if mount absent + 1.1.17 | L1 | PATCH | Ensure noexec option set on /dev/shm partition | skips if mount absent" + block: + - name: | + "1.1.15 | L1 | AUDIT | Ensure nodev option set on /dev/shm partition | Check for /dev/shm existence + 1.1.16 | L1 | AUDIT | Ensure nosuid option set on /dev/shm partition | Check for /dev/shm existence + 1.1.17 | L1 | AUDIT | Ensure noexec option set on /dev/shm partition | Check for /dev/shm existence" + shell: mount -l | grep -E '\s/dev/shm\s' + changed_when: false + failed_when: false + check_mode: no + register: rhel8cis_1_1_15_dev_shm_status + + - name: | + "1.1.15 | L1 | PATCH | Ensure nodev option set on /dev/shm partition | skips if mount absent + 1.1.16 | L1 | PATCH | Ensure nosuid option set on /dev/shm partition | skips if mount absent + 1.1.17 | L1 | PATCH | Ensure noexec option set on /dev/shm partition | skips if mount absent" + mount: + name: /dev/shm + src: tmpfs + fstype: tmpfs + state: mounted + opts: defaults,{% if rhel8cis_rule_1_1_17 %}noexec,{% endif %}{% if 
rhel8cis_rule_1_1_15 %}nodev,{% endif %}{% if rhel8cis_rule_1_1_16 %}nosuid{% endif %} + when: "'dev/shm' in rhel8cis_1_1_15_dev_shm_status.stdout" + when: + - rhel8cis_rule_1_1_15 or + rhel8cis_rule_1_1_16 or + rhel8cis_rule_1_1_17 + tags: + - level1-server + - level1-workstation + - scored + - patch + - mounts + - rule_1.1.15 + - rule_1.1.16 + - rule_1.1.17 + +- name: | + "1.1.18 | L1 | PATCH | Ensure nodev option set on removable media partitions" + "1.1.19 | L1 | PATCH | Ensure nosuid option set on removable media partitions" + "1.1.20 | L1 | PATCH | Ensure noexec option set on removable media partitions" + debug: + msg: "--> Not relevant" + changed_when: false + when: + - rhel8cis_rule_1_1_18 or + rhel8cis_rule_1_1_19 or + rhel8cis_rule_1_1_20 + tags: + - level1-server + - level1-workstation + - notscored + - audit + - mounts + - rule_1.1.18 + - rule_1.1.19 + - rule_1.1.20 + +- name: "1.1.21 | L1 | PATCH | Ensure sticky bit is set on all world-writable directories" + shell: df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type d -perm -0002 2>/dev/null | xargs chmod a+t + args: + warn: no + changed_when: false + failed_when: false + when: + - rhel8cis_rule_1_1_21 + tags: + - skip_ansible_lint + - level1-server + - level1-workstation + - patch + - stickybits + - permissons + - rule_1.1.21 + +- name: "1.1.22 | L1 | PATCH | Disable Automounting" + service: + name: autofs + enabled: no + when: + - not rhel8cis_allow_autofs + - "'autofs' in ansible_facts.packages" + - rhel8cis_rule_1_1_22 + tags: + - level1-server + - level2-workstation + - patch + - mounts + - automounting + - rule_1.1.22 + +- name: "1.1.23 | L1 | PATCH | Disable USB Storage" + block: + - name: "1.1.23 | L1 | PATCH | Disable USB Storage | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install usb-storage(\\s|$)" + line: "install usb-storage /bin/true" + create: yes + owner: root + group: root + mode: 0600 + + - name: "1.1.23 | L1 | PATCH | Disable USB Storage | Edit modprobe config" + modprobe: + name: usb-storage + state: absent + when: + - rhel8cis_rule_1_1_23 + tags: + - level1-server + - level2-workstation + - patch + - mounts + - removable_storage + - rule_1.1.23 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.10.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.10.yml new file mode 100644 index 0000000..dd0f9b1 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.10.yml @@ -0,0 +1,15 @@ +--- + +- name: "1.10 | L1 | PATCH | Ensure system-wide crypto policy is not legacy" + shell: | + update-crypto-policies --set "{{ rhel8cis_crypto_policy }}" + update-crypto-policies + when: + - rhel8cis_rule_1_10 + - system_wide_crypto_policy['stdout'] == 'LEGACY' + tags: + - level1-server + - level1-workstation + - no system_is_ec2 + - patch + - rule_1.10 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.11.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.11.yml new file mode 100644 index 0000000..bd10e7a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.11.yml @@ -0,0 +1,15 @@ +--- + +- name: "1.11 | L2 | PATCH | Ensure system-wide crypto policy is FUTURE or FIPS" + shell: | + update-crypto-policies --set "{{ rhel8cis_crypto_policy }}" + update-crypto-policies + when: + - rhel8cis_rule_1_11 + - system_wide_crypto_policy['stdout'] not in rhel8cis_allowed_crypto_policies + tags: + - level2-server + - level2-workstation + - not system_is_ec2 + - patch + - 
rule_1.11 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.2.x.yml new file mode 100644 index 0000000..a25b246 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.2.x.yml @@ -0,0 +1,100 @@ +--- + +- name: "1.2.1 | L1 | PATCH | Ensure Red Hat Subscription Manager connection is configured" + redhat_subscription: + state: present + username: "{{ rhel8cis_rh_sub_user }}" + password: "{{ rhel8cis_rh_sub_password }}" + auto_attach: true + no_log: true + when: + - ansible_distribution == "RedHat" + - rhel8cis_rhnsd_required + - rhel8cis_rule_1_2_1 + tags: + - level1-server + - level1-workstation + - notscored + - patch + - rule_1.2.1 + - skip_ansible_lint # Added as no_log still errors on ansuible-lint + +- name: "1.2.2 | L1 | PATCH | Disable the rhnsd Daemon" + service: + name: rhnsd + state: stopped + enabled: no + masked: true + when: + - ansible_distribution == "RedHat" + - rhnsd_service_status.stdout == "loaded" and not rhel8cis_rhnsd_required + - rhel8cis_rule_1_2_2 + tags: + - level1-server + - level1-workstation + - notscored + - patch + - rule_1.2.2 + +- name: "1.2.3 | L1 | AUDIT | Ensure GPG keys are configured" + command: gpg --quiet --with-fingerprint /etc/pki/rpm-gpg/RPM-GPG-KEY-{{ ansible_distribution|lower }}-release + when: + - rhel8cis_rule_1_2_3 + - ansible_distribution == "RedHat" + tags: + - level1-server + - level1-workstation + - notscored + - patch + - rule_1.2.3 + +- name: "1.2.4 | L1 | PATCH | Ensure gpgcheck is globally activated" + block: + - name: "1.2.4 | L1 | AUDIT | Ensure gpgcheck is globally activated | Find repos" + find: + paths: /etc/yum.repos.d + patterns: "*.repo" + register: yum_repos + changed_when: false + + - name: "1.2.4 | L1 | PATCH | Ensure gpgcheck is globally activated | Update yum.repos" + replace: + name: "{{ item.path }}" + regexp: "^gpgcheck=0" + replace: "gpgcheck=1" + with_items: + - "{{ yum_repos.files }}" + when: + - rhel8cis_rule_1_2_4 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.2.4 + +- name: "1.2.5 | L1 | Ensure package manager repositories are configured" + block: + - name: "1.2.5 | L1 | AUDIT | Ensure package manager repositories are configured | Get repo list" + command: dnf repolist + changed_when: false + failed_when: false + register: dnf_configured + check_mode: no + args: + warn: false + + - name: "1.2.5 | L1 | AUDIT | Ensure package manager repositories are configured | Display repo list" + debug: + msg: + - "Alert! Below are the configured repos. 
Please review and make sure all align with site policy" + - "{{ dnf_configured.stdout_lines }}" + when: + - rhel8cis_rule_1_2_5 + tags: + - level1-server + - level1-workstation + - notscored + - patch + - rule_1.2.5 + - skip_ansible_lint diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.3.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.3.x.yml new file mode 100644 index 0000000..6b2826f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.3.x.yml @@ -0,0 +1,44 @@ +--- + +- name: "1.3.1 | L1 | PATCH | Ensure sudo is installed" + package: + name: sudo + state: present + when: + - rhel8cis_rule_1_3_1 + tags: + - level1-server + - level1-workstation + - scored + - sudo + - patch + - rule_1.3.1 + +- name: "1.3.2 | L1 | PATCH | Ensure sudo commands use pty" + lineinfile: + dest: /etc/sudoers + line: "Defaults use_pty" + state: present + when: + - rhel8cis_rule_1_3_2 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.3.2 + +- name: "1.3.3 | L1 | PATCH | Ensure sudo log file exists" + lineinfile: + dest: /etc/sudoers + regexp: '^Defaults logfile=' + line: 'Defaults logfile="{{ rhel8cis_varlog_location }}"' + state: present + when: + - rhel8cis_rule_1_3_3 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.3.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.4.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.4.x.yml new file mode 100644 index 0000000..7eff30a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.4.x.yml @@ -0,0 +1,50 @@ +--- + +- name: "1.4.1 | L1 | PATCH | Ensure AIDE is installed" + block: + - name: "1.4.1 | L1 | PATCH | Ensure AIDE is installed | Install AIDE" + package: + name: aide + state: present + + - name: "1.4.1 | L1 | PATCH | Ensure AIDE is installed | Configure AIDE" + command: /usr/sbin/aide --init -B 'database_out=file:/var/lib/aide/aide.db.gz' + changed_when: false + failed_when: false + async: 45 + poll: 0 + args: + creates: /var/lib/aide/aide.db.gz + when: not ansible_check_mode + when: + - rhel8cis_config_aide + - rhel8cis_rule_1_4_1 + tags: + - level1-server + - level1-workstation + - scored + - aide + - patch + - rule_1.4.1 + +- name: "1.4.2 | L1 | PATCH | Ensure filesystem integrity is regularly checked" + cron: + name: Run AIDE integrity check + cron_file: "{{ rhel8cis_aide_cron['cron_file'] }}" + user: "{{ rhel8cis_aide_cron['cron_user'] }}" + minute: "{{ rhel8cis_aide_cron['aide_minute'] | default('0') }}" + hour: "{{ rhel8cis_aide_cron['aide_hour'] | default('5') }}" + day: "{{ rhel8cis_aide_cron['aide_day'] | default('*') }}" + month: "{{ rhel8cis_aide_cron['aide_month'] | default('*') }}" + weekday: "{{ rhel8cis_aide_cron['aide_weekday'] | default('*') }}" + job: "{{ rhel8cis_aide_cron['aide_job'] }}" + when: + - rhel8cis_rule_1_4_2 + tags: + - level1-server + - level1-workstation + - scored + - aide + - file_integrity + - patch + - rule_1.4.2 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.5.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.5.x.yml new file mode 100644 index 0000000..e9e84f6 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.5.x.yml @@ -0,0 +1,76 @@ +--- + +- name: "1.5.1 | L1 | PATCH | Ensure permissions on bootloader config are configured" + block: + - name: "1.5.1 | L1 | PATCH | Ensure permissions on bootloader config are configured" + file: + path: "{{ grub_cfg.stat.lnk_source }}" + owner: root + group: root + 
mode: 0600 + + - name: "1.5.1 | L1 | PATCH | Ensure permissions on bootloader config are configured | UEFI" + mount: + name: /boot/efi + src: "UUID={{ item.uuid }}" + fstype: vfat + state: present + opts: defaults,umask=0027,fmask=0077,uid=0,gid=0 + passno: '0' + loop: "{{ ansible_mounts }}" + when: + - not rhel8cis_legacy_boot + - item.mount == "/boot/efi" + when: + - rhel8cis_rule_1_5_1 + - grub_cfg.stat.exists + - grub_cfg.stat.islnk + tags: + - level1-server + - level1-workstation + - scored + - grub + - patch + - rule_1.5.1 + +- name: "1.5.2 | L1 | PATCH | Ensure bootloader password is set" + copy: + dest: /boot/grub2/user.cfg + content: "GRUB2_PASSWORD={{ rhel8cis_bootloader_password_hash }}" + owner: root + group: root + mode: 0600 + notify: grub2cfg + when: + - rhel8cis_set_boot_pass + - grub_pass is defined and grub_pass.passhash is defined + - grub_pass.passhash | length > 0 + - rhel8cis_rule_1_5_2 + tags: + - level1-server + - level1-workstation + - scored + - grub + - patch + - rule_1.5.2 + +- name: "1.5.3 | L1 | PATCH | Ensure authentication required for single user mode" + block: + - name: "1.5.3 | L1 | PATCH | Ensure authentication required for single user mode | Emergency service" + lineinfile: + dest: /usr/lib/systemd/system/emergency.service + regexp: '/sbin/sulogin' + line: 'ExecStart=-/usr/lib/systemd/systemd-sulogin-shell emergency' + + - name: "1.5.3 | L1 | PATCH | Ensure authentication required for single user mode | Rescue service" + lineinfile: + dest: /usr/lib/systemd/system/rescue.service + regexp: '/sbin/sulogin' + line: 'ExecStart=-/usr/lib/systemd/systemd-sulogin-shell rescue' + when: + - rhel8cis_rule_1_5_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_1.5.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.6.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.6.x.yml new file mode 100644 index 0000000..0a456e7 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.6.x.yml @@ -0,0 +1,60 @@ +--- + +- name: "1.6.1 | L1 | PATCH | Ensure core dumps are restricted" + block: + - name: "1.6.1 | L1 | Ensure core dumps are restricted | Update limits.conf file" + lineinfile: + state: present + dest: /etc/security/limits.conf + regexp: '^#?\\*.*core' + line: '* hard core 0' + insertbefore: '^# End of file' + + - name: "1.6.1 | L1 | PATCH | Ensure core dumps are restricted | Set active kernel parameter" + sysctl: + name: fs.suid_dumpable + value: '0' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + + - name: "1.6.1 | L1 | PATCH | Ensure core dumps are restricted | if systemd coredump" + lineinfile: + path: /etc/systemd/coredump.conf + regexp: "{{ item.regexp }}" + line: "{{ item.regexp }}{{ item.line }}" + state: present + with_items: + - {'regexp': 'Storage=', 'line': 'none'} + - {'regexp': 'ProcessSizeMax=', 'line': '0'} + notify: + - systemd_daemon_reload + when: + - systemd_coredump.stat.exists + when: + - rhel8cis_rule_1_6_1 + tags: + - level1-server + - level1-workstation + - scored + - sysctl + - patch + - rule_1.6.1 + +- name: "1.6.2 | L1 | PATCH | Ensure address space layout randomization (ASLR) is enabled" + sysctl: + name: kernel.randomize_va_space + value: '2' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + when: + - rhel8cis_rule_1_6_2 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_1.6.2 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.7.1.x.yml 
b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.7.1.x.yml new file mode 100644 index 0000000..e09a156 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.7.1.x.yml @@ -0,0 +1,115 @@ +--- + +- name: "1.7.1.1 | L2 | PATCH | Ensure SELinux is installed" + package: + name: libselinux + state: present + when: + - rhel8cis_rule_1_7_1_1 + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_1.7.1.1 + +- name: "1.7.1.2 | L2 | PATCH | Ensure SELinux is not disabled in bootloader configuration" + replace: + dest: /etc/default/grub + regexp: '(selinux|enforcing)\s*=\s*0\s*' + replace: '' + register: selinux_grub_patch + ignore_errors: yes + notify: grub2cfg + when: + - rhel8cis_rule_1_7_1_2 + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_1.7.1.2 + +- name: "1.7.1.3 | L2 | PATCH | Ensure SELinux policy is configured" + selinux: + conf: /etc/selinux/config + policy: "{{ rhel8cis_selinux_pol }}" + state: enforcing + when: + - not rhel8cis_selinux_disable + - rhel8cis_rule_1_7_1_3 + tags: + - level2-server + - level2-workstation + - scored + - selinux + - patch + - rule_1.7.1.3 + +- name: "1.7.1.4 | L2 | PATCH | Ensure the SELinux state is enforcing" + selinux: + conf: /etc/selinux/config + policy: "{{ rhel8cis_selinux_pol }}" + state: enforcing + when: + - not rhel8cis_selinux_disable + - rhel8cis_rule_1_7_1_4 + tags: + - level2-server + - level2-workstation + - scored + - selinux + - patch + - rule_1.7.1.4 + +- name: "1.7.1.5 | L2 | AUDIT | Ensure no unconfined daemons exist" + block: + - name: "1.7.1.5 | L2 | AUDIT | Ensure no unconfined daemons exist | Find the unconfined daemons" + shell: ps -eZ | grep unconfined_service_t | egrep -vw "tr|ps|egrep|bash|awk" | tr ':' ' ' | awk '{ print $NF }' + register: rhelcis_1_7_1_5_unconf_daemons + failed_when: false + changed_when: false + + - name: "1.7.1.5 | L2 | AUDIT | Ensure no unconfined daemons exist | Message on no unconfined daemones" + debug: + msg: "Good News! There are no unconfined daemons found on your system" + when: rhelcis_1_7_1_5_unconf_daemons.stdout | length == 0 + + - name: "1.7.1.5 | L2 | AUDIT | Ensure no unconfined daemons exist | Message on unconfined daemones" + debug: + msg: "Warning! 
You have unconfined daemons: {{ rhelcis_1_7_1_5_unconf_daemons.stdout_lines }}" + when: rhelcis_1_7_1_5_unconf_daemons.stdout | length > 0 + when: + - rhel8cis_rule_1_7_1_5 + tags: + - level2-server + - level2-workstation + - audit + - rule_1.7.1.5 + +- name: "1.7.1.6 | L2 | PATCH | Ensure SETroubleshoot is not installed" + package: + name: setroubleshoot + state: absent + when: + - rhel8cis_rule_1_7_1_6 + - "'setroubleshoot' in ansible_facts.packages" + tags: + - level2-server + - scored + - selinux + - patch + - rule_1.7.1.6 + +- name: "1.7.1.7 | L2 | PATCH | Ensure the MCS Translation Service (mcstrans) is not installed" + package: + name: mcstrans + state: absent + when: + - rhel8cis_rule_1_7_1_7 + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_1.7.1.7 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.1.x.yml new file mode 100644 index 0000000..ab8e3de --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.1.x.yml @@ -0,0 +1,96 @@ +--- + +- name: "1.8.1.1 | L1 | PATCH | Ensure message of the day is configured properly" + template: + src: etc/motd.j2 + dest: /etc/motd + owner: root + group: root + mode: 0644 + when: + - rhel8cis_rule_1_8_1_1 + tags: + - level1-server + - level1-workstation + - banner + - patch + - rule_1.8.1.1 + +- name: "1.8.1.2 | L1 | PATCH | Ensure local login warning banner is configured properly" + template: + src: etc/issue.j2 + dest: /etc/issue + owner: root + group: root + mode: 0644 + when: + - rhel8cis_rule_1_8_1_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_1.8.1.2 + +- name: "1.8.1.3 | L1 | PATCH | Ensure remote login warning banner is configured properly" + template: + src: etc/issue.net.j2 + dest: /etc/issue.net + owner: root + group: root + mode: 0644 + when: + - rhel8cis_rule_1_8_1_3 + tags: + - level1-server + - level1-workstation + - banner + - patch + - rule_1.8.1.3 + +- name: "1.8.1.4 | L1 | PATCH | Ensure permissions on /etc/motd are configured" + file: + dest: /etc/motd + state: file + owner: root + group: root + mode: 0644 + when: + - rhel8cis_rule_1_8_1_4 + tags: + - level1-server + - level1-workstation + - perms + - patch + - rule_1.8.1.4 + +- name: "1.8.1.5 | L1 | PATCH | Ensure permissions on /etc/issue are configured" + file: + dest: /etc/issue + state: file + owner: root + group: root + mode: 0644 + when: + - rhel8cis_rule_1_8_1_5 + tags: + - level1-server + - level1-workstation + - perms + - patch + - rule_1.8.1.5 + +- name: "1.8.1.6 | L1 | PATCH | Ensure permissions on /etc/issue.net are configured" + file: + dest: /etc/issue.net + state: file + owner: root + group: root + mode: 0644 + when: + - rhel8cis_rule_1_8_1_6 + tags: + - level1-server + - level1-workstation + - perms + - patch + - rule_1.8.1.6 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.2.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.2.yml new file mode 100644 index 0000000..0f3593d --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.8.2.yml @@ -0,0 +1,27 @@ +--- + +- name: "1.8.2 | L1 | PATCH | Ensure GDM login banner is configured" + lineinfile: + dest: "{{ item.file }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + create: yes + owner: root + group: root + mode: 0644 + with_items: + - { file: '/etc/dconf/profile/gdm', regexp: 'user-db', line: 'user-db:user' } + - { file: '/etc/dconf/profile/gdm', regexp: 'system-db', line: 
'system-db:gdm' } + - { file: '/etc/dconf/profile/gdm', regexp: 'file-db', line: 'file-db:/usr/share/gdm/greeter-dconf-defaults' } + - { file: '/etc/dconf/db/gdm.d/01-banner-message', regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]' } + - { file: '/etc/dconf/db/gdm.d/01-banner-message', regexp: 'banner-message-enable', line: 'banner-message-enable=true' } + - { file: '/etc/dconf/db/gdm.d/01-banner-message', regexp: 'banner-message-text', line: "banner-message-text='{{ rhel8cis_warning_banner }}' " } + when: + - rhel8cis_gui + - rhel8cis_rule_1_8_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_1.8.2 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.9.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.9.yml new file mode 100644 index 0000000..f413c2c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/cis_1.9.yml @@ -0,0 +1,15 @@ +--- + +- name: "1.9 | L1 | PATCH | Ensure updates, patches, and additional security software are installed" + package: + name: "*" + state: latest + when: + - rhel8cis_rule_1_9 + - not system_is_ec2 + tags: + - level1-server + - level1-workstation + - patch + - rule_1.9 + - skip_ansible_lint diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/main.yml new file mode 100644 index 0000000..7625f34 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_1/main.yml @@ -0,0 +1,42 @@ +--- + +- name: "SECTION | 1.1 | FileSystem Configurations\n + SECTION | 1.1.1.x | Disable unused filesystems" + include: cis_1.1.1.x.yml +- include: cis_1.1.x.yml + +- name: "SECTION | 1.2 | Configure Software Updates" + include: cis_1.2.x.yml + +- name: "SECTION | 1.3 | Configure sudo" + include: cis_1.3.x.yml + +- name: "SECTION | 1.4 | Filesystem Integrity" + include: cis_1.4.x.yml + when: rhel8cis_config_aide + +- name: "SECTION | 1.5 | Secure Boot Settings" + include: cis_1.5.x.yml + +- name: "SECTION | 1.6 | Additional Process Hardening" + include: cis_1.6.x.yml + +- name: "SECTION | 1.7 | bootloader and Mandatory Access Control" + include: cis_1.7.1.x.yml + when: not rhel8cis_selinux_disable + +- name: "SECTION | 1.8 | Warning Banners" + include: cis_1.8.1.x.yml + +- name: "SECTION | 1.9 | Updated and Patches" + include: cis_1.9.yml + +- name: "SECTION | 1.10 | Crypto policies" + include: cis_1.10.yml + when: + - not system_is_ec2 + +- name: "SECTION | 1.11 | FIPS/FUTURE Crypto policies" + include: cis_1.11.yml + when: + - not system_is_ec2 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.1.1.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.1.1.yml new file mode 100644 index 0000000..74352ae --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.1.1.yml @@ -0,0 +1,14 @@ +--- + +- name: "2.1.1 | L1 | PATCH | Ensure xinetd is not installed" + package: + name: xinetd + state: absent + when: + - rhel8cis_rule_2_1_1 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_2.1.1 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.1.x.yml new file mode 100644 index 0000000..57ad1c6 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.1.x.yml @@ -0,0 +1,42 @@ +--- + +- name: "2.2.1.1 | L1 | PATCH | Ensure time synchronization is in use - service install" + package: + name: "{{ rhel8cis_time_synchronization }}" + state: present + when: + - 
rhel8cis_rule_2_2_1_1 + - not rhel8cis_system_is_container + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.1.1 + +- name: "2.2.1.2 | L1 | PATCH | Ensure chrony is configured" + block: + - name: "2.2.1.2 | L1 | PATCH | Ensure chrony is configured | Set configuration" + template: + src: chrony.conf.j2 + dest: /etc/chrony.conf + owner: root + group: root + mode: 0644 + + - name: "2.2.1.2 | L1 | PATCH | Ensure chrony is configured | modify /etc/sysconfig/chronyd | 1" + lineinfile: + dest: /etc/sysconfig/chronyd + regexp: "^(#)?OPTIONS" + line: "OPTIONS=\"-u chrony\"" + state: present + create: yes + mode: 0644 + when: + - rhel8cis_time_synchronization == "chrony" + - rhel8cis_rule_2_2_1_2 + - not rhel8cis_system_is_container + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.1.2 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.x.yml new file mode 100644 index 0000000..a71b84f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.2.x.yml @@ -0,0 +1,288 @@ +--- + +- name: "2.2.2 | L1 | PATCH | Ensure X Window System is not installed" + block: + - name: "2.2.2 | L1 | AUDIT | Ensure X Window System is not installed | capture xorg-x11 packages" + shell: rpm -qa | grep xorg-x11 + args: + warn: no + failed_when: xorg_x11_installed.rc >=2 + check_mode: no + changed_when: false + register: xorg_x11_installed + + - name: "2.2.2 | L1 | PATCH | Ensure X Window System is not installed | remove packages if found" + shell: "dnf remove {{ item }}" + args: + warn: no + with_items: + - xorg_x11_installed.stdout_lines + when: xorg_x11_installed.stdout | length > 0 + when: + - not rhel8cis_xwindows_required + - rhel8cis_rule_2_2_2 + tags: + - level1-server + - scored + - xwindows + - patch + - rule_2.2.2 + +- name: "2.2.3 | L1 | PATCH | Ensure rsync service is not enabled " + service: + name: rsyncd + state: stopped + enabled: no + when: + - not rhel8cis_rsyncd_server + - "'rsyncd' in ansible_facts.packages" + - rhel8cis_rule_2_2_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.3 + +- name: "2.2.4 | L1 | PATCH | Ensure Avahi Server is not enabled" + service: + name: avahi-daemon + state: stopped + enabled: no + when: + - not rhel8cis_avahi_server + - "'avahi' in ansible_facts.packages" + - rhel8cis_rule_2_2_4 + tags: + - level1-server + - level1-workstation + - scored + - avahi + - services + - patch + - rule_2.2.4 + +- name: "2.2.5 | L1 | PATCH | Ensure SNMP Server is not enabled" + service: + name: snmpd + state: stopped + enabled: no + when: + - not rhel8cis_snmp_server + - "'net-snmp' in ansible_facts.packages" + - rhel8cis_rule_2_2_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.5 + +- name: "2.2.6 | L1 | PATCH | Ensure HTTP Proxy Server is not enabled" + service: + name: squid + state: stopped + enabled: no + when: + - not rhel8cis_squid_server + - "'squid' in ansible_facts.packages" + - rhel8cis_rule_2_2_6 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.6 + +- name: "2.2.7 | L1 | PATCH | Ensure Samba is not enabled" + service: + name: smb + state: stopped + enabled: no + when: + - not rhel8cis_smb_server + - "'samba' in ansible_facts.packages" + - rhel8cis_rule_2_2_7 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.7 + +- name: "2.2.8 | L1 | PATCH | Ensure IMAP and POP3 server is not enabled" + service: + name: dovecot + state: stopped + enabled: no + when: + - 
not rhel8cis_dovecot_server + - "'dovecot' in ansible_facts.packages" + - rhel8cis_rule_2_2_8 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.8 + +- name: "2.2.9 | L1 | PATCH | Ensure HTTP server is not enabled" + service: + name: httpd + state: stopped + enabled: no + when: + - not rhel8cis_httpd_server + - "'httpd' in ansible_facts.packages" + - rhel8cis_rule_2_2_9 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.9 + +- name: "2.2.10 | L1 | PATCH | Ensure FTP Server is not enabled" + service: + name: vsftpd + state: stopped + enabled: no + when: + - not rhel8cis_vsftpd_server + - "'vsftpd' in ansible_facts.packages" + - rhel8cis_rule_2_2_10 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.10 + +- name: "2.2.11 | L1 | PATCH | Ensure DNS Server is not enabled" + service: + name: named + state: stopped + enabled: no + when: + - not rhel8cis_named_server + - "'bind' in ansible_facts.packages" + - rhel8cis_rule_2_2_11 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.11 + +- name: "2.2.12 | L1 | PATCH | Ensure NFS is not enabled" + service: + name: nfs-server + state: stopped + enabled: no + when: + - not rhel8cis_nfs_rpc_server + - "'nfs-utils' in ansible_facts.packages" + - rhel8cis_rule_2_2_12 + tags: + - level1-server + - level1-workstation + - scored + - nfs + - services + - patch + - rule_2.2.12 + +- name: "2.2.13 | L1 | PATCH | Ensure RPC is not enabled" + service: + name: rpcbind + state: stopped + enabled: no + when: + - not rhel8cis_nfs_rpc_server + - "'rpcbind' in ansible_facts.packages" + - rhel8cis_rule_2_2_13 + tags: + - level1-server + - level1-workstation + - scored + - rpc + - services + - patch + - rule_2.2.13 + +- name: "2.2.14 | L1 | PATCH | Ensure LDAP server is not enabled" + service: + name: slapd + state: stopped + enabled: no + when: + - not rhel8cis_ldap_server + - "'openldap-servers' in ansible_facts.packages" + - rhel8cis_rule_2_2_14 + tags: + - level1-server + - level1-workstation + - scored + - ldap + - services + - patch + - rule_2.2.14 + +- name: "2.2.15 | L1 | PATCH | Ensure DHCP Server is not enabled" + service: + name: dhcpd + state: stopped + enabled: no + when: + - not rhel8cis_dhcp_server + - "'dhcp' in ansible_facts.packages" + - rhel8cis_rule_2_2_15 + tags: + - level1-server + - level1-workstation + - scored + - dhcp + - services + - patch + - rule_2.2.15 + +- name: "2.2.16 | L1 | PATCH | Ensure CUPS is not enabled" + service: + name: cups + state: stopped + enabled: no + when: + - not rhel8cis_cups_server + - "'cups' in ansible_facts.packages" + - rhel8cis_rule_2_2_16 + tags: + - level1-server + - level2-workstation + - scored + - cups + - services + - patch + - rule_2.2.16 + +- name: "2.2.17 | L1 | PATCH | Ensure NIS Server is not enabled" + service: + name: ypserv + state: stopped + enabled: no + when: + - not rhel8cis_nis_server + - "'ypserv' in ansible_facts.packages" + - rhel8cis_rule_2_2_17 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.17 + +- name: "2.2.18 | L1 | PATCH | Ensure mail transfer agent is configured for local-only mode" + lineinfile: + dest: /etc/postfix/main.cf + regexp: "^(#)?inet_interfaces" + line: "inet_interfaces = loopback-only" + notify: restart postfix + when: + - not rhel8cis_is_mail_server + - "'postfix' in ansible_facts.packages" + - rhel8cis_rule_2_2_18 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.2.18 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.3.x.yml 
b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.3.x.yml new file mode 100644 index 0000000..00e91f0 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/cis_2.3.x.yml @@ -0,0 +1,43 @@ +--- + +- name: "2.3.1 | L1 | PATCH | Ensure NIS Client is not installed" + package: + name: ypbind + state: absent + when: + - not rhel8cis_ypbind_required + - "'ypbind' in ansible_facts.packages" + - rhel8cis_rule_2_3_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.3.1 + +- name: "2.3.2 | L1 | PATCH | Ensure telnet client is not installed" + package: + name: telnet + state: absent + when: + - not rhel8cis_telnet_required + - "'telnet' in ansible_facts.packages" + - rhel8cis_rule_2_3_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.3.2 + +- name: "2.3.3 | L1 | PATCH | Ensure LDAP client is not installed" + package: + name: openldap-clients + state: absent + when: + - not rhel8cis_openldap_clients_required + - "'openldap-clients' in ansible_facts.packages" + - rhel8cis_rule_2_3_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_2.3.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/main.yml new file mode 100644 index 0000000..1c99c03 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_2/main.yml @@ -0,0 +1,13 @@ +--- + +- name: "SECTION | 2.1 | xinetd" + include: cis_2.1.1.yml + +- name: "SECTION | 2.2.1 | Time Synchronization" + include: cis_2.2.1.x.yml + +- name: "SECTION | 2.2 | Special Purpose Services" + include: cis_2.2.x.yml + +- name: "SECTION | 2.3 | Service Clients" + include: cis_2.3.x.yml diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.1.x.yml new file mode 100644 index 0000000..b2f532e --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.1.x.yml @@ -0,0 +1,53 @@ +--- + +- name: "3.1.1 | L1 | PATCH | Ensure IP forwarding is disabled" + block: + - name: "3.1.1 | L1 | PATCH | Ensure IP forwarding is disabled | Disable IPv4 forwarding" + sysctl: + name: net.ipv4.ip_forward + value: '0' + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + + - name: "3.1.1 | L1 | PATCH | Ensure IP forwarding is disabled | Disable IPv6 forwarding" + sysctl: + name: net.ipv6.conf.all.forwarding + value: '0' + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv6 route table + when: rhel8cis_ipv6_required + when: + - not rhel8cis_is_router + - rhel8cis_rule_3_1_1 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.1.1 + +- name: "3.1.2 | L1 | PATCH | Ensure packet redirect sending is disabled" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + with_items: + - { name: net.ipv4.conf.all.send_redirects, value: 0 } + - { name: net.ipv4.conf.default.send_redirects, value: 0 } + when: + - not rhel8cis_is_router + - rhel8cis_rule_3_1_2 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.1.2 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.2.x.yml new file mode 100644 index 0000000..ad2f796 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.2.x.yml @@ -0,0 +1,207 @@ +--- + +- name: "3.2.1 | L1 | PATCH | 
Ensure source routed packets are not accepted" + block: + - name: "3.2.1 | L1 | PATCH | Ensure source routed packets are not accepted | Set routed packets IPv4" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + with_items: + - { name: net.ipv4.conf.all.accept_source_route, value: 0 } + - { name: net.ipv4.conf.default.accept_source_route, value: 0 } + + - name: "3.2.1 | L1 | PATCH | Ensure source routed packets are not accepted | Set routed packets IPv6" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv6 route table + with_items: + - { name: net.ipv6.conf.all.accept_source_route, value: 0} + - { name: net.ipv6.conf.default.accept_source_route, value: 0 } + when: rhel8cis_ipv6_required + when: + - rhel8cis_rule_3_2_1 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.1 + +- name: "3.2.2 | L1 | PATCH | Ensure ICMP redirects are not accepted" + block: + - name: "3.2.2 | L1 | PATCH | Ensure ICMP redirects are not accepted | Set ICMP redirects IPv4" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + with_items: + - { name: net.ipv4.conf.all.accept_redirects, value: 0 } + - { name: net.ipv4.conf.default.accept_redirects, value: 0 } + + - name: "3.2.2 | L1 | PATCH | Ensure ICMP redirects are not accepted | Set ICMP redirects IPv6" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv6 route table + with_items: + - { name: net.ipv6.conf.all.accept_redirects, value: 0 } + - { name: net.ipv6.conf.default.accept_redirects, value: 0 } + when: rhel8cis_ipv6_required + when: + - rhel8cis_rule_3_2_2 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.2 + +- name: "3.2.3 | L1 | PATCH | Ensure secure ICMP redirects are not accepted" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + with_items: + - { name: net.ipv4.conf.all.secure_redirects, value: 0 } + - { name: net.ipv4.conf.default.secure_redirects, value: 0 } + when: + - rhel8cis_rule_3_2_3 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.3 + +- name: "3.2.4 | L1 | PATCH | Ensure suspicious packets are logged" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + with_items: + - { name: net.ipv4.conf.all.log_martians, value: 1 } + - { name: net.ipv4.conf.default.log_martians, value: 1 } + when: + - rhel8cis_rule_3_2_4 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.4 + +- name: "3.2.5 | L1 | PATCH | Ensure broadcast ICMP requests are ignored" + sysctl: + name: net.ipv4.icmp_echo_ignore_broadcasts + value: '1' + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - rhel8cis_rule_3_2_5 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.5 + +- name: "3.2.6 | L1 | PATCH | Ensure bogus ICMP responses are ignored" + sysctl: + name: 
net.ipv4.icmp_ignore_bogus_error_responses + value: '1' + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - rhel8cis_rule_3_2_6 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.6 + +- name: "3.2.7 | L1 | PATCH | Ensure Reverse Path Filtering is enabled" + sysctl: + name: net.ipv4.conf.default.rp_filter + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - rhel8cis_rule_3_2_7 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.7 + +- name: "3.2.8 | L1 | PATCH | Ensure TCP SYN Cookies is enabled" + sysctl: + name: net.ipv4.tcp_syncookies + value: '1' + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - rhel8cis_rule_3_2_8 + tags: + - level1-server + - level1-workstation + - sysctl + - patch + - rule_3.2.8 + +- name: "3.2.9 | L2 | PATCH | Ensure IPv6 router advertisements are not accepted" + sysctl: + name: '{{ item.name }}' + value: '{{ item.value }}' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv6 route table + with_items: + - { name: net.ipv6.conf.all.accept_ra, value: 0 } + - { name: net.ipv6.conf.default.accept_ra, value: 0 } + when: + - rhel8cis_ipv6_required + - rhel8cis_rule_3_2_9 + tags: + - level2-server + - level2-workstation + - sysctl + - patch + - rule_3.2.9 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.3.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.3.x.yml new file mode 100644 index 0000000..130af2c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.3.x.yml @@ -0,0 +1,61 @@ +--- + +- name: "3.3.1 | L2 | PATCH | Ensure DCCP is disabled" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install dccp(\\s|$)" + line: "install dccp /bin/true" + create: yes + mode: 0600 + when: + - rhel8cis_rule_3_3_1 + tags: + - level2-server + - level2-workstation + - patch + - rule_3.3.1 + +- name: "3.3.2 | L2 | PATCH | Ensure SCTP is disabled" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install sctp(\\s|$)" + line: "install sctp /bin/true" + create: yes + mode: 0600 + when: + - rhel8cis_rule_3_3_2 + tags: + - level2-server + - level2-workstation + - patch + - rule_3.3.2 + +- name: "3.3.3 | L2 | PATCH | Ensure RDS is disabled" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install rds(\\s|$)" + line: "install rds /bin/true" + create: yes + mode: 0600 + when: + - rhel8cis_rule_3_3_3 + tags: + - level2-server + - level2-workstation + - patch + - rule_3.3.3 + +- name: "3.3.4 | L2 | PATCH | Ensure TIPC is disabled" + lineinfile: + dest: /etc/modprobe.d/CIS.conf + regexp: "^(#)?install tipc(\\s|$)" + line: "install tipc /bin/true" + create: yes + mode: 0600 + when: + - rhel8cis_rule_3_3_4 + tags: + - level2-server + - level2-workstation + - patch + - rule_3.3.4 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.1.1.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.1.1.yml new file mode 100644 index 0000000..9b79813 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.1.1.yml @@ -0,0 +1,13 @@ +--- + +- name: "3.4.1.1 | L1 | PATCH | Ensure a Firewall package is installed" + package: + name: "{{ rhel8cis_firewall }}" + state: present + when: + - rhel8cis_rule_3_4_1_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.1.1 diff 
--git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.2.x.yml new file mode 100644 index 0000000..d273876 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.2.x.yml @@ -0,0 +1,102 @@ +--- + +- name: "3.4.2.1 | L1 | PATCH | Ensure firewalld service is enabled and running" + service: + name: firewalld + state: started + enabled: yes + when: + - rhel8cis_firewall == "firewalld" + - rhel8cis_rule_3_4_2_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.2.1 + +- name: "3.4.2.2 | L1 | PATCH | Ensure iptables is not enabled with firewalld" + systemd: + name: iptables + enabled: false + masked: true + ignore_errors: true + when: + - rhel8cis_firewall == "firewalld" + - rhel8cis_rule_3_4_2_2 + tags: + - skip_ansible_lint + - level1-server + - level1-workstation + - patch + - rule_3.4.2.2 + +- name: "3.4.2.3 | L1 | PATCH | Ensure nftables is not enabled with firewalld" + systemd: + name: nftables + enabled: false + masked: true + when: + - rhel8cis_firewall == "firewalld" + - rhel8cis_rule_3_4_2_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.2.3 + +- name: "3.4.2.4 | L1 | PATCH | Ensure default zone is set" + command: firewall-cmd --set-default-zone="{{ rhel8cis_default_zone }}" + when: + - rhel8cis_firewall == "firewalld" + - rhel8cis_rule_3_4_2_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.2.4 + +- name: "3.4.2.5 | L1 | AUDIT | Ensure network interfaces are assigned to appropriate zone" + block: + - name: "3.4.2.5 | L1 | AUDIT | Ensure network interfaces are assigned to appropriate zone | Get list of interfaces and policies" + shell: "nmcli -t connection show | awk -F: '{ if($4){print $4} }' | while read INT; do firewall-cmd --get-active-zones | grep -B1 $INT; done" + changed_when: false + failed_when: false + check_mode: no + register: rhel8cis_3_4_2_5_interfacepolicy + + - name: "3.4.2.5 | L1 | AUDIT | Ensure network interfaces are assigned to appropriate zone | Get list of interfaces and policies | Show the interface to policy" + debug: + msg: + - "The items below are the policies tied to the interfaces, please correct as needed" + - "{{ rhel8cis_3_4_2_5_interfacepolicy.stdout_lines }}" + when: + - rhel8cis_firewall == "firewalld" + - rhel8cis_rule_3_4_2_5 + tags: + - level1-server + - level1-workstation + - audit + - rule_3.4.2.5 + +- name: "3.4.2.6 | L1 | AUDIT | Ensure firewalld drops unnecessary services and ports" + block: + - name: "3.4.2.6 | L1 | AUDIT | Ensure firewalld drops unnecessary services and ports | Get list of services and ports" + shell: "firewall-cmd --get-active-zones | awk '!/:/ {print $1}' | while read ZN; do firewall-cmd --list-all --zone=$ZN; done" + changed_when: false + failed_when: false + check_mode: no + register: rhel8cis_3_4_2_6_servicesport + + - name: "3.4.2.6 | L1 | AUDIT | Ensure firewalld drops unnecessary services and ports | Show services and ports" + debug: + msg: + - "The items below are the services and ports that are accepted, please correct as needed" + - "{{ rhel8cis_3_4_2_6_servicesport.stdout_lines }}" + when: + - rhel8cis_firewall == "firewalld" + - rhel8cis_rule_3_4_2_6 + tags: + - level1-server + - level1-workstation + - audit + - rule_3.4.2.6 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.3.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.3.x.yml new file mode 100644 index 0000000..3b7467d --- /dev/null +++ 
b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.3.x.yml @@ -0,0 +1,264 @@ +--- + +- name: "3.4.3.1 | L1 | PATCH | Ensure iptables are flushed with nftables" + command: ip6tables -F + when: + - rhel8cis_rule_3_4_3_1 + - rhel8cis_firewall != "iptables" + - rhel8cis_ipv6_required + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.1 + +- name: "3.4.3.2 | L1 | AUDIT | Ensure an nftables table exists" + block: + - name: "3.4.3.2 | L1 | AUDIT | Ensure a table exists | Check for tables" + command: nft list tables + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_2_nft_tables + + - name: "3.4.3.2 | L1 | AUDIT | Ensure an nftables table exists | Show existing tables" + debug: + msg: + - "Below are the current nft tables, please review" + - "{{ rhel8cis_3_4_3_2_nft_tables.stdout_lines }}" + when: rhel8cis_3_4_3_2_nft_tables.stdout | length > 0 + + - name: "3.4.3.2 | L1 | AUDIT | Ensure an nftables table exists | Alert on no tables" + debug: + msg: + - "Warning! You currently have no nft tables, please review your setup" + - 'Use the command "nft create table inet
" to create a new table' + when: + - rhel8cis_3_4_3_2_nft_tables.stdout | length == 0 + - not rhel8cis_nft_tables_autonewtable + + - name: "3.4.3.2 | L1 | PATCH | Ensure a table exists | Create table if needed" + command: nft create table inet "{{ rhel8cis_nft_tables_tablename }}" + failed_when: no + when: rhel8cis_nft_tables_autonewtable + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.2 + +- name: "3.4.3.3 | L1 | PATCH | Ensure nftables base chains exist" + block: + - name: "3.4.3.3 | L1 | Ensure nftables base chains exist | Get current chains for INPUT" + shell: nft list ruleset | grep 'hook input' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_3_input_chains + + - name: "3.4.3.3 | L1 | AUDIT | Ensure nftables base chains exist | Get current chains for FORWARD" + shell: nft list ruleset | grep 'hook forward' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_3_forward_chains + + - name: "3.4.3.3 | L1 | AUDIT | Ensure nftables base chains exist | Get current chains for OUTPUT" + shell: nft list ruleset | grep 'hook output' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_3_output_chains + + - name: "3.4.3.3 | L1 | AUDIT | Ensure nftables base chains exist | Display chains for review" + debug: + msg: + - "Below are the current INPUT chains" + - "{{ rhel8cis_3_4_3_3_input_chains.stdout_lines }}" + - "Below are the current FORWARD chains" + - "{{ rhel8cis_3_4_3_3_forward_chains.stdout_lines }}" + - "Below are teh current OUTPUT chains" + - "{{ rhel8cis_3_4_3_3_output_chains.stdout_lines }}" + when: not rhel8cis_nft_tables_autochaincreate + + - name: "3.4.3.3 | L1 | PATCH | Ensure nftables base chains exist | Create chains if needed" + shell: "{{ item }}" + args: + warn: no + failed_when: no + with_items: + - nft create chain inet "{{ rhel8cis_nft_tables_tablename }}" input { type filter hook input priority 0 \; } + - nft create chain inet "{{ rhel8cis_nft_tables_tablename }}" forward { type filter hook forward priority 0 \; } + - nft create chain inet "{{ rhel8cis_nft_tables_tablename }}" output { type filter hook output priority 0 \; } + when: rhel8cis_nft_tables_autochaincreate + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.3 + +- name: "3.4.3.4 | L1 | PATCH | Ensure nftables loopback traffic is configured" + block: + - name: "3.4.3.4 | L1 | AUDIT | Ensure nftables loopback traffic is configured | Gather iif lo accept existence" + shell: nft list ruleset | awk '/hook input/,/}/' | grep 'iif "lo" accept' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_4_iiflo + + - name: "3.4.3.4 | L1 | AUDIT | Ensure nftables loopback traffic is configured | Gather ip saddr existence" + shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip saddr' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_4_ipsaddr + + - name: "3.4.3.4 | L1 | AUDIT | Ensure nftables loopback traffic is configured | Gather ip6 saddr existence" + shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip6 saddr' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_4_ip6saddr + + - name: "3.4.3.4 | L1 | PATCH | Ensure nftables loopback traffic is configured | Set iif lo accept rule" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input iif lo accept + when: '"iif \"lo\" accept" not in 
rhel8cis_3_4_3_4_iiflo.stdout' + + - name: "3.4.3.4 | L1 | PATCH | Ensure nftables loopback traffic is configured | Set ip sddr rule" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input ip saddr 127.0.0.0/8 counter drop + when: '"ip saddr 127.0.0.0/8 counter packets 0 bytes 0 drop" not in rhel8cis_3_4_3_4_ipsaddr.stdout' + + - name: "3.4.3.4 | L1 | PATCH | Ensure nftables loopback traffic is configured | Set ip6 saddr rule" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input ip6 saddr ::1 counter drop + when: '"ip6 saddr ::1 counter packets 0 bytes 0 drop" not in rhel8cis_3_4_3_4_ip6saddr.stdout' + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.4 + +- name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and established connections are configured" + block: + - name: "3.4.3.5 | L1 | AUDIT | Ensure nftables outbound and established connections are configured | Gather incoming connection rules" + shell: nft list ruleset | awk '/hook input/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_5_inconnectionrule + + - name: "3.4.3.5 | L1 | AUDIT | Ensure nftables outbound and established connections are configured | Gather outbound connection rules" + shell: nft list ruleset | awk '/hook output/,/}/' | grep -E 'ip protocol (tcp|udp|icmp) ct state' + changed_when: false + failed_when: false + register: rhel8cis_3_4_3_5_outconnectionrule + + - name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add input tcp established accept policy" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input ip protocol tcp ct state established accept + when: '"ip protocol tcp ct state established accept" not in rhel8cis_3_4_3_5_inconnectionrule.stdout' + + - name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add input udp established accept policy" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input ip protocol udp ct state established accept + when: '"ip protocol udp ct state established accept" not in rhel8cis_3_4_3_5_inconnectionrule.stdout' + + - name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add input icmp established accept policy" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input ip protocol icmp ct state established accept + when: '"ip protocol icmp ct state established accept" not in rhel8cis_3_4_3_5_inconnectionrule.stdout' + + - name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add output tcp new, related, established accept policy" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" output ip protocol tcp ct state new,related,established accept + when: '"ip protocol tcp ct state established,related,new accept" not in rhel8cis_3_4_3_5_outconnectionrule.stdout' + + - name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and established connections are configured | Add output udp new, related, established accept policy" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" output ip protocol udp ct state new,related,established accept + when: '"ip protocol udp ct state established,related,new accept" not in rhel8cis_3_4_3_5_outconnectionrule.stdout' + + - name: "3.4.3.5 | L1 | PATCH | Ensure nftables outbound and 
established connections are configured | Add output icmp new, related, established accept policy" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" output ip protocol icmp ct state new,related,established accept + when: '"ip protocol icmp ct state established,related,new accept" not in rhel8cis_3_4_3_5_outconnectionrule.stdout' + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.5 + +- name: "3.4.3.6 | L1 | PATCH | Ensure nftables default deny firewall policy" + block: + - name: "3.4.3.6 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for hook input deny policy" + shell: nft list table inet "{{ rhel8cis_nft_tables_tablename }}" | grep 'hook input' + failed_when: false + changed_when: false + register: rhel8cis_3_4_3_6_inputpolicy + + - name: "3.4.3.6 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for hook forward deny policy" + shell: nft list table inet "{{ rhel8cis_nft_tables_tablename }}" | grep 'hook forward' + failed_when: false + changed_when: false + register: rhel8cis_3_4_3_6_forwardpolicy + + - name: "3.4.3.6 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for hook output deny policy" + shell: nft list table inet "{{ rhel8cis_nft_tables_tablename }}" | grep 'hook output' + failed_when: false + changed_when: false + register: rhel8cis_3_4_3_6_outputpolicy + + - name: "3.4.3.6 | L1 | AUDIT | Ensure nftables default deny firewall policy | Check for SSH allow" + shell: nft list table inet "{{ rhel8cis_nft_tables_tablename }}" | grep 'ssh' + failed_when: false + changed_when: false + register: rhel8cis_3_4_3_6_sshallowcheck + + - name: "3.4.3.6 | L1 | PATCH | Ensure nftables default deny firewall policy | Enable SSH traffic" + command: nft add rule inet "{{ rhel8cis_nft_tables_tablename }}" input tcp dport ssh accept + when: '"tcp dport ssh accept" not in rhel8cis_3_4_3_6_sshallowcheck.stdout' + + - name: "3.4.3.6 | L1 | PATCH | Ensure nftables default deny firewall policy | Set hook input deny policy" + command: nft chain inet "{{ rhel8cis_nft_tables_tablename }}" input { policy drop \; } + when: '"type filter hook input priority 0; policy drop;" not in rhel8cis_3_4_3_6_inputpolicy.stdout' + + - name: "3.4.3.6 | L1 | PATCH | Ensure nftables default deny firewall policy | Create hook forward deny policy" + command: nft chain inet "{{ rhel8cis_nft_tables_tablename }}" forward { policy drop \; } + when: '"type filter hook forward priority 0; policy drop;" not in rhel8cis_3_4_3_6_forwardpolicy.stdout' + + - name: "3.4.3.6 | L1 | PATCH | Ensure nftables default deny firewall policy | Create hook output deny policy" + command: nft chain inet "{{ rhel8cis_nft_tables_tablename }}" output { policy drop \; } + when: '"type filter hook output priority 0; policy drop;" not in rhel8cis_3_4_3_6_outputpolicy.stdout' + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_6 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.6 + +- name: "3.4.3.7 | L1 | PATCH | Ensure nftables service is enabled | Check if nftables is enabled" + service: + name: nftables + enabled: yes + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_7 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.7 + +- name: "3.4.3.8 | L1 | PATCH | Ensure nftables rules are permanent" + lineinfile: + path: /etc/sysconfig/nftables.conf + state: present + insertafter: EOF + line: include 
"/etc/nftables/inet-{{ rhel8cis_nft_tables_tablename }}" + when: + - rhel8cis_firewall == "nftables" + - rhel8cis_rule_3_4_3_8 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.3.8 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.1.x.yml new file mode 100644 index 0000000..8e82a51 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.1.x.yml @@ -0,0 +1,144 @@ +--- + +- name: "3.4.4.1.1 | L1 | PATCH | Ensure iptables default deny firewall policy" + block: + - name: "3.4.4.1.1 | L1 | PATCH | Ensure iptables default deny firewall policy | Configure ssh to be allowed" + iptables: + chain: INPUT + protocol: tcp + destination_port: "22" + jump: ACCEPT + + - name: "3.4.4.1.1 | L1 | PATCH | Ensure iptables default deny firewall policy | Set drop items" + iptables: + policy: DROP + chain: "{{ item }}" + with_items: + - INPUT + - FORWARD + - OUTPUT + when: + - rhel8cis_rule_3_4_4_1_1 + - rhel8cis_firewall == "iptables" + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.1.1 + +- name: "3.4.4.1.2 | L1 | PATCH | Ensure iptables loopback traffic is configured" + block: + - name: "3.4.4.1.2 | L1 | Ensure iptables loopback traffic is configured | INPUT Loopback ACCEPT" + iptables: + action: append + chain: INPUT + in_interface: lo + jump: ACCEPT + + - name: "3.4.4.1.2 | L1 | PATCH | Ensure iptables loopback traffic is configured | OUTPUT Loopback ACCEPT" + iptables: + action: append + chain: OUTPUT + out_interface: lo + jump: ACCEPT + + - name: "3.4.4.1.2 | L1 | PATCH | Ensure iptables loopback traffic is configured | INPUT Loopback 127.0.0.0/8" + iptables: + action: append + chain: INPUT + source: 127.0.0.0/8 + jump: DROP + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_1_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.1.2 + +- name: "3.4.4.1.3 | L1 | PATCH | Ensure iptables outbound and established connections are configured" + iptables: + action: append + chain: '{{ item.chain }}' + protocol: '{{ item.protocol }}' + match: state + ctstate: '{{ item.ctstate }}' + jump: ACCEPT + with_items: + - { chain: OUTPUT, protocol: tcp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: udp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: icmp, ctstate: 'NEW,ESTABLISHED' } + - { chain: INPUT, protocol: tcp, ctstate: ESTABLISHED } + - { chain: INPUT, protocol: udp, ctstate: ESTABLISHED } + - { chain: INPUT, protocol: icmp, ctstate: ESTABLISHED } + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_1_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.1.3 + +- name: "3.4.4.1.4 | L1 | PATCH | Ensure iptables firewall rules exist for all open ports" + block: + - name: "3.4.4.1.4 | L1 | AUDIT | Ensure iptables firewall rules exist for all open ports | Get list of TCP open ports" + shell: netstat -ant |grep "tcp.*LISTEN" | awk '{ print $4 }'| sed 's/.*://' + changed_when: false + failed_when: false + register: rhel8cis_3_4_4_1_4_otcp + + - name: "3.4.4.1.4 | L1 | AUDIT | Ensure iptables firewall rules exist for all open ports | Get the list of udp open ports" + shell: netstat -ant |grep "udp.*LISTEN" | awk '{ print $4 }'| sed 's/.*://' + changed_when: false + failed_when: false + register: rhel8cis_3_4_4_1_4_oudp + + - name: "3.4.4.1.4 | L1 | PATCH | Ensure iptables firewall rules exist for all open ports | Adjust open tcp ports" + iptables: + action: 
append + chain: INPUT + protocol: tcp + destination_port: "{{ item }}" + match: state + ctstate: NEW + jump: ACCEPT + with_items: + - "{{ rhel8cis_3_4_4_1_4_otcp.stdout_lines }}" + when: rhel8cis_3_4_4_1_4_otcp.stdout is defined + + - name: "3.4.4.1.4 | L1 | PATCH | Ensure iptables firewall rules exist for all open ports | Adjust open udp ports" + iptables: + action: append + chain: INPUT + protocol: udp + destination_port: "{{ item }}" + match: state + ctstate: NEW + jump: ACCEPT + with_items: + - "{{ rhel8cis_3_4_4_1_4_oudp.stdout_lines }}" + when: rhel8cis_3_4_4_1_4_otcp.stdout is defined + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_1_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.1.4 + +- name: "3.4.4.1.5 | L1 | PATCH | Ensure iptables service is enabled and active | Check if iptables is enabled" + service: + name: iptables + enabled: yes + state: started + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_1_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.1.5 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.2.x.yml new file mode 100644 index 0000000..f42ab71 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.4.4.2.x.yml @@ -0,0 +1,136 @@ +--- + +- name: "3.4.4.2.1 | L1 | PATCH | Ensure ip6tables default deny firewall policy" + block: + - name: "3.4.4.2.1 | L1 | Ensure ip6tables default deny firewall policy | Configure ssh to be allowed" + iptables: + chain: INPUT + protocol: tcp + destination_port: "22" + jump: ACCEPT + ip_version: ipv6 + + - name: "3.4.4.2.1 | L1 | PATCH | Ensure ip6tables default deny firewall policy | Set drop items" + iptables: + policy: DROP + chain: "{{ item }}" + ip_version: ipv6 + with_items: + - INPUT + - FORWARD + - OUTPUT + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_2_1 + - rhel8cis_ipv6_required + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.2.1 + +- name: "3.4.4.2.2 | L1 | PATCH | Ensure ip6tables loopback traffic is configured" + block: + - name: "3.4.4.2.2 | L1 | PATCH | Ensure ip6tables loopback traffic is configured | INPUT Loopback ACCEPT" + iptables: + action: append + chain: INPUT + in_interface: lo + jump: ACCEPT + ip_version: ipv6 + + - name: "3.4.4.2.2 | L1 | PATCH | Ensure ip6tables loopback traffic is configured | OUTPUT Loopback ACCEPT" + iptables: + action: append + chain: OUTPUT + out_interface: lo + jump: ACCEPT + ip_version: ipv6 + + - name: "3.4.4.2.2 | L1 | PATCH | Ensure ip6tables loopback traffic is configured | INPUT Loopback 127.0.0.0/8" + iptables: + action: append + chain: INPUT + source: ::1 + jump: DROP + ip_version: ipv6 + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_2_2 + - rhel8cis_ipv6_required + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.2.2 + +- name: "3.4.4.2.3 | L1 | PATCH | Ensure ip6tables outbound and established connections are configured" + iptables: + action: append + chain: '{{ item.chain }}' + protocol: '{{ item.protocol }}' + match: state + ctstate: '{{ item.ctstate }}' + jump: ACCEPT + ip_version: ipv6 + with_items: + - { chain: OUTPUT, protocol: tcp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: udp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: icmp, ctstate: 'NEW,ESTABLISHED' } + - { chain: INPUT, protocol: tcp, ctstate: ESTABLISHED } + - { chain: INPUT, protocol: udp, 
ctstate: ESTABLISHED } + - { chain: INPUT, protocol: icmp, ctstate: ESTABLISHED } + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_2_3 + - rhel8cis_ipv6_required + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.2.3 + +- name: "3.4.4.2.4 | L1 | PATCH | Ensure ip6tables firewall rules exist for all open ports" + block: + - name: "3.4.4.2.4 | L1 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Get list of TCP6 open ports" + shell: netstat -ant |grep "tcp6.*LISTEN" | awk '{ print $4 }'| sed 's/.*://' + changed_when: false + failed_when: false + register: rhel8cis_3_4_4_2_4_otcp + + - name: "3.4.4.2.4 | L1 | PATCH |Ensure ip6tables firewall rules exist for all open ports| Adjust open tcp6 ports" + iptables: + action: append + chain: INPUT + protocol: tcp + destination_port: "{{ item }}" + match: state + ctstate: NEW + jump: ACCEPT + ip_version: ipv6 + with_items: + - "{{ rhel8cis_3_4_4_2_4_otcp.stdout_lines }}" + when: rhel8cis_3_4_4_2_4_otcp.stdout is defined + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_2_4 + - rhel8cis_ipv6_required + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.2.4 + +- name: "3.4.4.2.5 | L1 | PATCH | Ensure ip6tables service is enabled and active | Check if ip6tables is enabled" + service: + name: ip6tables + enabled: yes + state: started + when: + - rhel8cis_firewall == "iptables" + - rhel8cis_rule_3_4_4_2_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_3.4.4.2.5 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.5.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.5.yml new file mode 100644 index 0000000..361c610 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.5.yml @@ -0,0 +1,32 @@ +--- + +- name: "3.5 | L1 | PATCH | Ensure wireless interfaces are disabled" + block: + - name: "3.5 | L1 | AUDIT | Ensure wireless interfaces are disabled | Check if nmcli command is available" + command: rpm -q NetworkManager + changed_when: false + failed_when: false + check_mode: no + args: + warn: no + register: rhel_08_nmcli_available + + - name: "3.5 | L1 | AUDIT | Ensure wireless interfaces are disabled | Check if wifi is enabled" + command: nmcli radio wifi + register: rhel_08_wifi_enabled + changed_when: rhel_08_wifi_enabled.stdout != "disabled" + failed_when: false + when: rhel_08_nmcli_available.rc == 0 + + - name: "3.5 | L1 | PATCH | Ensure wireless interfaces are disabled | Disable wifi if enabled" + command: nmcli radio all off + changed_when: false + failed_when: false + when: rhel_08_wifi_enabled is changed + when: + - rhel8cis_rule_3_5 + tags: + - level1-server + - level2-workstation + - patch + - rule_3.5 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.6.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.6.yml new file mode 100644 index 0000000..89ccd90 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_3/cis_3.6.yml @@ -0,0 +1,17 @@ +--- + +- name: "3.6 | L2 | PATCH | Disable IPv6" + replace: + dest: /etc/default/grub + regexp: '(^GRUB_CMDLINE_LINUX\s*\=\s*)(?:")(.+)(?/dev/null; done + changed_when: false + failed_when: false + check_mode: no + register: priv_procs + + - name: "4.1.12 | L2 | PATCH | Ensure successful file system mounts are collected" + template: + src: audit/rhel8cis_rule_4_1_12.rules.j2 + dest: /etc/audit/rules.d/rhel8cis_rule_4_1_12.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - 
rhel8cis_rule_4_1_12 + tags: + - level2-server + - level2-workstation + - auditd + - patch + - rule_4.1.12 + +- name: "4.1.13 | L2 | PATCH | Ensure use of privileged commands is collected" + template: + src: audit/rhel8cis_rule_4_1_13.rules.j2 + dest: /etc/audit/rules.d/rhel8cis_rule_4_1_13.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel8cis_rule_4_1_13 + tags: + - level2-server + - level2-workstation + - auditd + - patch + - rule_4.1.13 + +- name: "4.1.14 | L2 | PATCH | Ensure file deletion events by users are collected" + template: + src: audit/rhel8cis_rule_4_1_14.rules.j2 + dest: /etc/audit/rules.d/rhel8cis_rule_4_1_14.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel8cis_rule_4_1_14 + tags: + - level2-server + - level2-workstation + - auditd + - patch + - rule_4.1.14 + +- name: "4.1.15 | L2 | PATCH | Ensure kernel module loading and unloading is collected" + template: + src: audit/rhel8cis_rule_4_1_15.rules.j2 + dest: /etc/audit/rules.d/rhel8cis_rule_4_1_15.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel8cis_rule_4_1_15 + tags: + - level2-server + - level2-workstation + - auditd + - patch + - rule_4.1.15 + +- name: "4.1.16 | L2 | PATCH | Ensure system administrator actions (sudolog) are collected" + template: + src: audit/rhel8cis_rule_4_1_16.rules.j2 + dest: /etc/audit/rules.d/rhel8cis_rule_4_1_16.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel8cis_rule_4_1_16 + tags: + - level2-server + - level2-workstation + - auditd + - patch + - rule_4.1.16 + +- name: "4.1.17 | L2 | PATCH | Ensure the audit configuration is immutable" + template: + src: audit/rhel8cis_rule_4_1_17.rules.j2 + dest: /etc/audit/rules.d/99-finalize.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - rhel8cis_rule_4_1_17 + tags: + - level2-server + - level2-workstation + - auditd + - patch + - rule_4.1.17 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.1.x.yml new file mode 100644 index 0000000..a7a961c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.1.x.yml @@ -0,0 +1,177 @@ +--- + +- name: "4.2.1.1 | L1 | PATCH | Ensure rsyslog installed" + package: + name: rsyslog + state: present + when: + - "'rsyslog' not in ansible_facts.packages" + - rhel8cis_rule_4_2_1_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.1.1 + +- name: "4.2.1.2 | L1 | PATCH | Ensure rsyslog Service is enabled" + service: + name: rsyslog + enabled: yes + when: + - rhel8cis_rule_4_2_1_2 + tags: + - level1-server + - level1-workstation + - patch + - rsyslog + - rule_4.2.1.2 + +- name: "4.2.1.3 | L1 | PATCH | Ensure rsyslog default file permissions configured" + lineinfile: + dest: /etc/rsyslog.conf + regexp: '^\$FileCreateMode' + line: '$FileCreateMode 0640' + notify: restart rsyslog + when: + - rhel8cis_rule_4_2_1_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.1.3 + +- name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured" + block: + - name: "4.2.1.4 | L1 | AUDIT | Ensure logging is configured | rsyslog current config message out" + command: cat /etc/rsyslog.conf + become: yes + changed_when: false + failed_when: no + check_mode: no + register: rhel_08_4_2_1_4_audit + + - name: "4.2.1.4 | L1 | AUDIT | Ensure logging is configured | rsyslog current config message out" + debug: + msg: + - "These are the current logging configurations for rsyslog, please review:" + - "{{ rhel_08_4_2_1_4_audit.stdout_lines }}" + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | mail.* log setting" + blockinfile: + path: /etc/rsyslog.conf + state: present + marker: "# {mark} MAIL LOG SETTINGS (ANSIBLE MANAGED)" + block: | + # mail logging additions to meet CIS standards + mail.* -/var/log/mail + mail.info -/var/log/mail.info + mail.warning -/var/log/mail.warning + mail.err /var/log/mail.err + insertafter: '# Log all the mail messages in one place.' + notify: restart rsyslog + when: rhel8cis_rsyslog_ansiblemanaged + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | news.crit log setting" + blockinfile: + path: /etc/rsyslog.conf + state: present + marker: "# {mark} NEWS LOG SETTINGS (ANSIBLE MANAGED)" + block: | + # news logging additions to meet CIS standards + news.crit -/var/log/news/news.crit + news.notice -/var/log/news/news.crit + insertafter: '# Save news errors of level crit and higher in a special file.' + notify: restart rsyslog + when: rhel8cis_rsyslog_ansiblemanaged + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | Misc. log setting" + blockinfile: + path: /etc/rsyslog.conf + state: present + marker: "# {mark} MISC. LOG SETTINGS (ANSIBLE MANAGED)" + block: | + # misc. logging additions to meet CIS standards + *.=warning;*.=err -/var/log/warn + *.crit /var/log/warn + *.*;mail.none;news.none /var/log/messages + insertafter: '#### RULES ####' + notify: restart rsyslog + when: rhel8cis_rsyslog_ansiblemanaged + + - name: "4.2.1.4 | L1 | PATCH | Ensure logging is configured | Local log settings" + blockinfile: + path: /etc/rsyslog.conf + state: present + marker: "# {mark} LOCAL LOG SETTINGS (ANSIBLE MANAGED)" + block: | + # local log settings + local0,local1.* -/var/log/localmessages + local2,local3.* -/var/log/localmessages + local4,local5.* -/var/log/localmessages + local6,local7.* -/var/log/localmessages + *.emerg :omusrmsg:* + insertafter: '#### RULES ####' + notify: restart rsyslog + when: + - rhel8cis_rule_4_2_1_4 + tags: + - level1-server + - level1-workstation + - patch + - rsyslog + - rule_4.2.1.4 + +- name: "4.2.1.5 | L1 | PATCH | Ensure rsyslog is configured to send logs to a remote log host" + blockinfile: + path: /etc/rsyslog.conf + state: present + block: | + # remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional + *.* @@{{ rhel8cis_remote_log_server }} + insertafter: EOF + register: result + failed_when: + - result is failed + - result.rc != 257 + notify: restart rsyslog + when: + - rhel8cis_rule_4_2_1_5 + - rhel8cis_remote_log_server is defined + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.1.5 + - rsyslog + +- name: "4.2.1.6 | L1 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts." + block: + - name: "4.2.1.6 | L1 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts. | When not log host" + replace: + path: /etc/rsyslog.conf + regexp: '({{ item }})' + replace: '#\1' + notify: restart rsyslog + with_items: + - '^(\$ModLoad imtcp)' + - '^(\$InputTCPServerRun)' + when: not rhel8cis_system_is_log_server + + - name: "4.2.1.6 | L1 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts. 
| When log host" + replace: + path: /etc/rsyslog.conf + regexp: '^#(.*{{ item }}.*)' + replace: '\1' + notify: restart rsyslog + with_items: + - 'ModLoad imtcp' + - 'InputTCPServerRun' + when: rhel8cis_system_is_log_server + when: + - rhel8cis_rule_4_2_1_6 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.1.6 + - rsyslog diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.2.x.yml new file mode 100644 index 0000000..49ce75f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.2.x.yml @@ -0,0 +1,43 @@ +--- + +- name: "4.2.2.1 | L1 | PATCH | Ensure journald is configured to send logs to rsyslog" + lineinfile: + dest: /etc/systemd/journald.conf + regexp: "^#ForwardToSyslog=|^ForwardToSyslog=" + line: ForwardToSyslog=yes + state: present + when: + - rhel8cis_rule_4_2_2_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.2.1 + +- name: "4.2.2.2 | L1 | PATCH | Ensure journald is configured to compress large log files" + lineinfile: + dest: /etc/systemd/journald.conf + regexp: "^#Compress=|^Compress=" + line: Compress=yes + state: present + when: + - rhel8cis_rule_4_2_2_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.2.2 + +- name: "4.2.2.3 | L1 | PATCH | Ensure journald is configured to write logfiles to persistent disk" + lineinfile: + dest: /etc/systemd/journald.conf + regexp: "^#Storage=|^Storage=" + line: Storage=persistent + state: present + when: + - rhel8cis_rule_4_2_2_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.2.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.3.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.3.yml new file mode 100644 index 0000000..6956609 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.2.3.yml @@ -0,0 +1,13 @@ +--- + +- name: "4.2.3 | L1 | PATCH | Ensure permissions on all logfiles are configured" + command: find /var/log -type f -exec chmod g-wx,o-rwx "{}" + + changed_when: false + failed_when: false + when: + - rhel8cis_rule_4_2_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.3.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.3.yml new file mode 100644 index 0000000..2d60dac --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/cis_4.3.yml @@ -0,0 +1,24 @@ +--- + +- name: "4.3 | L1 | PATCH | Ensure logrotate is configured" + block: + - name: "4.3 | L1 | AUDIT | Ensure logrotate is configured | Get logrotate settings" + find: + paths: /etc/logrotate.d/ + register: log_rotates + + - name: "4.3 | L1 | PATCH | Ensure logrotate is configured" + replace: + path: "{{ item.path }}" + regexp: '^(\s*)(daily|weekly|monthly|yearly)$' + replace: "\\1{{ rhel8cis_logrotate }}" + with_items: + - "{{ log_rotates.files }}" + - { path: "/etc/logrotate.conf" } + when: + - rhel8cis_rule_4_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/main.yml new file mode 100644 index 0000000..f3a9500 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_4/main.yml @@ -0,0 +1,23 @@ +--- + +- name: "SECTION | 4.1| Configure System Accounting (auditd)" + include: cis_4.1.1.x.yml + +- name: "SECTION | 4.1.2.x| Configure Data Retention" + include: 
cis_4.1.2.x.yml + +- name: "SECTION | 4.1.x| Auditd rules" + include: cis_4.1.x.yml + +- name: "SECTION | 4.2.x| Configure Logging" + include: cis_4.2.1.x.yml + when: rhel8cis_syslog == 'rsyslog' + +- name: "SECTION | 4.2.2.x| Configure journald" + include: cis_4.2.2.x.yml + +- name: "SECTION | 4.2.3 | Configure logile perms" + include: cis_4.2.3.yml + +- name: "SECTION | 4.3 | Configure logrotate" + include: cis_4.3.yml diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.1.x.yml new file mode 100644 index 0000000..73de68f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.1.x.yml @@ -0,0 +1,147 @@ +--- + +- name: "5.1.1 | L1 | PATCH | Ensure cron daemon is enabled" + service: + name: crond + enabled: yes + when: + - rhel8cis_rule_5_1_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.1 + +- name: "5.1.2 | L1 | PATCH | Ensure permissions on /etc/crontab are configured" + file: + dest: /etc/crontab + owner: root + group: root + mode: 0600 + when: + - rhel8cis_rule_5_1_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.2 + +- name: "5.1.3 | L1 | PATCH | Ensure permissions on /etc/cron.hourly are configured" + file: + dest: /etc/cron.hourly + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel8cis_rule_5_1_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.3 + +- name: "5.1.4 | L1 | PATCH | Ensure permissions on /etc/cron.daily are configured" + file: + dest: /etc/cron.daily + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel8cis_rule_5_1_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.4 + +- name: "5.1.5 | L1 | PATCH | Ensure permissions on /etc/cron.weekly are configured" + file: + dest: /etc/cron.weekly + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel8cis_rule_5_1_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.5 + +- name: "5.1.6 | L1 | PATCH | Ensure permissions on /etc/cron.monthly are configured" + file: + dest: /etc/cron.monthly + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel8cis_rule_5_1_6 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.6 + +- name: "5.1.7 | L1 | PATCH | Ensure permissions on /etc/cron.d are configured" + file: + dest: /etc/cron.d + state: directory + owner: root + group: root + mode: 0700 + when: + - rhel8cis_rule_5_1_7 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.7 + +- name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users" + block: + - name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users | Remove at.deny" + file: + dest: /etc/at.deny + state: absent + + - name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users | Check if at.allow exists" + stat: + path: "/etc/at.allow" + register: p + + - name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users | Ensure at.allow is restricted to authorized users" + file: + dest: /etc/at.allow + state: '{{ "file" if p.stat.exists else "touch" }}' + owner: root + group: root + mode: 0600 + + - name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users | Remove cron.deny" + file: + dest: /etc/cron.deny + state: absent + + - name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users | Check if cron.allow exists" + stat: + path: 
"/etc/cron.allow" + register: p + + - name: "5.1.8 | L1 | PATCH | Ensure at/cron is restricted to authorized users | Ensure cron.allow is restricted to authorized users" + file: + dest: /etc/cron.allow + state: '{{ "file" if p.stat.exists else "touch" }}' + owner: root + group: root + mode: 0600 + when: + - rhel8cis_rule_5_1_8 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.1.8 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.2.x.yml new file mode 100644 index 0000000..f434994 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.2.x.yml @@ -0,0 +1,345 @@ +--- + +- name: "5.2.1 | L1 | PATCH | Ensure permissions on /etc/ssh/sshd_config are configured" + file: + dest: /etc/ssh/sshd_config + state: file + owner: root + group: root + mode: 0600 + when: + - rhel8cis_rule_5_2_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.1 + +- name: "5.2.2 | L1 | PATCH | Ensure SSH access is limited" + block: + - name: "5.2.2 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for allowusers" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^AllowUsers" + line: AllowUsers {{ rhel8cis_sshd['allowusers'] }} + notify: restart sshd + when: "rhel8cis_sshd['allowusers']|default('') | length > 0" + + - name: "5.2.2 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for allowgroups" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^AllowGroups" + line: AllowGroups {{ rhel8cis_sshd['allowgroups'] }} + notify: restart sshd + when: "rhel8cis_sshd['allowgroups']|default('') | length > 0" + + - name: "5.2.2 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for denyusers" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^DenyUsers" + line: DenyUsers {{ rhel8cis_sshd['denyusers'] }} + notify: restart sshd + when: "rhel8cis_sshd['denyusers']|default('') | length > 0" + + - name: "5.2.2 | L1 | PATCH | Ensure SSH access is limited | Add line to sshd_config for denygroups" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^DenyGroups" + line: DenyGroups {{ rhel8cis_sshd['denygroups'] }} + notify: restart sshd + when: "rhel8cis_sshd['denygroups']|default('') | length > 0" + when: + - rhel8cis_rule_5_2_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.2 + +- name: "5.2.3 | L1 | PATCH | Ensure permissions on SSH private host key files are configured" + block: + - name: "5.2.3 | L1 | AUDIT | Ensure permissions on SSH private host key files are configured | Find the SSH private host keys" + find: + paths: /etc/ssh + patterns: 'ssh_host_*_key' + recurse: true + file_type: any + register: rhel8cis_5_2_3_ssh_private_host_key + + - name: "5.2.3 | L1 | PATCH | Ensure permissions on SSH private host key files are configured | Set permissions on SSH private host keys" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0600 + with_items: + - "{{ rhel8cis_5_2_3_ssh_private_host_key.files }}" + when: + - rhel8cis_rule_5_2_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.3 + +- name: "5.2.4 | L1 | PATCH | Ensure permissions on SSH public host key files are configured" + block: + - name: "5.2.4 | L1 | AUDIT | Ensure permissions on SSH public host key files are configured | Find the SSH public host keys" + find: + paths: /etc/ssh + patterns: 'ssh_host_*_key.pub' + recurse: true + 
file_type: any
+      register: rhel8cis_5_2_4_ssh_public_host_key
+
+    - name: "5.2.4 | L1 | PATCH | Ensure permissions on SSH public host key files are configured | Set permissions on SSH public host keys"
+      file:
+        path: "{{ item.path }}"
+        owner: root
+        group: root
+        mode: 0644
+      with_items:
+        - "{{ rhel8cis_5_2_4_ssh_public_host_key.files }}"
+  when:
+    - rhel8cis_rule_5_2_4
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.4
+
+- name: "5.2.5 | L1 | PATCH | Ensure SSH LogLevel is appropriate"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#LogLevel|^LogLevel"
+    line: 'LogLevel {{ rhel8cis_ssh_loglevel }}'
+  when:
+    - rhel8cis_rule_5_2_5
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.5
+
+- name: "5.2.6 | L2 | PATCH | Ensure SSH X11 forwarding is disabled"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#X11Forwarding|^X11Forwarding"
+    line: 'X11Forwarding no'
+  when:
+    - rhel8cis_rule_5_2_6
+  tags:
+    - level2-server
+    - level1-workstation
+    - patch
+    - rule_5.2.6
+
+- name: "5.2.7 | L1 | PATCH | Ensure SSH MaxAuthTries is set to 4 or less"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: '^(#)?MaxAuthTries \d'
+    line: 'MaxAuthTries 4'
+  when:
+    - rhel8cis_rule_5_2_7
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.7
+
+- name: "5.2.8 | L1 | PATCH | Ensure SSH IgnoreRhosts is enabled"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#IgnoreRhosts|^IgnoreRhosts"
+    line: 'IgnoreRhosts yes'
+  when:
+    - rhel8cis_rule_5_2_8
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.8
+
+- name: "5.2.9 | L1 | PATCH | Ensure SSH HostbasedAuthentication is disabled"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#HostbasedAuthentication|^HostbasedAuthentication"
+    line: 'HostbasedAuthentication no'
+  when:
+    - rhel8cis_rule_5_2_9
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.9
+
+- name: "5.2.10 | L1 | PATCH | Ensure SSH root login is disabled"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#PermitRootLogin|^PermitRootLogin"
+    line: 'PermitRootLogin no'
+  when:
+    - rhel8cis_rule_5_2_10
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.10
+
+- name: "5.2.11 | L1 | PATCH | Ensure SSH PermitEmptyPasswords is disabled"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#PermitEmptyPasswords|^PermitEmptyPasswords"
+    line: 'PermitEmptyPasswords no'
+  when:
+    - rhel8cis_rule_5_2_11
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.11
+
+- name: "5.2.12 | L1 | PATCH | Ensure SSH PermitUserEnvironment is disabled"
+  lineinfile:
+    state: present
+    dest: /etc/ssh/sshd_config
+    regexp: "^#PermitUserEnvironment|^PermitUserEnvironment"
+    line: 'PermitUserEnvironment no'
+  when:
+    - rhel8cis_rule_5_2_12
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_5.2.12
+
+- name: "5.2.13 | L1 | PATCH | Ensure SSH Idle Timeout Interval is configured"
+  block:
+    - name: "5.2.13 | L1 | PATCH | Ensure SSH Idle Timeout Interval is configured | Add line in sshd_config for ClientAliveInterval"
+      lineinfile:
+        state: present
+        dest: /etc/ssh/sshd_config
+        regexp: '^ClientAliveInterval'
+        line: "ClientAliveInterval {{ rhel8cis_sshd['clientaliveinterval'] }}"
+
+    - name: "5.2.13 | L1 | PATCH | Ensure SSH Idle Timeout Interval is configured | Ensure SSH ClientAliveCountMax set to <= 3"
+      lineinfile:
state: present + dest: /etc/ssh/sshd_config + regexp: '^ClientAliveCountMax' + line: "ClientAliveCountMax {{ rhel8cis_sshd['clientalivecountmax'] }}" + when: + - rhel8cis_rule_5_2_13 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.13 + +- name: "5.2.14 | L1 | PATCH | Ensure SSH LoginGraceTime is set to one minute or less" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#LoginGraceTime|^LoginGraceTime" + line: "LoginGraceTime {{ rhel8cis_sshd['logingracetime'] }}" + when: + - rhel8cis_rule_5_2_14 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.14 + +- name: "5.2.15 | L1 | PATCH | Ensure SSH warning banner is configured" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^Banner' + line: 'Banner /etc/issue.net' + when: + - rhel8cis_rule_5_2_15 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.15 + +- name: "5.2.16 | L1 | PATCH | Ensure SSH PAM is enabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#UsePAM|^UsePAM" + line: 'UsePAM yes' + when: + - rhel8cis_rule_5_2_16 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.16 + +- name: "5.2.17 | L2 | PATCH | Ensure SSH AllowTcpForwarding is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#AllowTcpForwarding|^AllowTcpForwarding" + line: 'AllowTcpForwarding no' + when: + - rhel8cis_rule_5_2_17 + tags: + - level2-server + - level2-workstation + - patch + - rule_5.2.17 + +- name: "5.2.18 | L1 | PATCH | Ensure SSH MaxStartups is configured" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#MaxStartups|^MaxStartups" + line: 'MaxStartups 10:30:60' + when: + - rhel8cis_rule_5_2_18 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.18 + +- name: "5.2.19 | L1 | PATCH | Ensure SSH MaxSessions is set to 4 or less" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#MaxSessions|^MaxSessions" + line: 'MaxSessions {{ rhel8cis_ssh_maxsessions }}' + when: + - rhel8cis_rule_5_2_19 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.19 + +- name: "5.2.20 | L1 | PATCH | Ensure system-wide crypto policy is not over-ridden" + shell: sed -ri "s/^\s*(CRYPTO_POLICY\s*=.*)$/# \1/" /etc/sysconfig/sshd + args: + warn: no + notify: restart sshd + when: + - rhel8cis_rule_5_2_20 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.2.20 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.3.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.3.x.yml new file mode 100644 index 0000000..a3c7128 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.3.x.yml @@ -0,0 +1,90 @@ +--- + +- name: "5.3.1 | L1 | PATCH | Create custom authselect profile" + block: + - name: "5.3.1 | L1 | PATCH | Create custom authselect profile | Gather profiles" + shell: 'authselect current | grep "Profile ID: custom/"' + failed_when: false + changed_when: false + check_mode: no + register: rhel8cis_5_3_1_profiles + + - name: "5.3.1 | L1 | AUDIT | Create custom authselect profile | Show profiles" + debug: + msg: + - "Below are the current custom profiles" + - "{{ rhel8cis_5_3_1_profiles.stdout_lines }}" + + - name: "5.3.1 | L1 | PATCH | Create custom authselect profile | Create custom profiles" + shell: authselect create-profile {{ rhel8cis_authselect['custom_profile_name'] }} -b {{ rhel8cis_authselect['default_file_to_copy'] }} + args: + warn: no + when: 
rhel8cis_authselect_custom_profile_create + when: + - rhel8cis_rule_5_3_1 + tags: + - level1-server + - level1-workstation + - patch + - authselect + - rule_5.3.1 + +- name: "5.3.2 | L1 | PATCH | Select authselect profile" + block: + - name: "5.3.2 | L1 | AUDIT | Select authselect profile | Gather profiles and enabled features" + shell: "authselect current" + args: + warn: no + failed_when: false + changed_when: false + check_mode: no + register: rhel8cis_5_3_2_profiles + + - name: "5.3.2 | L1 | AUDIT | Select authselect profile | Show profiles" + debug: + msg: + - "Below are the current custom profiles" + - "{{ rhel8cis_5_3_2_profiles.stdout_lines }}" + + - name: "5.3.2 | L1 | PATCH | Select authselect profile | Create custom profiles" + shell: "authselect select custom/{{ rhel8cis_authselect['custom_profile_name'] }} {{ rhel8cis_authselect['options'] }}" + args: + warn: no + when: rhel8cis_authselect_custom_profile_select + when: + - rhel8cis_rule_5_3_2 + tags: + - level1-server + - level1-workstation + - patch + - authselect + - rule_5.3.2 + +- name: "5.3.3 | L1 | PATCH | Ensure authselect includes with-faillock" + block: + - name: "5.3.3 | L1 | AUDIT | Ensure authselect includes with-faillock | Gather profiles and enabled features" + shell: "authselect current | grep with-faillock" + failed_when: false + changed_when: false + check_mode: no + register: rhel8cis_5_3_3_profiles_faillock + + - name: "5.3.3 | L1 | AUDIT | Ensure authselect includes with-faillock| Show profiles" + debug: + msg: + - "Below are the current custom profiles" + - "{{ rhel8cis_5_3_3_profiles_faillock.stdout_lines }}" + + - name: "5.3.3 | L1 | PATCH | Ensure authselect includes with-faillock | Create custom profiles" + shell: "authselect select custom/{{ rhel8cis_authselect['custom_profile_name'] }} with-faillock" + args: + warn: no + when: rhel8cis_authselect_custom_profile_select + when: + - rhel8cis_rule_5_3_3 + tags: + - level1-server + - level1-workstation + - patch + - authselect + - rule_5.3.3 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.4.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.4.x.yml new file mode 100644 index 0000000..1b0600d --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.4.x.yml @@ -0,0 +1,131 @@ +--- + +- name: | + "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured + 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured + 5.4.3 | L1 | PATCH | Ensure password reuse is limited + 5.4.4 | L1 | PATCH | Ensure password hashing algorithm is SHA-512" + block: + - name: "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured | Set pwquality config settings" + lineinfile: + state: present + dest: /etc/security/pwquality.conf + regexp: ^{{ item.name }} + line: "{{ item.name }} = {{ item.value }}" + with_items: + - { name: minlen, value: "{{ rhel8cis_pam_password.minlen }}" } + - { name: minclass, value: "{{ rhel8cis_pam_password.minclass }}" } + when: rhel8cis_rule_5_4_1 + + - name: | + "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured | Set system-auth retry settings + 5.4.3| L1 | PATCH | Ensure password reuse is limited | Set system-auth remember settings" + lineinfile: + dest: /etc/pam.d/system-auth + state: present + regexp: '^password requisite pam_pwquality.so' + line: "password requisite pam_pwquality.so try_first_pass local_users_only enforce-for-root retry=3 remember={{ rhel8cis_pam_faillock.remember }}" + insertbefore: '^#?password ?' 
+ when: + - rhel8cis_rule_5_4_1 or + rhel8cis_rule_5_4_3 + + - name: "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured | Set system-auth retry settings" + lineinfile: + dest: /etc/pam.d/password-auth + state: present + regexp: '^password requisite pam_pwquality.so' + line: "password requisite pam_pwquality.so try_first_pass local_users_only enforce-for-root retry=3" + insertbefore: '^#?password ?' + when: rhel8cis_rule_5_4_1 + + - name: "5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Add deny count and unlock time for preauth" + lineinfile: + dest: /etc/pam.d/{{ item }} + state: present + regexp: '^auth required pam_faillock.so preauth' + line: "auth required pam_faillock.so preauth silent deny={{ rhel8cis_pam_faillock.attempts }}{{ (rhel8cis_pam_faillock.fail_for_root) | ternary(' even_deny_root ',' ') }}unlock_time={{ rhel8cis_pam_faillock.unlock_time }}" + insertafter: '^#?auth ?' + with_items: + - "system-auth" + - "password-auth" + when: rhel8cis_rule_5_4_2 + + - name: "5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured | Add deny count and unlock times for authfail" + lineinfile: + dest: /etc/pam.d/{{ item }} + state: present + regexp: '^auth required pam_faillock.so authfail' + line: "auth required pam_faillock.so authfail deny={{ rhel8cis_pam_faillock.attempts }}{{ (rhel8cis_pam_faillock.fail_for_root) | ternary(' even_deny_root ',' ') }}unlock_time={{ rhel8cis_pam_faillock.unlock_time }}" + insertafter: '^#?auth ?' + with_items: + - "system-auth" + - "password-auth" + when: rhel8cis_rule_5_4_2 + + - name: | + "5.4.3 | L1 | PATCH | Ensure password reuse is limited | Set system-auth remember remember settings + 5.4.4 | L1 | PATCH | Ensure password hashing algorithm is SHA-512 | Set system-auth pwhash settings" + lineinfile: + dest: /etc/pam.d/system-auth + state: present + regexp: '^password sufficient pam_unix.so' + line: "password sufficient pam_unix.so {{ rhel8cis_pam_faillock.pwhash }} shadow try_first_pass use_authtok remember={{ rhel8cis_pam_faillock.remember }}" + insertafter: '^#?password ?' + when: + - rhel8cis_rule_5_4_3 or + rhel8cis_rule_5_4_4 + + - name: "5.4.4 | L1 | PATCH | Ensure password hashing algorithm is SHA-512 | Set system-auth pwhash settings" + lineinfile: + dest: /etc/pam.d/password-auth + state: present + regexp: '^password sufficient pam_unix.so' + line: "password sufficient pam_unix.so {{ rhel8cis_pam_faillock.pwhash }} shadow try_first_pass use_authtok" + insertafter: '^#?password ?' + when: rhel8cis_rule_5_4_4 + + # The two steps below were added to keep authconfig from overwritting the above configs. 
This follows steps from here: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/chap-hardening_your_system_with_tools_and_services + # With the steps below you will score five (5) points lower due to false positive results + - name: | + "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured + 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured + 5.4.3 | L1 | PATCH | Ensure password reuse is limited + 5.4.4 | L1 | PATCH | Ensure password hashing algorithm is SHA-512" + copy: + src: /etc/pam.d/{{ item }} + dest: /etc/pam.d/{{ item }}-local + remote_src: yes + owner: root + group: root + mode: '0644' + with_items: + - "system-auth" + - "password-auth" + + - name: | + "5.4.1 | L1 | PATCH | Ensure password creation requirements are configured + 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured + 5.4.3 | L1 | PATCH | Ensure password reuse is limited + 5.4.4 | L1 | PATCH | Ensure password hashing algorithm is SHA-512" + file: + src: /etc/pam.d/{{ item }}-local + dest: /etc/pam.d/{{ item }} + state: link + force: yes + with_items: + - "system-auth" + - "password-auth" + when: + - rhel8cis_rule_5_4_1 or + rhel8cis_rule_5_4_2 or + rhel8cis_rule_5_4_3 or + rhel8cis_rule_5_4_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.4.1 + - rule_5.4.2 + - rule_5.4.3 + - rule_5.4.4 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.1.x.yml new file mode 100644 index 0000000..aa85b59 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.1.x.yml @@ -0,0 +1,117 @@ +--- + +- name: "5.5.1.1 | L1 | PATCH | Ensure password expiration is 365 days or less" + lineinfile: + state: present + dest: /etc/login.defs + regexp: '^PASS_MAX_DAYS' + line: "PASS_MAX_DAYS {{ rhel8cis_pass['max_days'] }}" + when: + - rhel8cis_rule_5_5_1_1 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.1.1 + +- name: "5.5.1.2 | L1 | PATCH | Ensure minimum days between password changes is 7 or more" + lineinfile: + state: present + dest: /etc/login.defs + regexp: '^PASS_MIN_DAYS' + line: "PASS_MIN_DAYS {{ rhel8cis_pass['min_days'] }}" + when: + - rhel8cis_rule_5_5_1_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.1.2 + +- name: "5.5.1.3 | L1 | PATCH | Ensure password expiration warning days is 7 or more" + lineinfile: + state: present + dest: /etc/login.defs + regexp: '^PASS_WARN_AGE' + line: "PASS_WARN_AGE {{ rhel8cis_pass['warn_age'] }}" + when: + - rhel8cis_rule_5_5_1_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.1.3 + +- name: "5.5.1.4 | L1 | PATCH | Ensure inactive password lock is 30 days or less" + block: + - name: "5.5.1.4 | L1 | AUDIT | Ensure inactive password lock is 30 days or less | Check current settings" + shell: useradd -D | grep INACTIVE={{ rhel8cis_inactivelock.lock_days }} | cut -f2 -d= + changed_when: false + failed_when: false + check_mode: no + register: rhel8cis_5_5_1_4_inactive_settings + + - name: "5.5.1.4 | L1 | PATCH | Ensure inactive password lock is 30 days or less | Set default inactive setting" + command: useradd -D -f {{ rhel8cis_inactivelock.lock_days }} + when: rhel8cis_5_5_1_4_inactive_settings.stdout | length == 0 + + - name: "5.5.1.4 | L1 | AUDIT | Ensure inactive password lock is 30 days or less | Getting user list" + shell: 'egrep ^[^:]+:[^\!*] /etc/shadow | cut -d: -f1' + check_mode: no + 
register: rhel_08_5_5_1_4_audit + changed_when: false + + - name: "5.5.1.4 | L1 | PATCH | Ensure inactive password lock is 30 days or less | Apply Inactive setting to existing accounts" + command: chage --inactive {{ rhel8cis_inactivelock.lock_days }} "{{ item }}" + with_items: + - "{{ rhel_08_5_5_1_4_audit.stdout_lines }}" + when: + - rhel8cis_rule_5_5_1_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.1.4 + +- name: "5.5.1.5 | L1 | PATCH | Ensure all users last password change date is in the past" + block: + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Get current date in Unix Time" + shell: echo $(($(date --utc --date "$1" +%s)/86400)) + failed_when: false + changed_when: false + check_mode: no + register: rhel8cis_5_5_1_5_currentut + + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Get list of users with last changed pw date in the future" + shell: "cat /etc/shadow | awk -F: '{if($3>{{ rhel8cis_5_5_1_5_currentut.stdout }})print$1}'" + changed_when: false + failed_when: false + check_mode: no + register: rhel8cis_5_5_1_5_user_list + + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Alert no pw change in the future exist" + debug: + msg: "Good News! All accounts have PW change dates that are in the past" + when: rhel8cis_5_5_1_5_user_list.stdout | length == 0 + + - name: "5.5.1.5 | L1 | AUDIT | Ensure all users last password change date is in the past | Alert on accounts with pw change in the future" + debug: + msg: "Warning! The following accounts have the last PW change date in the future: {{ rhel8cis_5_5_1_5_user_list.stdout_lines }}" + when: + - rhel8cis_5_5_1_5_user_list.stdout | length > 0 + - not rhel8cis_futurepwchgdate_autofix + + - name: "5.5.1.5 | L1 | PATCH | Ensure all users last password change date is in the past | Fix accounts with pw change in the future" + command: passwd --expire {{ item }} + when: + - rhel8cis_5_5_1_5_user_list | length > 0 + - rhel8cis_futurepwchgdate_autofix + with_items: + - "{{ rhel8cis_5_5_1_5_user_list.stdout_lines }}" + when: + - rhel8cis_rule_5_5_1_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.1.5 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.x.yml new file mode 100644 index 0000000..97ac3b3 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.5.x.yml @@ -0,0 +1,96 @@ +--- + +- name: "5.5.2 | L1 | PATCH | Ensure system accounts are secured" + block: + - name: "5.5.2 | L1 | Ensure system accounts are secured | Set nologin" + user: + name: "{{ item.id }}" + shell: /usr/sbin/nologin + with_items: + - "{{ rhel8cis_passwd }}" + when: + - item.id != "root" + - item.id != "sync" + - item.id != "shutdown" + - item.id != "halt" + - item.gid < rhel8cis_int_gid + - item.shell != " /bin/false" + - item.shell != " /usr/sbin/nologin" + + - name: "5.5.2 | L1 | PATCH | Ensure system accounts are secured | Lock accounts" + user: + name: "{{ item.id }}" + password_lock: true + with_items: + - "{{ rhel8cis_passwd }}" + when: + - item.id != "halt" + - item.id != "shutdown" + - item.id != "sync" + - item.id != "root" + - item.gid < rhel8cis_int_gid + - item.shell != " /bin/false" + - item.shell != " /usr/sbin/nologin" + when: + - rhel8cis_rule_5_5_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.2 + +- name: "5.5.3 | L1 | PATCH | Ensure default 
user shell timeout is 900 seconds or less" + blockinfile: + create: yes + mode: 0644 + dest: "{{ item.dest }}" + state: "{{ item.state }}" + marker: "# {mark} ANSIBLE MANAGED" + block: | + # Set session timeout - CIS ID RHEL-08-5.4.5 + TMOUT={{ rhel8cis_shell_session_timeout.timeout }} + export TMOUT + readonly TMOUT + with_items: + - { dest: "{{ rhel8cis_shell_session_timeout.file }}", state: present } + - { dest: /etc/profile, state: "{{ (rhel8cis_shell_session_timeout.file == '/etc/profile') | ternary('present', 'absent') }}" } + when: + - rhel8cis_rule_5_5_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.3 + +- name: "5.5.4 | L1 | PATCH | Ensure default group for the root account is GID 0" + command: usermod -g 0 root + changed_when: false + failed_when: false + when: + - rhel8cis_rule_5_5_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.4 + +- name: "5.5.5 | L1 | PATCH | Ensure default user umask is 027 or more restrictive" + block: + - name: "5.5.5 | L1 | PATCH | Ensure default user umask is 027 or more restrictive | Set umask for /etc/bashrc" + replace: + path: /etc/bashrc + regexp: '(^\s+umask) 0[012][0-6]' + replace: '\1 027' + + - name: "5.5.5 | L1 | PATCH | Ensure default user umask is 027 or more restrictive | Set umask for /etc/profile" + replace: + path: /etc/profile + regexp: '(^\s+umask) 0[012][0-6]' + replace: '\1 027' + when: + - rhel8cis_rule_5_5_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.5.5 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.6.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.6.yml new file mode 100644 index 0000000..9bbfd29 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.6.yml @@ -0,0 +1,35 @@ +--- + +# this will just display the list of consoles. The site will need to confirm the allowed consoles are correct and change manually if needed. 
+- name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console" + block: + - name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console | Check if securetty file exists" + stat: + path: /etc/securetty + register: rhel8cis_securetty_check + + - name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console | Capture consoles" + command: cat /etc/securetty + changed_when: false + register: rhel_08_5_6_audit + when: rhel8cis_securetty_check.stat.exists + + - name: "5.6 | L1 | AUDIT |Ensure root login is restricted to system console | Display Console" + debug: + msg: + - "These are the consoles with root login access, please review:" + - "{{ rhel_08_5_6_audit.stdout_lines }}" + when: rhel8cis_securetty_check.stat.exists + + - name: "5.6 | L1 | AUDIT | Ensure root login is restricted to system console | Display that no securetty file exists" + debug: + msg: + - "There is no /etc/securetty file, this has been removed by default in RHEL8" + when: not rhel8cis_securetty_check.stat.exists + when: + - rhel8cis_rule_5_6 + tags: + - level1-server + - level1-workstation + - audit + - rule_5.6 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.7.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.7.yml new file mode 100644 index 0000000..45e41dd --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/cis_5.7.yml @@ -0,0 +1,22 @@ +--- + +- name: "5.7 | L1 | PATCH | Ensure access to the su command is restricted" + block: + - name: "5.7 | L1 | PATCH | Ensure access to the su command is restricted | Setting pam_wheel to use_uid" + lineinfile: + state: present + dest: /etc/pam.d/su + regexp: '^(#)?auth\s+required\s+pam_wheel\.so' + line: 'auth required pam_wheel.so use_uid {% if rhel8cis_sugroup is defined %}group={{ rhel8cis_sugroup }}{% endif %}' + + - name: "5.7 | L1 | PATCH | Ensure access to the su command is restricted | wheel group contains root" + user: + name: "{{ rhel8cis_sugroup_users }}" + groups: "{{ rhel8cis_sugroup | default('wheel') }}" + when: + - rhel8cis_rule_5_7 + tags: + - level1-server + - level1-workstation + - patch + - rule_5.7 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/main.yml new file mode 100644 index 0000000..7d6a203 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_5/main.yml @@ -0,0 +1,27 @@ +--- + +- name: "SECTION | 5.1 | Configure time-based job schedulers" + include: cis_5.1.x.yml + +- name: "SECTION | 5.2 | Configure SSH Server" + include: cis_5.2.x.yml + +- name: "SECTION | 5.3 | Configure Profiles" + include: cis_5.3.x.yml + when: + - rhel8cis_use_authconfig + +- name: "SECTION | 5.4 | Configure PAM " + include: cis_5.4.x.yml + +- name: "SECTION | 5.5.1.x | Passwords and Accounts" + include: cis_5.5.1.x.yml + +- name: "SECTION | 5.5.x | System Accounts and User Settings" + include: cis_5.5.x.yml + +- name: "SECTION | 5.6 | Root Login" + include: cis_5.6.yml + +- name: Section | 5.7 | su Command Restriction + include: cis_5.7.yml diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.1.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.1.x.yml new file mode 100644 index 0000000..ccdc019 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.1.x.yml @@ -0,0 +1,294 @@ +--- + +- name: "6.1.1 | L2 | AUDIT | Audit system file permissions" + block: + - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Audit the packages" + shell: rpm -Va 
--nomtime --nosize --nomd5 --nolinkto
+      args:
+        warn: no
+      changed_when: false
+      failed_when: false
+      register: rhel8cis_6_1_1_packages_rpm
+
+    - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Create list and warning"
+      block:
+        - name: "6.1.1 | L2 | Audit system file permissions | Add file discrepancy list to system"
+          copy:
+            dest: "{{ rhel8cis_rpm_audit_file }}"
+            content: "{{ rhel8cis_6_1_1_packages_rpm.stdout }}"
+
+        - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Message out alert for package discrepancies"
+          debug:
+            msg: |
+              "Warning! You have some package discrepancy issues.
+               The file list can be found in {{ rhel8cis_rpm_audit_file }}"
+          when: rhel8cis_6_1_1_packages_rpm.stdout|length > 0
+
+        - name: "6.1.1 | L2 | AUDIT | Audit system file permissions | Message out no package discrepancies"
+          debug:
+            msg: "Good News! There are no package discrepancies"
+          when: rhel8cis_6_1_1_packages_rpm.stdout|length == 0
+  when:
+    - rhel8cis_rule_6_1_1
+  tags:
+    - level2-server
+    - level2-workstation
+    - audit
+    - rule_6.1.1
+
+- name: "6.1.2 | L1 | PATCH | Ensure permissions on /etc/passwd are configured"
+  file:
+    dest: /etc/passwd
+    owner: root
+    group: root
+    mode: 0644
+  when:
+    - rhel8cis_rule_6_1_2
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.2
+
+- name: "6.1.3 | L1 | PATCH | Ensure permissions on /etc/passwd- are configured"
+  file:
+    dest: /etc/passwd-
+    owner: root
+    group: root
+    mode: 0644
+  when:
+    - rhel8cis_rule_6_1_3
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.3
+
+- name: "6.1.4 | L1 | PATCH | Ensure permissions on /etc/shadow are configured"
+  file:
+    dest: /etc/shadow
+    owner: root
+    group: root
+    mode: 0000
+  when:
+    - rhel8cis_rule_6_1_4
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.4
+
+- name: "6.1.5 | L1 | PATCH | Ensure permissions on /etc/shadow- are configured"
+  file:
+    dest: /etc/shadow-
+    owner: root
+    group: root
+    mode: 0000
+  when:
+    - rhel8cis_rule_6_1_5
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.5
+
+- name: "6.1.6 | L1 | PATCH | Ensure permissions on /etc/gshadow are configured"
+  file:
+    dest: /etc/gshadow
+    owner: root
+    group: root
+    mode: 0000
+  when:
+    - rhel8cis_rule_6_1_6
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.6
+
+- name: "6.1.7 | L1 | PATCH | Ensure permissions on /etc/gshadow- are configured"
+  file:
+    dest: /etc/gshadow-
+    owner: root
+    group: root
+    mode: 0000
+  when:
+    - rhel8cis_rule_6_1_7
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.7
+
+- name: "6.1.8 | L1 | PATCH | Ensure permissions on /etc/group are configured"
+  file:
+    dest: /etc/group
+    owner: root
+    group: root
+    mode: 0644
+  when:
+    - rhel8cis_rule_6_1_8
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.8
+
+- name: "6.1.9 | L1 | PATCH | Ensure permissions on /etc/group- are configured"
+  file:
+    dest: /etc/group-
+    owner: root
+    group: root
+    mode: 0644
+  when:
+    - rhel8cis_rule_6_1_9
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.9
+
+- name: "6.1.10 | L1 | PATCH | Ensure no world writable files exist"
+  block:
+    - name: "6.1.10 | L1 | AUDIT | Ensure no world writable files exist | Get list of world-writable files"
+      shell: df --local -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type f -perm -0002
+      failed_when: false
+      changed_when: false
+      register: rhel_08_6_1_10_perms_results
+
+    - name: "6.1.10 | L1 | AUDIT | 
Ensure no world writable files exist | Alert no world-writable files exist" + debug: + msg: "Good news! We have not found any world-writable files on your system" + when: + - rhel_08_6_1_10_perms_results.stdout is not defined + + - name: "6.1.10 | L1 | PATCH | Ensure no world writable files exist | Adjust world-writable files if they exist (Configurable)" + file: + path: '{{ item }}' + mode: o-w + state: touch + with_items: "{{ rhel_08_6_1_10_perms_results.stdout_lines }}" + when: + - rhel_08_6_1_10_perms_results.stdout_lines is defined + - rhel8cis_no_world_write_adjust + when: + - rhel8cis_rule_6_1_10 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.1.10 + +- name: "6.1.11 | L1 | AUDIT | Ensure no unowned files or directories exist" + block: + - name: "6.1.11 | L1 | AUDIT | Ensure no unowned files or directories exist | Finding all unowned files or directories" + command: find "{{ item.mount }}" -xdev -nouser + check_mode: false + failed_when: false + changed_when: false + with_items: "{{ ansible_mounts }}" + register: rhel_08_6_1_11_audit + when: item['device'].startswith('/dev') and not 'bind' in item['options'] + + - name: "6.1.11 | L1 | AUDIT | Ensure no unowned files or directories exist | Displaying any unowned files or directories" + debug: + msg: "Manual intervention is required -- missing owner on items in {{ item.item.mount }}: {{ item.stdout_lines | join(', ') }}" + with_items: "{{ rhel_08_6_1_11_audit.results }}" + when: + - item.stdout_lines is defined + - item.stdout_lines | length > 0 + when: + - rhel8cis_rule_6_1_11 + tags: + - level1-server + - level1-workstation + - audit + - rule_6.1.11 + +- name: "6.1.12 | L1 | AUDIT | Ensure no ungrouped files or directories exist" + block: + - name: "6.1.12 | L1 | AUDIT | Ensure no ungrouped files or directories exist | Finding all ungrouped files or directories" + command: find "{{ item.mount }}" -xdev -nogroup + check_mode: false + failed_when: false + changed_when: false + register: rhel_08_6_1_12_audit + with_items: "{{ ansible_mounts }}" + when: item['device'].startswith('/dev') and not 'bind' in item['options'] + + - name: "6.1.12 | L1 | AUDIT | Ensure no ungrouped files or directories exist | Displaying all ungrouped files or directories" + debug: + msg: "Manual intervention is required -- missing group on items in {{ item.item.mount }}: {{ item.stdout_lines | join(', ') }}" + with_items: "{{ rhel_08_6_1_12_audit.results }}" + when: + - item.stdout_lines is defined + - item.stdout_lines | length > 0 + when: + - rhel8cis_rule_6_1_12 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.1.12 + +- name: "6.1.13 | L1 | AUDIT | Audit SUID executables" + block: + - name: "6.1.13 | L1 | AUDIT | Audit SUID executables | Find all SUID executables" + shell: df {{ item.mount }} -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type f -perm -4000 + failed_when: false + changed_when: false + register: rhel_08_6_1_13_perms_results + with_items: "{{ ansible_mounts }}" + + - name: "6.1.13 | L1 | AUDIT | Audit SUID executables | Alert no SUID executables exist" + debug: + msg: "Good news! 
We have not found any SUID executable files on your system"
+      failed_when: false
+      changed_when: false
+      when:
+        - rhel_08_6_1_13_perms_results.stdout is not defined
+
+    - name: "6.1.13 | L1 | AUDIT | Audit SUID executables | Alert SUID executables exist"
+      debug:
+        msg: "Manual intervention is required -- SUID set on items in {{ item.item.mount }}: {{ item.stdout_lines | join(', ') }}"
+      with_items: "{{ rhel_08_6_1_13_perms_results.results }}"
+      when:
+        - item.stdout_lines is defined and item.stdout_lines | length > 0
+  when:
+    - rhel8cis_rule_6_1_13
+  tags:
+    - level1-server
+    - level1-workstation
+    - audit
+    - rule_6.1.13
+
+- name: "6.1.14 | L1 | AUDIT | Audit SGID executables"
+  block:
+    - name: "6.1.14 | L1 | AUDIT | Audit SGID executables | Find all SGID executables"
+      shell: df {{ item.mount }} -P | awk {'if (NR!=1) print $6'} | xargs -I '{}' find '{}' -xdev -type f -perm -2000
+      failed_when: false
+      changed_when: false
+      register: rhel_08_6_1_14_perms_results
+      with_items: "{{ ansible_mounts }}"
+
+    - name: "6.1.14 | L1 | AUDIT | Audit SGID executables | Alert no SGID executables exist"
+      debug:
+        msg: "Good news! We have not found any SGID executable files on your system"
+      failed_when: false
+      changed_when: false
+      when:
+        - rhel_08_6_1_14_perms_results.stdout is not defined
+
+    - name: "6.1.14 | L1 | AUDIT | Audit SGID executables | Alert SGID executables exist"
+      debug:
+        msg: "Manual intervention is required -- SGID set on items in {{ item.item.mount }}: {{ item.stdout_lines | join(', ') }}"
+      with_items: "{{ rhel_08_6_1_14_perms_results.results }}"
+      when:
+        - item.stdout_lines is defined and item.stdout_lines | length > 0
+  when:
+    - rhel8cis_rule_6_1_14
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.1.14
diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.2.x.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.2.x.yml
new file mode 100644
index 0000000..42f22dc
--- /dev/null
+++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/cis_6.2.x.yml
@@ -0,0 +1,519 @@
+---
+
+- name: "6.2.1 | L1 | AUDIT | Ensure password fields are not empty"
+  command: passwd -l {{ item }}
+  changed_when: false
+  failed_when: false
+  with_items: "{{ empty_password_accounts.stdout_lines }}"
+  when:
+    - empty_password_accounts.rc
+    - rhel8cis_rule_6_2_1
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.2.1
+
+- name: "6.2.2 | L1 | PATCH | Ensure no legacy '+' entries exist in /etc/passwd"
+  command: sed -i '/^+/ d' /etc/passwd
+  changed_when: false
+  failed_when: false
+  when:
+    - rhel8cis_rule_6_2_2
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.2.2
+    - skip_ansible_lint
+
+- name: "6.2.3 | L1 | PATCH | Ensure root PATH Integrity"
+  block:
+    - name: "6.2.3 | L1 | AUDIT | Ensure root PATH Integrity | Determine empty value"
+      shell: 'echo $PATH | grep ::'
+      check_mode: no
+      register: path_colon
+      changed_when: False
+      failed_when: path_colon.rc == 0
+
+    - name: "6.2.3 | L1 | AUDIT | Ensure root PATH Integrity | Determine colon end"
+      shell: 'echo $PATH | grep :$'
+      check_mode: no
+      register: path_colon_end
+      changed_when: False
+      failed_when: path_colon_end.rc == 0
+
+    - name: "6.2.3 | L1 | AUDIT | Ensure root PATH Integrity | Determine dot in path"
+      shell: "/bin/bash --login -c 'env | grep ^PATH=' | sed -e 's/PATH=//' -e 's/::/:/' -e 's/:$//' -e 's/:/\\n/g'"
+      check_mode: no
+      register: dot_in_path
+      changed_when: False
+      failed_when: '"." 
in dot_in_path.stdout_lines' + + - name: "6.2.3 | L1 | AUDIT | Ensure root PATH Integrity | Alert on empty value, colon end, and dot in path" + debug: + msg: + - "The following paths have an empty value: {{ path_colon.stdout_lines }}" + - "The following paths have colon end: {{ path_colon_end.stdout_lines }}" + - "The following paths have a dot in the path: {{ dot_in_path.stdout_lines }}" + + - name: "6.2.3 | L1 | PATCH | Ensure root PATH Integrity (Scored) | Determine rights and owner" + file: > + path='{{ item }}' + follow=yes + state=directory + owner=root + mode='o-w,g-w' + with_items: "{{ dot_in_path.stdout_lines }}" + when: + - rhel8cis_rule_6_2_3 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.3 + +- name: "6.2.4 | L1 | PATCH | Ensure no legacy '+' entries exist in /etc/shadow" + command: sed -i '/^+/ d' /etc/shadow + changed_when: false + failed_when: false + when: + - rhel8cis_rule_6_2_4 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.4 + - skip_ansible_lint + +- name: "6.2.5 | L1 | PATCH | Ensure no legacy '+' entries exist in /etc/group" + command: sed -i '/^+/ d' /etc/group + changed_when: false + failed_when: false + when: + - rhel8cis_rule_6_2_5 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.5 + - skip_ansible_lint + +- name: "6.2.6 | L1 | PATCH | Ensure root is the only UID 0 account" + command: passwd -l {{ item }} + changed_when: false + failed_when: false + with_items: "{{ uid_zero_accounts_except_root.stdout_lines }}" + when: + - uid_zero_accounts_except_root.rc + - rhel8cis_rule_6_2_6 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.6 + +- name: "6.2.7 | L1 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + block: + - name: "6.2.7 | L1 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive" + stat: + path: "{{ item }}" + with_items: "{{ rhel8cis_passwd | selectattr('uid', '>=', rhel8cis_int_gid) | selectattr('uid', '!=', 65534) | map(attribute='dir') | list }}" + register: rhel_08_6_2_7_audit + + - name: "6.2.7 | L1 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive" + command: find -H {{ item.0 | quote }} -not -type l -perm /027 + check_mode: false + changed_when: rhel_08_6_2_7_patch_audit.stdout | length > 0 + register: rhel_08_6_2_7_patch_audit + when: + - ansible_check_mode + - item.1.exists + with_together: + - "{{ rhel_08_6_2_7_audit.results | map(attribute='item') | list }}" + - "{{ rhel_08_6_2_7_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + - name: "6.2.7 | L1 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + file: + path: "{{ item.0 }}" + recurse: yes + mode: a-st,g-w,o-rwx + register: rhel_08_6_2_7_patch + when: + - not ansible_check_mode + - item.1.exists + with_together: + - "{{ rhel_08_6_2_7_audit.results | map(attribute='item') | list }}" + - "{{ rhel_08_6_2_7_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + # set default ACLs so the homedir has an effective umask of 0027 + - name: "6.2.7 | L1 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + acl: + path: "{{ item.0 }}" + default: yes + state: present + recursive: yes + etype: "{{ item.1.etype }}" + permissions: "{{ item.1.mode }}" + when: not rhel8cis_system_is_container + with_nested: + - "{{ (ansible_check_mode | ternary(rhel_08_6_2_7_patch_audit, 
rhel_08_6_2_7_patch)).results | + rejectattr('skipped', 'defined') | map(attribute='item') | map('first') | list }}" + - + - etype: group + mode: rx + - etype: other + mode: '0' + when: + - rhel8cis_rule_6_2_7 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.7 + +- name: "6.2.8 | L1 | PATCH | Ensure users own their home directories" + file: + path: "{{ item.dir }}" + owner: "{{ item.id }}" + state: directory + with_items: "{{ rhel8cis_passwd }}" + loop_control: + label: "{{ rhel8cis_passwd_label }}" + when: + - item.uid >= rhel8cis_int_gid + - rhel8cis_rule_6_2_8 + tags: + - skip_ansible_lint # settings found on 6_2_7 + - level1-server + - level1-workstation + - patch + - rule_6.2.8 + +- name: "6.2.9 | L1 | PATCH | Ensure users' dot files are not group or world-writable" + block: + - name: "6.2.9 | L1 | AUDIT | Ensure users' dot files are not group or world-writable | Check for files" + shell: find /home/ -name "\.*" -perm /g+w,o+w + changed_when: false + failed_when: false + register: rhel8cis_6_2_9_audit + + - name: "6.2.9 | L1 | AUDIT | Ensure users' dot files are not group or world-writable | Alert on files found" + debug: + msg: "Good news! We have not found any group or world-writable dot files on your sytem" + when: + - rhel8cis_6_2_9_audit.stdout is not defined + + - name: "6.2.9 | L1 | PATCH | Ensure users' dot files are not group or world-writable | Changes files if configured" + file: + path: '{{ item }}' + mode: go-w + with_items: "{{ rhel8cis_6_2_9_audit.stdout_lines }}" + when: + - rhel8cis_6_2_9_audit.stdout is defined + - rhel8cis_dotperm_ansiblemanaged + when: + - rhel8cis_rule_6_2_9 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.9 + +- name: "6.2.10 | L1 | PATCH | Ensure no users have .forward files" + file: + state: absent + dest: "~{{ item }}/.forward" + with_items: "{{ users.stdout_lines }}" + when: + - rhel8cis_rule_6_2_10 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.10 + +- name: "6.2.11 | L1 | PATCH | Ensure no users have .netrc files" + file: + state: absent + dest: "~{{ item }}/.netrc" + with_items: "{{ users.stdout_lines }}" + when: + - rhel8cis_rule_6_2_11 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.11 + +- name: "6.2.12 | L1 | PATCH | Ensure users' .netrc Files are not group or world accessible" + command: /bin/true + changed_when: false + failed_when: false + when: + - rhel8cis_rule_6_2_12 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.12 + +- name: "6.2.13 | L1 | PATCH | Ensure no users have .rhosts files" + file: + state: absent + dest: "~{{ item }}/.rhosts" + with_items: "{{ users.stdout_lines }}" + when: + - rhel8cis_rule_6_2_13 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.13 + +- name: "6.2.14 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group" + block: + - name: "6.2.14 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Check /etc/passwd entries" + shell: pwck -r | grep 'no group' | awk '{ gsub("[:\47]",""); print $2}' + changed_when: false + failed_when: false + check_mode: false + register: passwd_gid_check + + - name: "6.2.14 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print message that all groups match between passwd and group files" + debug: + msg: "Good News! 
There are no users that have non-existent GIDs (Groups)"
+      when: passwd_gid_check.stdout is not defined
+
+    - name: "6.2.14 | L1 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print warning about users with GIDs missing from /etc/group"
+      debug:
+        msg: "WARNING: The following users have non-existent GIDs (Groups): {{ passwd_gid_check.stdout_lines | join (', ') }}"
+      when: passwd_gid_check.stdout is defined
+  when:
+    - rhel8cis_rule_6_2_14
+  tags:
+    - level1-server
+    - level1-workstation
+    - audit
+    - rule_6.2.14
+
+- name: "6.2.15 | L1 | AUDIT | Ensure no duplicate UIDs exist"
+  block:
+    - name: "6.2.15 | L1 | AUDIT | Ensure no duplicate UIDs exist | Check for duplicate UIDs"
+      shell: "pwck -r | awk -F: '{if ($3 in uid) print $1 ; else uid[$3]}' /etc/passwd"
+      changed_when: false
+      failed_when: false
+      register: user_uid_check
+
+    - name: "6.2.15 | L1 | AUDIT | Ensure no duplicate UIDs exist | Print message that no duplicate UIDs exist"
+      debug:
+        msg: "Good News! There are no duplicate UIDs in the system"
+      when: user_uid_check.stdout is not defined
+
+    - name: "6.2.15 | L1 | AUDIT | Ensure no duplicate UIDs exist | Print warning about users with duplicate UIDs"
+      debug:
+        msg: "Warning: The following users have UIDs that are duplicates: {{ user_uid_check.stdout_lines }}"
+      when: user_uid_check.stdout is defined
+  when:
+    - rhel8cis_rule_6_2_15
+  tags:
+    - level1-server
+    - level1-workstation
+    - patch
+    - rule_6.2.15
+
+- name: "6.2.16 | L1 | AUDIT | Ensure no duplicate GIDs exist"
+  block:
+    - name: "6.2.16 | L1 | AUDIT | Ensure no duplicate GIDs exist | Check for duplicate GIDs"
+      shell: "pwck -r | awk -F: '{if ($3 in users) print $1 ; else users[$3]}' /etc/group"
+      changed_when: false
+      failed_when: false
+      register: user_user_check
+
+    - name: "6.2.16 | L1 | AUDIT | Ensure no duplicate GIDs exist | Print message that no duplicate GIDs exist"
+      debug:
+        msg: "Good News! There are no duplicate GIDs in the system"
+      when: user_user_check.stdout is not defined
+
+    - name: "6.2.16 | L1 | AUDIT | Ensure no duplicate GIDs exist | Print warning about users with duplicate GIDs"
+      debug:
+        msg: "Warning: The following groups have duplicate GIDs: {{ user_user_check.stdout_lines }}"
+      when: user_user_check.stdout is defined
+  when:
+    - rhel8cis_rule_6_2_16
+  tags:
+    - level1-server
+    - level1-workstation
+    - audit
+    - rule_6.2.16
+
+- name: "6.2.17 | L1 | AUDIT | Ensure no duplicate user names exist"
+  block:
+    - name: "6.2.17 | L1 | AUDIT | Ensure no duplicate user names exist | Check for duplicate User Names"
+      shell: "pwck -r | awk -F: '{if ($1 in users) print $1 ; else users[$1]}' /etc/passwd"
+      changed_when: false
+      failed_when: false
+      register: user_username_check
+
+    - name: "6.2.17 | L1 | AUDIT | Ensure no duplicate user names exist | Print message that no duplicate user names exist"
+      debug:
+        msg: "Good News! 
There are no duplicate user names in the system"
+      when: user_username_check.stdout is not defined
+
+    - name: "6.2.17 | L1 | AUDIT | Ensure no duplicate user names exist | Print warning about users with duplicate User Names"
+      debug:
+        msg: "Warning: The following user names are duplicates: {{ user_username_check.stdout_lines }}"
+      when: user_username_check.stdout is defined
+  when:
+    - rhel8cis_rule_6_2_17
+  tags:
+    - level1-server
+    - level1-workstation
+    - audit
+    - rule_6.2.17
+
+- name: "6.2.18 | L1 | AUDIT | Ensure no duplicate group names exist"
+  block:
+    - name: "6.2.18 | L1 | AUDIT | Ensure no duplicate group names exist | Check for duplicate group names"
+      shell: 'getent group | cut -d: -f1 | sort -n | uniq -d'
+      changed_when: false
+      failed_when: false
+      check_mode: no
+      register: group_group_check
+
+    - name: "6.2.18 | L1 | AUDIT | Ensure no duplicate group names exist | Print message that no duplicate groups exist"
+      debug:
+        msg: "Good News! There are no duplicate group names in the system"
+      when: group_group_check.stdout is not defined
+
+    - name: "6.2.18 | L1 | AUDIT | Ensure no duplicate group names exist | Print warning about users with duplicate group names"
+      debug:
+        msg: "Warning: The following group names are duplicates: {{ group_group_check.stdout_lines }}"
+      when: group_group_check.stdout is defined
+  when:
+    - rhel8cis_rule_6_2_18
+  tags:
+    - level1-server
+    - level1-workstation
+    - audit
+    - rule_6.2.18
+
+- name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty"
+  block:
+    - name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty | Check for shadow group and pull group id"
+      shell: "getent group shadow | cut -d: -f3"
+      changed_when: false
+      failed_when: false
+      check_mode: no
+      register: rhel8cis_shadow_gid
+
+    - name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty | Check /etc/group for empty shadow group"
+      shell: grep ^shadow:[^:]*:[^:]*:[^:]+ /etc/group
+      changed_when: false
+      failed_when: false
+      check_mode: no
+      register: rhel8cis_empty_shadow
+
+    - name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty | Check for users assigned to shadow"
+      shell: "getent passwd | awk -F: '$4 == '{{ rhel8cis_shadow_gid.stdout }}' {print $1}'"
+      changed_when: false
+      failed_when: false
+      check_mode: no
+      register: rhel8cis_shadow_passwd
+
+    - name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty | Alert shadow group is empty and no users assigned"
+      debug:
+        msg:
+          - "Good News! The shadow group is empty and there are no users assigned to shadow"
+      when:
+        - rhel8cis_empty_shadow.stdout | length == 0
+        - rhel8cis_shadow_passwd.stdout | length == 0
+
+    - name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty | Alert shadow group is not empty"
+      debug:
+        msg:
+          - "Alert! The shadow group is not empty"
+      when:
+        - rhel8cis_empty_shadow.stdout | length > 0
+
+    - name: "6.2.19 | L1 | AUDIT | Ensure shadow group is empty | Alert users are using shadow group"
+      debug:
+        msg:
+          - "Alert! 
The following users are assigned to the shadow group, please assing them to the appropriate group" + - "{{ rhel8cis_shadow_passwd.stdout_lines }}" + when: + - rhel8cis_shadow_passwd.stdout | length > 0 + when: + - rhel8cis_rule_6_2_19 + tags: + - level1-server + - level1-workstation + - audit + - rule_6.2.19 + +- name: "6.2.20 | L1 | PATCH | Ensure all users' home directories exist" + block: + - name: "6.2.20 | L1 | AUDIT | Ensure all users' home directories exist" + stat: + path: "{{ item }}" + register: rhel_08_6_2_20_audit + with_items: "{{ rhel8cis_passwd | selectattr('uid', '>=', rhel8cis_int_gid) | selectattr('uid', '!=', 65534) | map(attribute='dir') | list }}" + + - name: "6.2.20 | L1 | AUDIT | Ensure all users' home directories exist" + command: find -H {{ item.0 | quote }} -not -type l -perm /027 + check_mode: false + changed_when: rhel_08_6_2_20_patch_audit.stdout | length > 0 + register: rhel_08_6_2_20_patch_audit + when: + - ansible_check_mode + - item.1.exists + with_together: + - "{{ rhel_08_6_2_20_audit.results | map(attribute='item') | list }}" + - "{{ rhel_08_6_2_20_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + - name: "6.2.20 | L1 | PATCH | Ensure all users' home directories exist" + file: + path: "{{ item.0 }}" + recurse: yes + mode: a-st,g-w,o-rwx + register: rhel_08_6_2_20_patch + when: + - not ansible_check_mode + - item.1.exists + with_together: + - "{{ rhel_08_6_2_20_audit.results | map(attribute='item') | list }}" + - "{{ rhel_08_6_2_20_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + # set default ACLs so the homedir has an effective umask of 0027 + - name: "6.2.20 | L1 | PATCH | Ensure all users' home directories exist" + acl: + path: "{{ item.0 }}" + default: yes + state: present + recursive: yes + etype: "{{ item.1.etype }}" + permissions: "{{ item.1.mode }}" + when: not rhel8cis_system_is_container + with_nested: + - "{{ (ansible_check_mode | ternary(rhel_08_6_2_20_patch_audit, rhel_08_6_2_20_patch)).results | + rejectattr('skipped', 'defined') | map(attribute='item') | map('first') | list }}" + - + - etype: group + mode: rx + - etype: other + mode: '0' + when: + - rhel8cis_rule_6_2_20 + tags: + - level1-server + - level1-workstation + - patch + - rule_6.2.20 diff --git a/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/main.yml new file mode 100644 index 0000000..bf6943a --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/tasks/section_6/main.yml @@ -0,0 +1,7 @@ +--- + +- name: "SECTION | 6.1 | System File Permissions" + include: cis_6.1.x.yml + +- name: "SECTION | 6.2 | User and Group Settings" + include: cis_6.2.x.yml diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/ansible_vars_goss.yml.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/ansible_vars_goss.yml.j2 new file mode 100644 index 0000000..652b968 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/ansible_vars_goss.yml.j2 @@ -0,0 +1,474 @@ +## metadata for Audit benchmark +benchmark_version: '1.0.1' + +# Set if genuine RHEL (subscription manager check) not for derivatives e.g. 
CentOS +is_redhat_os: {% if ansible_distribution == "RedHat" %}true{% else %}false{% endif %} + +rhel8cis_os_distribution: {{ ansible_distribution | lower }} + +# timeout for each command to run where set - default = 10seconds/10000ms +timeout_ms: {{ audit_cmd_timeout }} + +# Taken from LE rhel8-cis +rhel8cis_notauto: {{ rhel8cis_notauto }} +rhel8cis_section1: {{ rhel8cis_section1 }} +rhel8cis_section2: {{ rhel8cis_section2 }} +rhel8cis_section3: {{ rhel8cis_section3 }} +rhel8cis_section4: {{ rhel8cis_section4 }} +rhel8cis_section5: {{ rhel8cis_section5 }} +rhel8cis_section6: {{ rhel8cis_section6 }} + +rhel8cis_level_1: {{ rhel8cis_level_1 }} +rhel8cis_level_2: {{ rhel8cis_level_2 }} + +rhel8cis_selinux_disable: {{ rhel8cis_selinux_disable }} + + + +# to enable rules that may have IO impact on a system e.g. full filesystem scans or CPU heavy +run_heavy_tests: true +{% if rhel8cis_legacy_boot is defined %} +rhel8cis_legacy_boot: {{ rhel8cis_legacy_boot }} +{% endif %} + + +rhel8cis_set_boot_pass: {{ rhel8cis_set_boot_pass }} +# These variables correspond with the CIS rule IDs or paragraph numbers defined in +# the CIS benchmark documents. +# PLEASE NOTE: These work in coordination with the section # group variables and tags. +# You must enable an entire section in order for the variables below to take effect. +# Section 1 rules +rhel8cis_rule_1_1_1_1: {{ rhel8cis_rule_1_1_1_1 }} +rhel8cis_rule_1_1_1_2: {{ rhel8cis_rule_1_1_1_2 }} +rhel8cis_rule_1_1_1_3: {{ rhel8cis_rule_1_1_1_3 }} +rhel8cis_rule_1_1_1_4: {{ rhel8cis_rule_1_1_1_4 }} +rhel8cis_rule_1_1_2: {{ rhel8cis_rule_1_1_2 }} +rhel8cis_rule_1_1_3: {{ rhel8cis_rule_1_1_3 }} +rhel8cis_rule_1_1_4: {{ rhel8cis_rule_1_1_4 }} +rhel8cis_rule_1_1_5: {{ rhel8cis_rule_1_1_5 }} +rhel8cis_rule_1_1_6: {{ rhel8cis_rule_1_1_6 }} +rhel8cis_rule_1_1_7: {{ rhel8cis_rule_1_1_7 }} +rhel8cis_rule_1_1_8: {{ rhel8cis_rule_1_1_8 }} +rhel8cis_rule_1_1_9: {{ rhel8cis_rule_1_1_9 }} +rhel8cis_rule_1_1_10: {{ rhel8cis_rule_1_1_10 }} +rhel8cis_rule_1_1_11: {{ rhel8cis_rule_1_1_11 }} +rhel8cis_rule_1_1_12: {{ rhel8cis_rule_1_1_12 }} +rhel8cis_rule_1_1_13: {{ rhel8cis_rule_1_1_13 }} +rhel8cis_rule_1_1_14: {{ rhel8cis_rule_1_1_14 }} +rhel8cis_rule_1_1_15: {{ rhel8cis_rule_1_1_15 }} +rhel8cis_rule_1_1_16: {{ rhel8cis_rule_1_1_16 }} +rhel8cis_rule_1_1_17: {{ rhel8cis_rule_1_1_17 }} +rhel8cis_rule_1_1_18: {{ rhel8cis_rule_1_1_18 }} +rhel8cis_rule_1_1_19: {{ rhel8cis_rule_1_1_19 }} +rhel8cis_rule_1_1_20: {{ rhel8cis_rule_1_1_20 }} +rhel8cis_rule_1_1_21: {{ rhel8cis_rule_1_1_21 }} +rhel8cis_rule_1_1_22: {{ rhel8cis_rule_1_1_22 }} +rhel8cis_rule_1_1_23: {{ rhel8cis_rule_1_1_23 }} +rhel8cis_rule_1_2_1: {% if ansible_distribution == "RedHat" %}True{% else %}False{% endif %} # Only run if Redhat and Subscribed +rhel8cis_rule_1_2_2: {{ rhel8cis_rule_1_2_2 }} +rhel8cis_rule_1_2_3: {{ rhel8cis_rule_1_2_3 }} +rhel8cis_rule_1_2_4: {{ rhel8cis_rule_1_2_4 }} +rhel8cis_rule_1_2_5: {{ rhel8cis_rule_1_2_5 }} +rhel8cis_rule_1_3_1: {{ rhel8cis_rule_1_3_1 }} +rhel8cis_rule_1_3_2: {{ rhel8cis_rule_1_3_2 }} +rhel8cis_rule_1_3_3: {{ rhel8cis_rule_1_3_3 }} +rhel8cis_rule_1_4_1: {{ rhel8cis_rule_1_4_1 }} +rhel8cis_rule_1_4_2: {{ rhel8cis_rule_1_4_2 }} +rhel8cis_rule_1_5_1: {{ rhel8cis_rule_1_5_1 }} +rhel8cis_rule_1_5_2: {{ rhel8cis_rule_1_5_2 }} +rhel8cis_rule_1_5_3: {{ rhel8cis_rule_1_5_3 }} +rhel8cis_rule_1_6_1: {{ rhel8cis_rule_1_6_1 }} +rhel8cis_rule_1_6_2: {{ rhel8cis_rule_1_6_2 }} +rhel8cis_rule_1_7_1_1: {{ rhel8cis_rule_1_7_1_1 }} +rhel8cis_rule_1_7_1_2: {{ rhel8cis_rule_1_7_1_2 }} 
+rhel8cis_rule_1_7_1_3: {{ rhel8cis_rule_1_7_1_3 }} +rhel8cis_rule_1_7_1_4: {{ rhel8cis_rule_1_7_1_4 }} +rhel8cis_rule_1_7_1_5: {{ rhel8cis_rule_1_7_1_5 }} +rhel8cis_rule_1_7_1_6: {{ rhel8cis_rule_1_7_1_6 }} +rhel8cis_rule_1_7_1_7: {{ rhel8cis_rule_1_7_1_7 }} +rhel8cis_rule_1_8_1_1: {{ rhel8cis_rule_1_8_1_1 }} +rhel8cis_rule_1_8_1_2: {{ rhel8cis_rule_1_8_1_2 }} +rhel8cis_rule_1_8_1_3: {{ rhel8cis_rule_1_8_1_3 }} +rhel8cis_rule_1_8_1_4: {{ rhel8cis_rule_1_8_1_4 }} +rhel8cis_rule_1_8_1_5: {{ rhel8cis_rule_1_8_1_5 }} +rhel8cis_rule_1_8_1_6: {{ rhel8cis_rule_1_8_1_6 }} +rhel8cis_rule_1_8_2: {{ rhel8cis_rule_1_8_2 }} +rhel8cis_rule_1_9: {{ rhel8cis_rule_1_9 }} +rhel8cis_rule_1_10: {{ rhel8cis_rule_1_10 }} +rhel8cis_rule_1_11: {{ rhel8cis_rule_1_11 }} + +# section 2 rules +rhel8cis_rule_2_1_1: {{ rhel8cis_rule_2_1_1 }} +rhel8cis_rule_2_2_1_1: {{ rhel8cis_rule_2_2_1_1 }} +rhel8cis_rule_2_2_1_2: {{ rhel8cis_rule_2_2_1_2 }} +rhel8cis_rule_2_2_2: {{ rhel8cis_rule_2_2_2 }} +rhel8cis_rule_2_2_3: {{ rhel8cis_rule_2_2_3 }} +rhel8cis_rule_2_2_4: {{ rhel8cis_rule_2_2_4 }} +rhel8cis_rule_2_2_5: {{ rhel8cis_rule_2_2_5 }} +rhel8cis_rule_2_2_6: {{ rhel8cis_rule_2_2_6 }} +rhel8cis_rule_2_2_7: {{ rhel8cis_rule_2_2_7 }} +rhel8cis_rule_2_2_8: {{ rhel8cis_rule_2_2_8 }} +rhel8cis_rule_2_2_9: {{ rhel8cis_rule_2_2_9 }} +rhel8cis_rule_2_2_10: {{ rhel8cis_rule_2_2_10 }} +rhel8cis_rule_2_2_11: {{ rhel8cis_rule_2_2_11 }} +rhel8cis_rule_2_2_12: {{ rhel8cis_rule_2_2_12 }} +rhel8cis_rule_2_2_13: {{ rhel8cis_rule_2_2_13 }} +rhel8cis_rule_2_2_14: {{ rhel8cis_rule_2_2_14 }} +rhel8cis_rule_2_2_15: {{ rhel8cis_rule_2_2_15 }} +rhel8cis_rule_2_2_16: {{ rhel8cis_rule_2_2_16 }} +rhel8cis_rule_2_2_17: {{ rhel8cis_rule_2_2_17 }} +rhel8cis_rule_2_2_18: {{ rhel8cis_rule_2_2_18 }} +rhel8cis_rule_2_3_1: {{ rhel8cis_rule_2_3_1 }} +rhel8cis_rule_2_3_2: {{ rhel8cis_rule_2_3_2 }} +rhel8cis_rule_2_3_3: {{ rhel8cis_rule_2_3_3 }} + + +# Section 3 rules +rhel8cis_rule_3_1_1: {{ rhel8cis_rule_3_1_1 }} +rhel8cis_rule_3_1_2: {{ rhel8cis_rule_3_1_2 }} +rhel8cis_rule_3_2_1: {{ rhel8cis_rule_3_2_1 }} +rhel8cis_rule_3_2_2: {{ rhel8cis_rule_3_2_2 }} +rhel8cis_rule_3_2_3: {{ rhel8cis_rule_3_2_3 }} +rhel8cis_rule_3_2_4: {{ rhel8cis_rule_3_2_4 }} +rhel8cis_rule_3_2_5: {{ rhel8cis_rule_3_2_5 }} +rhel8cis_rule_3_2_6: {{ rhel8cis_rule_3_2_6 }} +rhel8cis_rule_3_2_7: {{ rhel8cis_rule_3_2_7 }} +rhel8cis_rule_3_2_8: {{ rhel8cis_rule_3_2_8 }} +rhel8cis_rule_3_2_9: {{ rhel8cis_rule_3_2_9 }} +rhel8cis_rule_3_3_1: {{ rhel8cis_rule_3_3_1 }} +rhel8cis_rule_3_3_2: {{ rhel8cis_rule_3_3_2 }} +rhel8cis_rule_3_3_3: {{ rhel8cis_rule_3_3_3 }} +rhel8cis_rule_3_3_4: {{ rhel8cis_rule_3_3_4 }} +rhel8cis_rule_3_4_1_1: {{ rhel8cis_rule_3_4_1_1 }} +rhel8cis_rule_3_4_2_1: {{ rhel8cis_rule_3_4_2_1 }} +rhel8cis_rule_3_4_2_2: {{ rhel8cis_rule_3_4_2_2 }} +rhel8cis_rule_3_4_2_3: {{ rhel8cis_rule_3_4_2_3 }} +rhel8cis_rule_3_4_2_4: {{ rhel8cis_rule_3_4_2_4 }} +rhel8cis_rule_3_4_2_5: {{ rhel8cis_rule_3_4_2_5 }} +rhel8cis_rule_3_4_2_6: {{ rhel8cis_rule_3_4_2_6 }} +rhel8cis_rule_3_5: {{ rhel8cis_rule_3_5 }} +rhel8cis_rule_3_6: {{ rhel8cis_rule_3_6 }} + + +# Section 4 rules +rhel8cis_rule_4_1_1_1: {{ rhel8cis_rule_4_1_1_1 }} +rhel8cis_rule_4_1_1_2: {{ rhel8cis_rule_4_1_1_2 }} +rhel8cis_rule_4_1_1_3: {{ rhel8cis_rule_4_1_1_3 }} +rhel8cis_rule_4_1_1_4: {{ rhel8cis_rule_4_1_1_4 }} +rhel8cis_rule_4_1_2_1: {{ rhel8cis_rule_4_1_2_1 }} +rhel8cis_rule_4_1_2_2: {{ rhel8cis_rule_4_1_2_2 }} +rhel8cis_rule_4_1_2_3: {{ rhel8cis_rule_4_1_2_3 }} +rhel8cis_rule_4_1_3: {{ rhel8cis_rule_4_1_3 }} 
+rhel8cis_rule_4_1_4: {{ rhel8cis_rule_4_1_4 }} +rhel8cis_rule_4_1_5: {{ rhel8cis_rule_4_1_5 }} +rhel8cis_rule_4_1_6: {{ rhel8cis_rule_4_1_6 }} +rhel8cis_rule_4_1_7: {{ rhel8cis_rule_4_1_7 }} +rhel8cis_rule_4_1_8: {{ rhel8cis_rule_4_1_8 }} +rhel8cis_rule_4_1_9: {{ rhel8cis_rule_4_1_9 }} +rhel8cis_rule_4_1_10: {{ rhel8cis_rule_4_1_10 }} +rhel8cis_rule_4_1_11: {{ rhel8cis_rule_4_1_11 }} +rhel8cis_rule_4_1_12: {{ rhel8cis_rule_4_1_12 }} +rhel8cis_rule_4_1_13: {{ rhel8cis_rule_4_1_13 }} +rhel8cis_rule_4_1_14: {{ rhel8cis_rule_4_1_14 }} +rhel8cis_rule_4_1_15: {{ rhel8cis_rule_4_1_15 }} +rhel8cis_rule_4_1_16: {{ rhel8cis_rule_4_1_16 }} +rhel8cis_rule_4_1_17: {{ rhel8cis_rule_4_1_17 }} +rhel8cis_rule_4_2_1_1: {{ rhel8cis_rule_4_2_1_1 }} +rhel8cis_rule_4_2_1_2: {{ rhel8cis_rule_4_2_1_2 }} +rhel8cis_rule_4_2_1_3: {{ rhel8cis_rule_4_2_1_3 }} +rhel8cis_rule_4_2_1_4: {{ rhel8cis_rule_4_2_1_4 }} +rhel8cis_rule_4_2_1_5: {{ rhel8cis_rule_4_2_1_5 }} +rhel8cis_rule_4_2_1_6: {{ rhel8cis_rule_4_2_1_6 }} +rhel8cis_rule_4_2_2_1: {{ rhel8cis_rule_4_2_2_1 }} +rhel8cis_rule_4_2_2_2: {{ rhel8cis_rule_4_2_2_2 }} +rhel8cis_rule_4_2_2_3: {{ rhel8cis_rule_4_2_2_3 }} +rhel8cis_rule_4_2_3: {{ rhel8cis_rule_4_2_3 }} +rhel8cis_rule_4_3: {{ rhel8cis_rule_4_3 }} + +# Section 5 +rhel8cis_rule_5_1_1: {{ rhel8cis_rule_5_1_1 }} +rhel8cis_rule_5_1_2: {{ rhel8cis_rule_5_1_2 }} +rhel8cis_rule_5_1_3: {{ rhel8cis_rule_5_1_3 }} +rhel8cis_rule_5_1_4: {{ rhel8cis_rule_5_1_4 }} +rhel8cis_rule_5_1_5: {{ rhel8cis_rule_5_1_5 }} +rhel8cis_rule_5_1_6: {{ rhel8cis_rule_5_1_6 }} +rhel8cis_rule_5_1_7: {{ rhel8cis_rule_5_1_7 }} +rhel8cis_rule_5_1_8: {{ rhel8cis_rule_5_1_8 }} + +rhel8cis_rule_5_2_1: {{ rhel8cis_rule_5_2_1 }} +rhel8cis_rule_5_2_2: {{ rhel8cis_rule_5_2_2 }} +rhel8cis_rule_5_2_3: {{ rhel8cis_rule_5_2_3 }} +rhel8cis_rule_5_2_4: {{ rhel8cis_rule_5_2_4 }} +rhel8cis_rule_5_2_5: {{ rhel8cis_rule_5_2_5 }} +rhel8cis_rule_5_2_6: {{ rhel8cis_rule_5_2_6 }} +rhel8cis_rule_5_2_7: {{ rhel8cis_rule_5_2_7 }} +rhel8cis_rule_5_2_8: {{ rhel8cis_rule_5_2_8 }} +rhel8cis_rule_5_2_9: {{ rhel8cis_rule_5_2_9 }} +rhel8cis_rule_5_2_10: {{ rhel8cis_rule_5_2_10 }} +rhel8cis_rule_5_2_11: {{ rhel8cis_rule_5_2_11 }} +rhel8cis_rule_5_2_12: {{ rhel8cis_rule_5_2_12 }} +rhel8cis_rule_5_2_13: {{ rhel8cis_rule_5_2_13 }} +rhel8cis_rule_5_2_14: {{ rhel8cis_rule_5_2_14 }} +rhel8cis_rule_5_2_15: {{ rhel8cis_rule_5_2_15 }} +rhel8cis_rule_5_2_16: {{ rhel8cis_rule_5_2_16 }} +rhel8cis_rule_5_2_17: {{ rhel8cis_rule_5_2_17 }} +rhel8cis_rule_5_2_18: {{ rhel8cis_rule_5_2_18 }} +rhel8cis_rule_5_2_19: {{ rhel8cis_rule_5_2_19 }} +rhel8cis_rule_5_2_20: {{ rhel8cis_rule_5_2_20 }} + +rhel8cis_rule_5_3_1: {{ rhel8cis_rule_5_3_1 }} +rhel8cis_rule_5_3_2: {{ rhel8cis_rule_5_3_2 }} +rhel8cis_rule_5_3_3: {{ rhel8cis_rule_5_3_3 }} + +rhel8cis_rule_5_4_1: {{ rhel8cis_rule_5_4_1 }} +rhel8cis_rule_5_4_2: {{ rhel8cis_rule_5_4_2 }} +rhel8cis_rule_5_4_3: {{ rhel8cis_rule_5_4_3 }} +rhel8cis_rule_5_4_4: {{ rhel8cis_rule_5_4_4 }} + +rhel8cis_rule_5_5_1_1: {{ rhel8cis_rule_5_5_1_1 }} +rhel8cis_rule_5_5_1_2: {{ rhel8cis_rule_5_5_1_2 }} +rhel8cis_rule_5_5_1_3: {{ rhel8cis_rule_5_5_1_3 }} +rhel8cis_rule_5_5_1_4: {{ rhel8cis_rule_5_5_1_4 }} +rhel8cis_rule_5_5_1_5: {{ rhel8cis_rule_5_5_1_5 }} + +rhel8cis_rule_5_5_2: {{ rhel8cis_rule_5_5_2 }} +rhel8cis_rule_5_5_3: {{ rhel8cis_rule_5_5_3 }} +rhel8cis_rule_5_5_4: {{ rhel8cis_rule_5_5_4 }} +rhel8cis_rule_5_5_5: {{ rhel8cis_rule_5_5_5 }} + +rhel8cis_rule_5_6: {{ rhel8cis_rule_5_6 }} +rhel8cis_rule_5_7: {{ rhel8cis_rule_5_7 }} + +# Section 6 +rhel8cis_rule_6_1_1: 
{{ rhel8cis_rule_6_1_1 }} +rhel8cis_rule_6_1_2: {{ rhel8cis_rule_6_1_2 }} +rhel8cis_rule_6_1_3: {{ rhel8cis_rule_6_1_3 }} +rhel8cis_rule_6_1_4: {{ rhel8cis_rule_6_1_4 }} +rhel8cis_rule_6_1_5: {{ rhel8cis_rule_6_1_5 }} +rhel8cis_rule_6_1_6: {{ rhel8cis_rule_6_1_6 }} +rhel8cis_rule_6_1_7: {{ rhel8cis_rule_6_1_7 }} +rhel8cis_rule_6_1_8: {{ rhel8cis_rule_6_1_8 }} +rhel8cis_rule_6_1_9: {{ rhel8cis_rule_6_1_9 }} +rhel8cis_rule_6_1_10: {{ rhel8cis_rule_6_1_10 }} +rhel8cis_rule_6_1_11: {{ rhel8cis_rule_6_1_11 }} +rhel8cis_rule_6_1_12: {{ rhel8cis_rule_6_1_12 }} +rhel8cis_rule_6_1_13: {{ rhel8cis_rule_6_1_13 }} +rhel8cis_rule_6_1_14: {{ rhel8cis_rule_6_1_14 }} + +rhel8cis_rule_6_2_1: {{ rhel8cis_rule_6_2_1 }} +rhel8cis_rule_6_2_2: {{ rhel8cis_rule_6_2_2 }} +rhel8cis_rule_6_2_3: {{ rhel8cis_rule_6_2_3 }} +rhel8cis_rule_6_2_4: {{ rhel8cis_rule_6_2_4 }} +rhel8cis_rule_6_2_5: {{ rhel8cis_rule_6_2_5 }} +rhel8cis_rule_6_2_6: {{ rhel8cis_rule_6_2_6 }} +rhel8cis_rule_6_2_7: {{ rhel8cis_rule_6_2_7 }} +rhel8cis_rule_6_2_8: {{ rhel8cis_rule_6_2_8 }} +rhel8cis_rule_6_2_9: {{ rhel8cis_rule_6_2_9 }} +rhel8cis_rule_6_2_10: {{ rhel8cis_rule_6_2_10 }} +rhel8cis_rule_6_2_11: {{ rhel8cis_rule_6_2_11 }} +rhel8cis_rule_6_2_12: {{ rhel8cis_rule_6_2_12 }} +rhel8cis_rule_6_2_13: {{ rhel8cis_rule_6_2_13 }} +rhel8cis_rule_6_2_14: {{ rhel8cis_rule_6_2_14 }} +rhel8cis_rule_6_2_15: {{ rhel8cis_rule_6_2_15 }} +rhel8cis_rule_6_2_16: {{ rhel8cis_rule_6_2_16 }} +rhel8cis_rule_6_2_17: {{ rhel8cis_rule_6_2_17 }} +rhel8cis_rule_6_2_18: {{ rhel8cis_rule_6_2_18 }} +rhel8cis_rule_6_2_19: {{ rhel8cis_rule_6_2_19 }} +rhel8cis_rule_6_2_20: {{ rhel8cis_rule_6_2_20 }} + + +# Service configuration booleans set true to keep service +rhel8cis_avahi_server: {{ rhel8cis_avahi_server }} +rhel8cis_cups_server: {{ rhel8cis_cups_server }} +rhel8cis_dhcp_server: {{ rhel8cis_dhcp_server }} +rhel8cis_ldap_server: {{ rhel8cis_ldap_server }} +rhel8cis_telnet_server: {{ rhel8cis_telnet_server }} +rhel8cis_nfs_server: {{ rhel8cis_nfs_server }} +rhel8cis_rpc_server: {{ rhel8cis_rpc_server }} +rhel8cis_ntalk_server: {{ rhel8cis_ntalk_server }} +rhel8cis_rsyncd_server: {{ rhel8cis_rsyncd_server }} +rhel8cis_tftp_server: {{ rhel8cis_tftp_server }} +rhel8cis_rsh_server: {{ rhel8cis_rsh_server }} +rhel8cis_nis_server: {{ rhel8cis_nis_server }} +rhel8cis_snmp_server: {{ rhel8cis_snmp_server }} +rhel8cis_squid_server: {{ rhel8cis_squid_server }} +rhel8cis_smb_server: {{ rhel8cis_smb_server }} +rhel8cis_dovecot_server: {{ rhel8cis_dovecot_server }} +rhel8cis_httpd_server: {{ rhel8cis_httpd_server }} +rhel8cis_vsftpd_server: {{ rhel8cis_vsftpd_server }} +rhel8cis_named_server: {{ rhel8cis_named_server }} +rhel8cis_nfs_rpc_server: {{ rhel8cis_nfs_rpc_server }} +rhel8cis_is_mail_server: {{ rhel8cis_is_mail_server }} +rhel8cis_bind: {{ rhel8cis_bind }} +rhel8cis_vsftpd: {{ rhel8cis_vsftpd }} +rhel8cis_httpd: {{ rhel8cis_httpd }} +rhel8cis_dovecot: {{ rhel8cis_dovecot }} +rhel8cis_samba: {{ rhel8cis_samba }} +rhel8cis_squid: {{ rhel8cis_squid }} +rhel8cis_net_snmp: {{ rhel8cis_net_snmp}} +rhel8cis_allow_autofs: {{ rhel8cis_allow_autofs }} + +# client services +rhel8cis_openldap_clients_required: {{ rhel8cis_openldap_clients_required }} +rhel8cis_telnet_required: {{ rhel8cis_telnet_required }} +rhel8cis_talk_required: {{ rhel8cis_talk_required }} +rhel8cis_rsh_required: {{ rhel8cis_rsh_required }} +rhel8cis_ypbind_required: {{ rhel8cis_ypbind_required }} + +# AIDE +rhel8cis_config_aide: {{ rhel8cis_config_aide }} + +# aide setup via - cron, timer +rhel8_aide_scan: cron + 
+# AIDE cron settings +rhel8cis_aide_cron: + cron_user: {{ rhel8cis_aide_cron.cron_user }} + cron_file: '{{ rhel8cis_aide_cron.cron_file }}' + aide_job: ' {{ rhel8cis_aide_cron.aide_job }}' + aide_minute: '{{ rhel8cis_aide_cron.aide_minute }}' + aide_hour: '{{ rhel8cis_aide_cron.aide_hour }}' + aide_day: '{{ rhel8cis_aide_cron.aide_day }}' + aide_month: '{{ rhel8cis_aide_cron.aide_month }}' + aide_weekday: '{{ rhel8cis_aide_cron.aide_weekday }}' + +# 1.5.1 Bootloader password +rhel8cis_bootloader_password: {{ rhel8cis_bootloader_password_hash }} +rhel8cis_set_boot_pass: {{ rhel8cis_set_boot_pass }} + +# 1.10 crypto +rhel8cis_crypto_policy: {{ rhel8cis_crypto_policy }} + +# Warning Banner Content (issue, issue.net, motd) +rhel8cis_warning_banner: {{ rhel8cis_warning_banner }} +# End Banner + +# Set to 'true' if X Windows is needed in your environment +rhel8cis_xwindows_required: {{ rhel8cis_xwindows_required }} + +# Whether or not to run tasks related to auditing/patching the desktop environment +rhel8cis_gui: {{ rhel8cis_gui }} + +# xinetd required +rhel8cis_xinetd_required: {{ rhel8cis_xinetd_required }} + +# IPv6 required +rhel8cis_ipv6_required: {{ rhel8cis_ipv6_required }} + +# System network parameters (host only OR host and router) +rhel8cis_is_router: {{ rhel8cis_is_router }} + +# Time Synchronization +rhel8cis_time_synchronization: {{ rhel8cis_time_synchronization }} + +rhel8cis_varlog_location: {{ rhel8cis_varlog_location }} + +rhel8cis_firewall: {{ rhel8cis_firewall }} +#rhel8cis_firewall: iptables +rhel8cis_default_firewall_zone: {{ rhel8cis_default_zone }} +rhel8cis_firewall_interface: +- enp0s3 +- enp0s8 + +rhel8cis_firewall_services: {{ rhel8cis_firewall_services }} + + + +### Section 4 +## auditd settings +rhel8cis_auditd: + space_left_action: {{ rhel8cis_auditd.space_left_action}} + action_mail_acct: {{ rhel8cis_auditd.action_mail_acct }} + admin_space_left_action: {{ rhel8cis_auditd.admin_space_left_action }} + max_log_file_action: {{ rhel8cis_auditd.max_log_file_action }} + auditd_backlog_limit: {{ rhel8cis_audit_back_log_limit }} + +## syslog +rhel8_cis_rsyslog: true + +### Section 5 +rhel8cis_sshd_limited: false +#Note the following to understand precedence and layout +rhel8cis_sshd_access: + AllowUser: + AllowGroup: + DenyUser: + DenyGroup: + +rhel8cis_ssh_strong_ciphers: Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr +rhel8cis_ssh_weak_ciphers: + 3des-cbc + aes128-cbc + aes192-cbc + aes256-cbc + arcfour + arcfour128 + arcfour256 + blowfish-cbc + cast128-cbc + rijndael-cbc@lysator.liu.se + +rhel8cis_ssh_strong_macs: MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256 +rhel8cis_ssh_weak_macs: + hmac-md5 + hmac-md5-96 + hmac-ripemd160 + hmac-sha1 + hmac-sha1-96 + umac-64@openssh.com + umac-128@openssh.com + hmac-md5-etm@openssh.com + hmac-md5-96-etm@openssh.com + hmac-ripemd160-etm@openssh.com + hmac-sha1-etm@openssh.com + hmac-sha1-96-etm@openssh.com + umac-64-etm@openssh.com + umac-128-etm@openssh.com + +rhel8cis_ssh_strong_kex: KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256 +rhel8cis_ssh_weak_kex: + diffie-hellman-group1-sha1 + diffie-hellman-group14-sha1 + diffie-hellman-group-exchange-sha1 + +rhel8cis_ssh_aliveinterval: "300" +rhel8cis_ssh_countmax: "3" + +## PAM
+rhel8cis_pam_password: + minlen: {{ rhel8cis_pam_password.minlen }} + minclass: {{ rhel8cis_pam_password.minclass }} +rhel8cis_pam_passwd_retry: "3" +# faillock or tally2 +rhel8cis_accountlock: faillock + +## note this is to skip tests +skip_rhel8cis_pam_passwd_auth: true +skip_rhel8cis_pam_system_auth: true + +# choose one of below +rhel8cis_pwhistory_so: "14" +rhel8cis_unix_so: false +rhel8cis_passwd_remember: "5" + +# logins.def password settings +rhel8cis_pass: + max_days: {{ rhel8cis_pass.max_days }} + min_days: {{ rhel8cis_pass.min_days }} + warn_age: {{ rhel8cis_pass.warn_age }} + +# 5.3.1/5.3.2 Custom authselect profile settings. Settings in place now will fail; they are placeholders from the control example +rhel8cis_authselect: + custom_profile_name: {{ rhel8cis_authselect['custom_profile_name'] }} + default_file_to_copy: {{ rhel8cis_authselect.default_file_to_copy }} + options: {{ rhel8cis_authselect.options }} + +# 5.3.1 Enable automation to create custom profile settings, using the settings above +rhel8cis_authselect_custom_profile_create: {{ rhel8cis_authselect_custom_profile_create }} + +# 5.3.2 Enable automation to select custom profile options, using the settings above +rhel8cis_authselect_custom_profile_select: {{ rhel8cis_authselect_custom_profile_select }} + +# 5.7 +rhel8cis_sugroup: {{ rhel8cis_sugroup| default('wheel') }} +rhel8cis_sugroup_users: {{ rhel8cis_sugroup_users }} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_10.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_10.rules.j2 new file mode 100644 index 0000000..e0ad440 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_10.rules.j2 @@ -0,0 +1,5 @@ +-a always,exit -F arch=b32 -S creat,open,openat,open_by_handle_at,truncate,ftruncate -F exit=-EACCES -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -F key=access +-a always,exit -F arch=b32 -S creat,open,openat,open_by_handle_at,truncate,ftruncate -F exit=-EPERM -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -F key=access +-a always,exit -F arch=b64 -S creat,open,openat,open_by_handle_at,truncate,ftruncate -F exit=-EACCES -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -F key=access +-a always,exit -F arch=b64 -S creat,open,openat,open_by_handle_at,truncate,ftruncate -F exit=-EPERM -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -F key=access + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_11.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_11.rules.j2 new file mode 100644 index 0000000..c8bded4 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_11.rules.j2 @@ -0,0 +1,6 @@ +-w /etc/group -p wa -k identity +-w /etc/passwd -p wa -k identity +-w /etc/gshadow -p wa -k identity +-w /etc/shadow -p wa -k identity +-w /etc/security/opasswd -p wa -k identity + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_12.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_12.rules.j2 new file mode 100644 index 0000000..c6f169f --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_12.rules.j2 @@ -0,0 +1,3 @@ +-a always,exit -F arch=b32 -S mount -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k mounts +-a always,exit -F arch=b64 -S mount -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k mounts + diff --git 
a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_13.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_13.rules.j2 new file mode 100644 index 0000000..9097629 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_13.rules.j2 @@ -0,0 +1,4 @@ +{% for proc in priv_procs.stdout_lines -%} +-a always,exit -F path={{ proc }} -F perm=x -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k privileged + +{% endfor %} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_14.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_14.rules.j2 new file mode 100644 index 0000000..a916aa1 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_14.rules.j2 @@ -0,0 +1,3 @@ +-a always,exit -F arch=b32 -S rmdir,unlink,unlinkat,rename -S renameat -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -F key=delete +-a always,exit -F arch=b64 -S rmdir,unlink,unlinkat,rename -S renameat -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -F key=delete + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_15.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_15.rules.j2 new file mode 100644 index 0000000..b89cea6 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_15.rules.j2 @@ -0,0 +1,5 @@ +-w /usr/sbin/insmod -p x -k modules +-w /usr/sbin/rmmod -p x -k modules +-w /usr/sbin/modprobe -p x -k modules +-a always,exit -F arch=b64 -S init_module -S delete_module -k modules + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_16.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_16.rules.j2 new file mode 100644 index 0000000..e5d31c7 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_16.rules.j2 @@ -0,0 +1,2 @@ +-w /var/log/sudo.log -p wa -k actions + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_17.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_17.rules.j2 new file mode 100644 index 0000000..a2b3aa0 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_17.rules.j2 @@ -0,0 +1,2 @@ +-e 2 + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_3.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_3.rules.j2 new file mode 100644 index 0000000..0ae21fd --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_3.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/sudoers -p wa -k scope +-w /etc/sudoers.d/ -p wa -k scope diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_4.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_4.rules.j2 new file mode 100644 index 0000000..cbd1e7c --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_4.rules.j2 @@ -0,0 +1,3 @@ +-w /var/log/faillog -p wa -k logins +-w /var/log/lastlog -p wa -k logins + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_5.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_5.rules.j2 new file mode 100644 index 0000000..f9e3dbf --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_5.rules.j2 @@ -0,0 +1,4 @@ +-w /var/run/utmp -p wa -k session +-w /var/log/wtmp -p wa -k logins +-w /var/log/btmp -p wa 
-k logins + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_6.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_6.rules.j2 new file mode 100644 index 0000000..8cedee6 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_6.rules.j2 @@ -0,0 +1,6 @@ +-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change +-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change +-a always,exit -F arch=b64 -S clock_settime -k time-change +-a always,exit -F arch=b32 -S clock_settime -k time-change +-w /etc/localtime -p wa -k time-change + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_7.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_7.rules.j2 new file mode 100644 index 0000000..fe5bd08 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_7.rules.j2 @@ -0,0 +1,3 @@ +-w /etc/selinux/ -p wa -k MAC-policy +-w /usr/share/selinux/ -p wa -k MAC-policy + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_8.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_8.rules.j2 new file mode 100644 index 0000000..cea8926 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_8.rules.j2 @@ -0,0 +1,7 @@ +-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale +-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale +-w /etc/issue -p wa -k system-locale +-w /etc/issue.net -p wa -k system-locale +-w /etc/hosts -p wa -k system-locale +-w /etc/sysconfig/network -p wa -k system-locale + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_9.rules.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_9.rules.j2 new file mode 100644 index 0000000..02e5cd2 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/audit/rhel8cis_rule_4_1_9.rules.j2 @@ -0,0 +1,7 @@ +-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>={{ rhel8cis_int_gid }} -F auid!=4294967295 -k perm_mod + diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/chrony.conf.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/chrony.conf.j2 new file mode 100644 index 0000000..1e65073 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/chrony.conf.j2 @@ -0,0 +1,93 @@ +# This the default chrony.conf file for the Debian chrony package. After +# editing this file use the command 'invoke-rc.d chrony restart' to make +# your changes take effect. John Hasler 1998-2008 + +# See www.pool.ntp.org for an explanation of these servers. Please +# consider joining the project if possible. 
If you can't or don't want to +# use these servers I suggest that you try your ISP's nameservers. We mark +# the servers 'offline' so that chronyd won't try to connect when the link +# is down. Scripts in /etc/ppp/ip-up.d and /etc/ppp/ip-down.d use chronyc +# commands to switch it on when a dialup link comes up and off when it goes +# down. Code in /etc/init.d/chrony attempts to determine whether or not +# the link is up at boot time and set the online status accordingly. If +# you have an always-on connection such as cable omit the 'offline' +# directive and chronyd will default to online. +# +# Note that if Chrony tries to go "online" and dns lookup of the servers +# fails they will be discarded. Thus under some circumstances it is +# better to use IP numbers than host names. + +{% for server in rhel8cis_time_synchronization_servers -%} +server {{ server }} {{ rhel8cis_chrony_server_options }} +{% endfor %} + +# Look here for the admin password needed for chronyc. The initial +# password is generated by a random process at install time. You may +# change it if you wish. + +keyfile /etc/chrony/chrony.keys + +# Set runtime command key. Note that if you change the key (not the +# password) to anything other than 1 you will need to edit +# /etc/ppp/ip-up.d/chrony, /etc/ppp/ip-down.d/chrony, /etc/init.d/chrony +# and /etc/cron.weekly/chrony as these scripts use it to get the password. + +commandkey 1 + +# I moved the driftfile to /var/lib/chrony to comply with the Debian +# filesystem standard. + +driftfile /var/lib/chrony/chrony.drift + +# Comment this line out to turn off logging. + +log tracking measurements statistics +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. + +maxupdateskew 100.0 + +# Dump measurements when daemon exits. + +dumponexit + +# Specify directory for dumping measurements. + +dumpdir /var/lib/chrony + +# Let computer be a server when it is unsynchronised. + +local stratum 10 + +# Allow computers on the unrouted nets to use the server. + +#allow 10/8 +#allow 192.168/16 +#allow 172.16/12 + +# This directive forces `chronyd' to send a message to syslog if it +# makes a system clock adjustment larger than a threshold value in seconds. + +logchange 0.5 + +# This directive defines an email address to which mail should be sent +# if chronyd applies a correction exceeding a particular threshold to the +# system clock. + +# mailonchange root@localhost 0.5 + +# This directive tells chrony to regulate the real-time clock and tells it +# Where to store related data. It may not work on some newer motherboards +# that use the HPET real-time clock. It requires enhanced real-time +# support in the kernel. I've commented it out because with certain +# combinations of motherboard and kernel it is reported to cause lockups. + +# rtcfile /var/lib/chrony/chrony.rtc + +# If the last line of this file reads 'rtconutc' chrony will assume that +# the CMOS clock is on UTC (GMT). If it reads '# rtconutc' or is absent +# chrony will assume local time. The line (if any) was written by the +# chrony postinst based on what it found in /etc/default/rcS. You may +# change it if necessary. 
+rtconutc diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.j2 new file mode 100644 index 0000000..080a169 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.j2 @@ -0,0 +1 @@ +{{ rhel8cis_warning_banner }} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.net.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.net.j2 new file mode 100644 index 0000000..080a169 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/issue.net.j2 @@ -0,0 +1 @@ +{{ rhel8cis_warning_banner }} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/motd.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/motd.j2 new file mode 100644 index 0000000..080a169 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/motd.j2 @@ -0,0 +1 @@ +{{ rhel8cis_warning_banner }} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/systemd/system/tmp.mount.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/systemd/system/tmp.mount.j2 new file mode 100644 index 0000000..e62d186 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/etc/systemd/system/tmp.mount.j2 @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: LGPL-2.1+ +# +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. + +[Unit] +Description=Temporary Directory (/tmp) +Documentation=man:hier(7) +Documentation=https://www.freedesktop.org/wiki/Software/systemd/APIFileSystems +ConditionPathIsSymbolicLink=!/tmp +DefaultDependencies=no +Conflicts=umount.target +Before=local-fs.target umount.target +After=swap.target + +[Mount] +What=tmpfs +Where=/tmp +Type=tmpfs +Options=mode=1777,strictatime,{% if rhel8cis_rule_1_1_3 %}nodev,{% endif %}{% if rhel8cis_rule_1_1_4 %}nosuid,{% endif %}{% if rhel8cis_rule_1_1_5 %}noexec{% endif %} + +# Make 'systemctl enable tmp.mount' work: +[Install] +WantedBy=local-fs.target \ No newline at end of file diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/hosts.allow.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/hosts.allow.j2 new file mode 100644 index 0000000..9743ef9 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/hosts.allow.j2 @@ -0,0 +1,11 @@ +# +# hosts.allow This file contains access rules which are used to +# allow or deny connections to network services that +# either use the tcp_wrappers library or that have been +# started through a tcp_wrappers-enabled xinetd. +# +# See 'man 5 hosts_options' and 'man 5 hosts_access' +# for information on rule syntax. +# See 'man tcpd' for information on tcp_wrappers +# +ALL: {% for iprange in rhel8cis_host_allow -%}{{ iprange }}{% if not loop.last %}, {% endif %}{% endfor %} diff --git a/Linux/ansible-lockdown/RHEL8-CIS/templates/ntp.conf.j2 b/Linux/ansible-lockdown/RHEL8-CIS/templates/ntp.conf.j2 new file mode 100644 index 0000000..62c51eb --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/templates/ntp.conf.j2 @@ -0,0 +1,59 @@ +# For more information about this file, see the man pages +# ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). + +driftfile /var/lib/ntp/drift + +# Permit time synchronization with our time source, but do not +# permit the source to query or modify the service on this system. 
+#restrict default nomodify notrap nopeer noquery +restrict -4 default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Permit all access over the loopback interface. This could +# be tightened as well, but to do so would effect some of +# the administrative functions. +restrict 127.0.0.1 +restrict ::1 + +# Hosts on local network are less restricted. +#restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap + +# Use public servers from the pool.ntp.org project. +# Please consider joining the pool (http://www.pool.ntp.org/join.html). +{% for server in rhel8cis_time_synchronization_servers -%} +server {{ server }} {{ rhel8cis_ntp_server_options }} +{% endfor %} + +#broadcast 192.168.1.255 autokey # broadcast server +#broadcastclient # broadcast client +#broadcast 224.0.1.1 autokey # multicast server +#multicastclient 224.0.1.1 # multicast client +#manycastserver 239.255.254.254 # manycast server +#manycastclient 239.255.254.254 autokey # manycast client + +# Enable public key cryptography. +#crypto + +includefile /etc/ntp/crypto/pw + +# Key file containing the keys and key identifiers used when operating +# with symmetric key cryptography. +keys /etc/ntp/keys + +# Specify the key identifiers which are trusted. +#trustedkey 4 8 42 + +# Specify the key identifier to use with the ntpdc utility. +#requestkey 8 + +# Specify the key identifier to use with the ntpq utility. +#controlkey 8 + +# Enable writing of statistics records. +#statistics clockstats cryptostats loopstats peerstats + +# Disable the monitoring facility to prevent amplification attacks using ntpdc +# monlist command when default restrict does not include the noquery flag. See +# CVE-2013-5211 for more details. +# Note: Monitoring will not be disabled with the limited restriction flag. 
+disable monitor diff --git a/Linux/ansible-lockdown/RHEL8-CIS/vars/main.yml b/Linux/ansible-lockdown/RHEL8-CIS/vars/main.yml new file mode 100644 index 0000000..a6126d5 --- /dev/null +++ b/Linux/ansible-lockdown/RHEL8-CIS/vars/main.yml @@ -0,0 +1,5 @@ +--- +# vars file for RHEL8-CIS +rhel8cis_allowed_crypto_policies: + - 'FUTURE' + - 'FIPS' diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/.DS_Store b/Linux/ansible-lockdown/UBUNTU18-CIS/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..70f73efb59a2df365c901140174c9eca7171fdf2 GIT binary patch literal 10244 [binary patch data omitted; the start of the following diff (apparently the UBUNTU18-CIS .travis.yml) is not recoverable] >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/.yamllint b/Linux/ansible-lockdown/UBUNTU18-CIS/.yamllint new file mode 100755 index 0000000..93378b9 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/.yamllint @@ -0,0 +1,20 @@ +--- +ignore: | + tests/ + molecule/ + .gitlab-ci.yml + *molecule.yml + +extends: default + +rules: + indentation: + spaces: 4 + truthy: disable + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/CONTRIBUTING.rst b/Linux/ansible-lockdown/UBUNTU18-CIS/CONTRIBUTING.rst new file mode 100644 index 0000000..572ae7c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/CONTRIBUTING.rst @@ -0,0 +1,68 @@ +Contributing to MindPoint Group Projects +======================================== + +Rules +----- +1) All commits must be GPG signed (details in Signing section) +2) All commits must have Signed-off-by (Signed-off-by: Joan Doe ) in the commit message (details in Signing section) +3) All work is done in your own branch or own fork +4) Pull requests + a) From within the repo: All pull requests go into the devel branch. There are automated checks for signed commits, signoff in commit message, and functional testing + b) From a forked repo: All pull requests will go into a staging branch within the repo. 
There are automated checks for signed commits, signoff in commit message, and functional testing when going from staging to devel +5) Be open and nice to each other + +Workflow +-------- +- Your work is done in your own individual branch. Make sure to to Signed-off and GPG sign all commits you intend to merge +- All community Pull Requests are into the devel branch (from forked repos they go to staging before devel). There are automated checks for GPG signed, Signed-off in commits, and functional tests before being approved. If your pull request comes in from outside of our repo, the pull request will go into a staging branch. There is info needed from our repo for our CI/CD testing. +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release +Signing your contribution +------------------------- + +We've chosen to use the Developer's Certificate of Origin (DCO) method +that is employed by the Linux Kernel Project, which provides a simple +way to contribute to MindPoint Group projects. + +The process is to certify the below DCO 1.1 text +:: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +:: + +Then, when it comes time to submit a contribution, include the +following text in your contribution commit message: + +:: + + Signed-off-by: Joan Doe + +:: + + +This message can be entered manually, or if you have configured git +with the correct `user.name` and `user.email`, you can use the `-s` +option to `git commit` to automatically include the signoff message. 
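+For example, a commit that is both GPG signed and signed off can be created as shown below (the name, email address, and commit message are placeholders for illustration only; adjust them to your own identity and GPG key setup):
+
+::
+
+    git config user.name "Joan Doe"
+    git config user.email "joan.doe@example.com"
+    git commit -s -S -m "Describe the change"
+
+::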
\ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/LICENSE b/Linux/ansible-lockdown/UBUNTU18-CIS/LICENSE new file mode 100644 index 0000000..3ae3c23 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Mindpoint Group / Lockdown Enterprise / Lockdown Enterprise Releases + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/README.md b/Linux/ansible-lockdown/UBUNTU18-CIS/README.md new file mode 100644 index 0000000..2ee1922 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/README.md @@ -0,0 +1,86 @@ +Ubuntu 18 CIS +========= + +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/UBUNTU18-CIS/CommunityToDevel?label=Devel%20Build%20Status&style=plastic) +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/UBUNTU18-CIS/DevelToMaster?label=Main%20Build%20Status&style=plastic) +![Release](https://img.shields.io/github/v/release/ansible-lockdown/UBUNTU18-CIS?style=plastic) + +Configure Ubuntu 18 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) v2.1.0 compliant. There are some intrusive tasks that have a toggle in defaults main.yml to disable to automated fix + +Caution(s) +--------- + +This role **will make changes to the system** that could break things. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted. + +This role was developed against a clean install of the Operating System. If you are implimenting to an existing system please review this role for any site specific changes that are needed. + +To use release version please point to main branch +Based on +[CIS_Ubuntu_Linux_18.04_LTS_Benchmark](https://community.cisecurity.org/collab/public/index.php). 
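+
+A minimal example playbook is sketched below (illustrative only; it assumes the role has been downloaded locally under the name UBUNTU18-CIS and that you have reviewed the disruptive-task toggles in defaults/main.yml first). Setting ubtu18cis_disruption_high to false skips the tasks flagged as disruptive:
+
+```yaml
+- hosts: all
+  become: true
+  vars:
+      ubtu18cis_disruption_high: false
+  roles:
+      - UBUNTU18-CIS
+```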
+ +Documentation +------------- + +- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown) +- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise) +- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration) +- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise) +- [Wiki](https://github.com/ansible-lockdown/UBUNTU18-CIS/wiki) +- [Repo GitHub Page](https://ansible-lockdown.github.io/UBUNTU18-CIS/) + +Requirements +------------ + +**General:** + +- Basic knowledge of Ansible; below are some links to the Ansible documentation to help get started if you are unfamiliar with Ansible + - [Main Ansible documentation page](https://docs.ansible.com) + - [Ansible Getting Started](https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html) + - [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html) + - [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html) +- Functioning Ansible and/or Tower installed, configured, and running. This includes all of the base Ansible/Tower configurations, needed packages installed, and infrastructure setup. +- Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consequences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file or the [Main Variables Wiki Page](https://github.com/ansible-lockdown/UBUNTU18-CIS/wiki/Main-Variables). + +**Technical Dependencies:** + +- Running Ansible/Tower setup (this role is tested against Ansible version 2.9.1 and newer) +- Python3 Ansible run environment + +Auditing (new) +-------------- + +This can be turned on or off within the defaults/main.yml file with the variable run_audit. The value is false by default; please refer to the wiki for more details. + +This is a much quicker, very lightweight check of (where possible) config compliance and live/running settings. + +A new form of auditing has been developed, using a small (12MB) go binary called [goss](https://github.com/aelsabbahy/goss) along with the relevant configurations to check, without the need for additional infrastructure or other tooling. +This audit will not only check that the config has the correct setting but also aims to capture whether the system is running with that configuration, helping to remove [false positives](https://www.mindpointgroup.com/blog/is-compliance-scanning-still-relevant/) in the process. + +Refer to [UBUNTU18-CIS-Audit](https://github.com/ansible-lockdown/UBUNTU18-CIS-Audit). + +Further audit documentation can be found at [Audit-Docs](https://github.com/ansible-lockdown/UBUNTU18-CIS-Audit/docs/Security_remediation_and_auditing.md). + + +Role Variables +-------------- + +This role is designed so that the end user should not have to edit the tasks themselves. All customizing should be done via the defaults/main.yml file or with extra vars within the project, job, workflow, etc. These variables can be found [here](https://github.com/ansible-lockdown/UBUNTU18-CIS/wiki/Main-Variables) in the Main Variables Wiki page. All variables are listed there along with descriptions. + +Branches +-------- + +- **devel** - This is the default branch and the working development branch. 
Community pull requests will pull into this branch +- **main** - This is the release branch +- **reports** - This is a protected branch for our scoring reports, no code should ever go here +- **gh-pages** - This is the github pages branch +- **all other branches** - Individual community member branches + +Community Contribution +---------------------- + +We encourage you (the community) to contribute to this role. Please read the rules below. + +- Your work is done in your own individual branch. Make sure to Signed-off and GPG sign all commits you intend to merge. +- All community Pull Requests are pulled into the devel branch +- Pull Requests into devel will confirm your commits have a GPG signature, Signed-off, and a functional test before being approved +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/defaults/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/defaults/main.yml new file mode 100644 index 0000000..b339343 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/defaults/main.yml @@ -0,0 +1,707 @@ +--- +# If you would like a report at the end accordin to OpenSCAP as to the report results +# then you should set ubtu18cis_oscap_scan to true/yes. +# NOTE: This requires the python_xmltojson package on the control host. +ubtu18cis_oscap_scan: false +ubtu18cis_report_dir: /tmp + +ubtu18cis_section1_patch: true +ubtu18cis_section2_patch: true +ubtu18cis_section3_patch: true +ubtu18cis_section4_patch: true +ubtu18cis_section5_patch: true +ubtu18cis_section6_patch: true + +# System will reboot if false, can give better audit results +ubtu18_skip_reboot: True + +## Benchmark name used by auditing control role +# The audit variable found at the base +benchmark: UBUNTU18-CIS + +### Audit Binary is required on the remote host +setup_audit: false +# How to retrieve audit binary +# Options are copy or download - detailed settings at the bottom of this file +# you will need to access to either github or the file already dowmloaded +get_audit_binary_method: download + +# how to get audit files onto host options +# options are git/copy/get_url other e.g. if you wish to run from already downloaded conf +audit_content: git + +# enable audits to run - this runs the audit and get the latest content +run_audit: false + +# Run heavy tests - some tests can have more impact on a system enabling these can have greater impact on a system +audit_run_heavy_tests: true +# Timeout for those cmds that take longer to run where timeout set +audit_cmd_timeout: 60000 + +### End Audit enablements #### + +# We've defined complexity-high to mean that we cannot automatically remediate +# the rule in question. In the future this might mean that the remediation +# may fail in some cases. +ubtu18cis_complexity_high: false + +# Show "changed" for complex items not remediated per complexity-high setting +# to make them stand out. "changed" items on a second run of the role would +# indicate items requiring manual review. +ubtu18cis_audit_complex: true + +# We've defined disruption-high to indicate items that are likely to cause +# disruption in a normal workflow. These items can be remediated automatically +# but are disabled by default to avoid disruption. 
+# Value of true runs duscruptive tasks, value of false will skip disruptive tasks +ubtu18cis_disruption_high: true + +# Show "changed" for disruptive items not remediated per disruption-high +# setting to make them stand out. +ubtu18cis_audit_disruptive: true + +ubtu18cis_skip_for_travis: false + +ubtu18cis_workaround_for_disa_benchmark: true +ubtu18cis_workaround_for_ssg_benchmark: true + +# tweak role to run in a chroot, such as in kickstart %post script +ubtu18cis_system_is_chroot: "{{ ansible_is_chroot | default(False) }}" + +# tweak role to run in a non-privileged container +ubtu18cis_system_is_container: false + +# disables controls that break under EC2 instance +system_is_ec2: false + +# Section 1 Fixes +# Section 1 is Initial setup (FileSystem Configuration, Configure Software Updates, Filesystem Integrity Checking, Secure Boot Settings, +# Additional Process Hardening, Mandatory Access Control, Warning Banners, and GNOME Display Manager) +ubtu18cis_rule_1_1_1_1: true +ubtu18cis_rule_1_1_1_2: true +ubtu18cis_rule_1_1_1_3: true +ubtu18cis_rule_1_1_1_4: true +ubtu18cis_rule_1_1_1_5: true +ubtu18cis_rule_1_1_1_6: true +ubtu18cis_rule_1_1_1: true +ubtu18cis_rule_1_1_2: true +ubtu18cis_rule_1_1_3: true +ubtu18cis_rule_1_1_4: true +ubtu18cis_rule_1_1_5: true +ubtu18cis_rule_1_1_6: true +ubtu18cis_rule_1_1_7: true +ubtu18cis_rule_1_1_8: true +ubtu18cis_rule_1_1_9: true +ubtu18cis_rule_1_1_10: true +ubtu18cis_rule_1_1_11: true +ubtu18cis_rule_1_1_12: true +ubtu18cis_rule_1_1_13: true +ubtu18cis_rule_1_1_14: true +ubtu18cis_rule_1_1_15: true +ubtu18cis_rule_1_1_16: true +ubtu18cis_rule_1_1_17: true +ubtu18cis_rule_1_1_18: true +ubtu18cis_rule_1_1_19: true +ubtu18cis_rule_1_1_20: true +ubtu18cis_rule_1_1_21: true +ubtu18cis_rule_1_1_22: true +ubtu18cis_rule_1_1_23: true +ubtu18cis_rule_1_1_24: true +ubtu18cis_rule_1_2_1: true +ubtu18cis_rule_1_2_2: true +ubtu18cis_rule_1_3_1: true +ubtu18cis_rule_1_3_2: true +ubtu18cis_rule_1_4_1: true +ubtu18cis_rule_1_4_2: true +ubtu18cis_rule_1_4_3: true +ubtu18cis_rule_1_4_4: true +ubtu18cis_rule_1_5_1: true +ubtu18cis_rule_1_5_2: true +ubtu18cis_rule_1_5_3: true +ubtu18cis_rule_1_5_4: true +ubtu18cis_rule_1_6_1_1: true +ubtu18cis_rule_1_6_1_2: true +ubtu18cis_rule_1_6_1_3: true +ubtu18cis_rule_1_6_1_4: true +ubtu18cis_rule_1_7_1: true +ubtu18cis_rule_1_7_2: true +ubtu18cis_rule_1_7_3: true +ubtu18cis_rule_1_7_4: true +ubtu18cis_rule_1_7_5: true +ubtu18cis_rule_1_7_6: true +ubtu18cis_rule_1_8_1: true +ubtu18cis_rule_1_8_2: true +ubtu18cis_rule_1_8_3: true +ubtu18cis_rule_1_8_4: true +ubtu18cis_rule_1_9: true + +# Section 2 Fixes +# Section 2 is Services (Special Purpose, and Service Clients) +ubtu18cis_rule_2_1_1_1: true +ubtu18cis_rule_2_1_1_2: true +ubtu18cis_rule_2_1_1_3: true +ubtu18cis_rule_2_1_1_4: true +ubtu18cis_rule_2_1_2: true +ubtu18cis_rule_2_1_3: true +ubtu18cis_rule_2_1_4: true +ubtu18cis_rule_2_1_5: true +ubtu18cis_rule_2_1_6: true +ubtu18cis_rule_2_1_7: true +ubtu18cis_rule_2_1_8: true +ubtu18cis_rule_2_1_9: true +ubtu18cis_rule_2_1_10: true +ubtu18cis_rule_2_1_11: true +ubtu18cis_rule_2_1_12: true +ubtu18cis_rule_2_1_13: true +ubtu18cis_rule_2_1_14: true +ubtu18cis_rule_2_1_15: true +ubtu18cis_rule_2_1_16: true +ubtu18cis_rule_2_1_17: true +ubtu18cis_rule_2_2_1: true +ubtu18cis_rule_2_2_2: true +ubtu18cis_rule_2_2_3: true +ubtu18cis_rule_2_2_4: true +ubtu18cis_rule_2_2_5: true +ubtu18cis_rule_2_2_6: true +ubtu18cis_rule_2_3: true + +# Section 3 Fixes +# Section 3 is Network Configuration (Disable unused network protocols and devices, 
Network Parameters(host), Network Parameters (host and router), Uncommon Network Protocols, and Firewall Configuration) +ubtu18cis_rule_3_1_1: true +ubtu18cis_rule_3_1_2: true +ubtu18cis_rule_3_2_1: true +ubtu18cis_rule_3_2_2: true +ubtu18cis_rule_3_3_1: true +ubtu18cis_rule_3_3_2: true +ubtu18cis_rule_3_3_3: true +ubtu18cis_rule_3_3_4: true +ubtu18cis_rule_3_3_5: true +ubtu18cis_rule_3_3_6: true +ubtu18cis_rule_3_3_7: true +ubtu18cis_rule_3_3_8: true +ubtu18cis_rule_3_3_9: true +ubtu18cis_rule_3_4_1: true +ubtu18cis_rule_3_4_2: true +ubtu18cis_rule_3_4_3: true +ubtu18cis_rule_3_4_4: true +ubtu18cis_rule_3_5_1_1: true +ubtu18cis_rule_3_5_1_2: true +ubtu18cis_rule_3_5_1_3: true +ubtu18cis_rule_3_5_1_4: true +ubtu18cis_rule_3_5_1_5: true +ubtu18cis_rule_3_5_1_6: true +ubtu18cis_rule_3_5_1_7: true +ubtu18cis_rule_3_5_2_1: true +ubtu18cis_rule_3_5_2_2: true +ubtu18cis_rule_3_5_2_3: true +ubtu18cis_rule_3_5_2_4: true +ubtu18cis_rule_3_5_2_5: true +ubtu18cis_rule_3_5_2_6: true +ubtu18cis_rule_3_5_2_7: true +ubtu18cis_rule_3_5_2_8: true +ubtu18cis_rule_3_5_2_9: true +ubtu18cis_rule_3_5_2_10: true +ubtu18cis_rule_3_5_3_1_1: true +ubtu18cis_rule_3_5_3_1_2: true +ubtu18cis_rule_3_5_3_1_3: true +ubtu18cis_rule_3_5_3_2_1: true +ubtu18cis_rule_3_5_3_2_2: true +ubtu18cis_rule_3_5_3_2_3: true +ubtu18cis_rule_3_5_3_2_4: true +ubtu18cis_rule_3_5_3_3_1: true +ubtu18cis_rule_3_5_3_3_2: true +ubtu18cis_rule_3_5_3_3_3: true +ubtu18cis_rule_3_5_3_3_4: true + +# Section 4 Fixes +# Section 4 is Logging and Auditing (Configure System Accounting and Configure Logging) +ubtu18cis_rule_4_1_1_1: true +ubtu18cis_rule_4_1_1_2: true +ubtu18cis_rule_4_1_1_3: true +ubtu18cis_rule_4_1_1_4: true +ubtu18cis_rule_4_1_2_1: true +ubtu18cis_rule_4_1_2_2: true +ubtu18cis_rule_4_1_2_3: true +ubtu18cis_rule_4_1_3: true +ubtu18cis_rule_4_1_4: true +ubtu18cis_rule_4_1_5: true +ubtu18cis_rule_4_1_6: true +ubtu18cis_rule_4_1_7: true +ubtu18cis_rule_4_1_8: true +ubtu18cis_rule_4_1_9: true +ubtu18cis_rule_4_1_10: true +ubtu18cis_rule_4_1_11: true +ubtu18cis_rule_4_1_12: true +ubtu18cis_rule_4_1_13: true +ubtu18cis_rule_4_1_14: true +ubtu18cis_rule_4_1_15: true +ubtu18cis_rule_4_1_16: true +ubtu18cis_rule_4_1_17: true +ubtu18cis_rule_4_2_1_1: true +ubtu18cis_rule_4_2_1_2: true +ubtu18cis_rule_4_2_1_3: true +ubtu18cis_rule_4_2_1_4: true +ubtu18cis_rule_4_2_1_5: true +ubtu18cis_rule_4_2_1_6: true +ubtu18cis_rule_4_2_2_1: true +ubtu18cis_rule_4_2_2_2: true +ubtu18cis_rule_4_2_2_3: true +ubtu18cis_rule_4_2_3: true +ubtu18cis_rule_4_3: true +ubtu18cis_rule_4_4: true + +# Section 5 Fixes +# Section 5 is Access, Authentication, and Authorization (Configure Time-Based Job Schedulers, Configure sudo, Configure SSH Server, Configure PAM +# and User Accounts and Environment) +ubtu18cis_rule_5_1_1: true +ubtu18cis_rule_5_1_2: true +ubtu18cis_rule_5_1_3: true +ubtu18cis_rule_5_1_4: true +ubtu18cis_rule_5_1_5: true +ubtu18cis_rule_5_1_6: true +ubtu18cis_rule_5_1_7: true +ubtu18cis_rule_5_1_8: true +ubtu18cis_rule_5_1_9: true +ubtu18cis_rule_5_2_1: true +ubtu18cis_rule_5_2_2: true +ubtu18cis_rule_5_2_3: true +ubtu18cis_rule_5_3_1: true +ubtu18cis_rule_5_3_2: true +ubtu18cis_rule_5_3_3: true +ubtu18cis_rule_5_3_4: true +ubtu18cis_rule_5_3_5: true +ubtu18cis_rule_5_3_6: true +ubtu18cis_rule_5_3_7: true +ubtu18cis_rule_5_3_8: true +ubtu18cis_rule_5_3_9: true +ubtu18cis_rule_5_3_10: true +ubtu18cis_rule_5_3_11: true +ubtu18cis_rule_5_3_12: true +ubtu18cis_rule_5_3_13: true +ubtu18cis_rule_5_3_14: true +ubtu18cis_rule_5_3_15: true +ubtu18cis_rule_5_3_16: true 
+ubtu18cis_rule_5_3_17: true +ubtu18cis_rule_5_3_18: true +ubtu18cis_rule_5_3_19: true +ubtu18cis_rule_5_3_20: true +ubtu18cis_rule_5_3_21: true +ubtu18cis_rule_5_3_22: true +ubtu18cis_rule_5_4_1: true +ubtu18cis_rule_5_4_2: true +ubtu18cis_rule_5_4_3: true +ubtu18cis_rule_5_4_4: true +ubtu18cis_rule_5_5_1_1: true +ubtu18cis_rule_5_5_1_2: true +ubtu18cis_rule_5_5_1_3: true +ubtu18cis_rule_5_5_1_4: true +ubtu18cis_rule_5_5_1_5: true +ubtu18cis_rule_5_5_2: true +ubtu18cis_rule_5_5_3: true +ubtu18cis_rule_5_5_4: true +ubtu18cis_rule_5_5_5: true +ubtu18cis_rule_5_6: true +ubtu18cis_rule_5_7: true + +# Section 6 Fixes +# Section 6 is System Maintenance (System File Permissions and User and Group Settings) +ubtu18cis_rule_6_1_1: true +ubtu18cis_rule_6_1_2: true +ubtu18cis_rule_6_1_3: true +ubtu18cis_rule_6_1_4: true +ubtu18cis_rule_6_1_5: true +ubtu18cis_rule_6_1_6: true +ubtu18cis_rule_6_1_7: true +ubtu18cis_rule_6_1_8: true +ubtu18cis_rule_6_1_9: true +ubtu18cis_rule_6_1_10: true +ubtu18cis_rule_6_1_11: true +ubtu18cis_rule_6_1_12: true +ubtu18cis_rule_6_1_13: true +ubtu18cis_rule_6_1_14: true +ubtu18cis_rule_6_2_1: true +ubtu18cis_rule_6_2_2: true +ubtu18cis_rule_6_2_3: true +ubtu18cis_rule_6_2_4: true +ubtu18cis_rule_6_2_5: true +ubtu18cis_rule_6_2_6: true +ubtu18cis_rule_6_2_7: true +ubtu18cis_rule_6_2_8: true +ubtu18cis_rule_6_2_9: true +ubtu18cis_rule_6_2_10: true +ubtu18cis_rule_6_2_11: true +ubtu18cis_rule_6_2_12: true +ubtu18cis_rule_6_2_13: true +ubtu18cis_rule_6_2_14: true +ubtu18cis_rule_6_2_15: true +ubtu18cis_rule_6_2_16: true +ubtu18cis_rule_6_2_17: true + +# Service configuration variables, set to true to keep service +ubtu18cis_allow_autofs: false +ubtu18cis_allow_usb_storage: false +ubtu18cis_avahi_server: false +ubtu18cis_cups_server: false +ubtu18cis_dhcp_server: false +ubtu18cis_ldap_server: false +ubtu18cis_nfs_server: false +ubtu18cis_dns_server: false +ubtu18cis_vsftpd_server: false +ubtu18cis_httpd_server: false +ubtu18cis_dovecot_server: false +ubtu18cis_smb_server: false +ubtu18cis_squid_server: false +ubtu18cis_snmp_server: false +ubtu18cis_mail_server: false +ubtu18cis_rsync_server: false +ubtu18cis_nis_server: false +ubtu18cis_rpc_server: false + +# Clients in use variables +ubtu18cis_nis_required: false +ubtu18cis_rsh_required: false +ubtu18cis_talk_required: false +ubtu18cis_telnet_required: false +ubtu18cis_ldap_clients_required: false +ubtu18cis_is_router: false + +# IPv6 requirement toggle +ubtu18cis_ipv6_required: true + +# Other system wide variables +# ubtu18cis_desktop_required is the toggle for requiring desktop environments. True means you use a desktop and will not disable/remove needed items to run a desktop (not recommented for servers) +# false means you do not require a desktop +ubtu18cis_desktop_required: false +# Toggle to have automation install gdm3. +# The gdm related handlers won't run if you have this set to true but gdm3 is not installed. 
+ubtu18cis_install_gdm3: true
+
+# Section 1 Control Variables
+# Control 1.1.2/1.1.3/1.1.4/1.1.5
+# ubtu18cis_tmp_fstab_options are the file system options for the fstab configuration for /tmp
+# To conform to CIS control 1.1.2 any settings may be used
+# To conform to CIS control 1.1.3 nodev needs to be present
+# To conform to CIS control 1.1.4 nosuid needs to be present
+# To conform to CIS control 1.1.5 noexec needs to be present
+ubtu18cis_tmp_fstab_options: "defaults,rw,nosuid,nodev,noexec,relatime"
+
+# Control 1.1.6/1.1.7/1.1.8/1.1.9
+# ubtu18cis_shm_fstab_options are the file system options for the fstab configuration for /dev/shm
+# To conform to CIS control 1.1.6 any settings may be used
+# To conform to CIS control 1.1.7 nodev needs to be present
+# To conform to CIS control 1.1.8 nosuid needs to be present
+# To conform to CIS control 1.1.9 noexec needs to be present
+ubtu18cis_shm_fstab_options: "defaults,noexec,nodev,nosuid,seclabel"
+
+# Control 1.1.12/1.1.13/1.1.14
+# These are the settings for the /var/tmp mount
+# To conform to CIS control 1.1.12 nodev needs to be present in opts
+# To conform to CIS control 1.1.13 nosuid needs to be present in opts
+# To conform to CIS control 1.1.14 noexec needs to be present in opts
+ubtu18cis_vartmp:
+ source: /tmp
+ fstype: none
+ opts: "defaults,nodev,nosuid,noexec,bind"
+ enabled: false
+
+# Control 1.3.2
+# These are the crontab settings for file system integrity enforcement
+ubtu18cis_aide_cron:
+ cron_user: root
+ cron_file: /etc/crontab
+ aide_job: '/usr/bin/aide.wrapper --config /etc/aide/aide.conf --check'
+ aide_minute: 0
+ aide_hour: 5
+ aide_day: '*'
+ aide_month: '*'
+ aide_weekday: '*'
+
+# Control 1.4.2
+ubtu18cis_bootloader_password_hash: 'grub.pbkdf2.sha512.changethispassword'
+
+# Control 1.4.4
+# THIS VARIABLE SHOULD BE CHANGED AND INCORPORATED INTO VAULT
+# THIS VALUE IS WHAT THE ROOT PW WILL BECOME!!!!!!!!
+# HAVING THAT PW EXPOSED IN RAW TEXT IS NOT SECURE!!!!
+ubtu18cis_root_pw: "Password1"
+
+# Control 1.8.2
+# This will be the motd banner. It must not contain the items below in order to be compliant with the Ubuntu 18 CIS benchmark:
+# \m, \r, \s, \v or references to the OS platform
+ubtu18cis_warning_banner: |
+ 'Authorized uses only. All activity may be monitored and reported.'
+
+# Section 2 Control Variables
+# Control 2.1.1.1
+# ubtu18cis_time_sync_tool is the tool used to synchronize time
+# The two options are chrony or ntp
+ubtu18cis_time_sync_tool: "ntp"
+
+# Control 2.1.1.4
+# ubtu18cis_chrony_server_options specifies the server options for chrony
+ubtu18cis_chrony_server_options: "minpoll 8"
+# ubtu18cis_ntp_server_options specifies the server options for ntp
+ubtu18cis_ntp_server_options: "iburst"
+# ubtu18cis_time_synchronization_servers are the synchronization servers
+ubtu18cis_time_synchronization_servers:
+ - 0.pool.ntp.org
+ - 1.pool.ntp.org
+ - 2.pool.ntp.org
+ - 3.pool.ntp.org
+ubtu18cis_ntp_fallback_server_list: "2.debian.pool.ntp.org 3.debian.pool.ntp.org"
+
+# Section 3 Control Variables
+# Control 3.3.2
+# Values for the /etc/hosts.allow file for IP addresses permitted to connect to the host (see the illustrative entries below).
+ubtu18cis_host_allow:
+ - "10.0.0.0/255.0.0.0"
+ - "172.16.0.0/255.240.0.0"
+ - "192.168.0.0/255.255.0.0"
+
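+# NOTE (illustrative only, not taken from this role): TCP Wrappers rules in /etc/hosts.allow are
+# normally written one "daemon : client" pair per line, so the defaults above would typically end
+# up as something like:
+#   ALL: 10.0.0.0/255.0.0.0
+#   ALL: 172.16.0.0/255.240.0.0
+#   ALL: 192.168.0.0/255.255.0.0
+# Replace these RFC1918 ranges with the networks that should legitimately reach this host.
+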
+# Control 3.5.1.1
+# ubtu18cis_firewall_package is the firewall package you will be using.
+# The options are ufw, nftables, or iptables
+# you must choose only one firewall package
+ubtu18cis_firewall_package: "ufw"
+# ubtu18cis_iptables_v6 toggles iptables vs ip6tables CIS firewall rules and is used with
+# variable ubtu18cis_firewall_package set to iptables
+ubtu18cis_iptables_v6: true
+# ubtu18cis_ufw_system_sysctlconf changes the /etc/default/ufw IPT_SYSCTL to use the main /etc/sysctl.conf file
+# By default UFW will use its own sysctl.conf file located in /etc/ufw which could/will override /etc/sysctl.conf
+# Setting this value to true will change the UFW configuration to use the /etc/sysctl.conf file
+ubtu18cis_ufw_system_sysctlconf: true
+
+# Control 3.5.1.5
+# ubtu18cis_ufw_allow_out_ports are the ports for the firewall to allow
+# if you want to allow out on all ports set the variable to "all", example ubtu18cis_ufw_allow_out_ports: all
+# ubtu18cis_ufw_allow_out_ports: all
+ubtu18cis_ufw_allow_out_ports:
+ - 53
+ - 80
+ - 443
+
+# Controls 3.5.4.1.1 through 3.5.4.1.4
+# The iptables module only writes to memory, which means a reboot could revert settings
+# The below toggle will install iptables-persistent and save the in-memory rules to disk (/etc/iptables/rules.v4 or rules.v6)
+# This makes the CIS role changes permanent
+ubtu18cis_save_iptables_cis_rules: true
+
+# Control 3.5.3.2
+# ubtu18cis_nftables_table_name is the name of the table in nftables you want to create
+# the default nftables table name is inet filter. This variable name will be the one all
+# nftables configs are applied to
+ubtu18cis_nftables_table_name: "inet filter"
+
+# Section 4 Control Variables
+# Control 4.1.1.4
+# ubtu18cis_audit_back_log_limit is the audit_back_log limit and should be set to a sufficient value
+# The example from CIS uses 8192
+ubtu18cis_audit_back_log_limit: 8192
+
+# Control 4.1.2.1
+# ubtu18cis_max_log_file_size is the largest the log file will become in MB
+# This should be set based on your site's policy
+ubtu18cis_max_log_file_size: 10
+
+# Control 4.1.2.2
+ubtu18cis_auditd:
+ admin_space_left_action: halt
+ max_log_file_action: keep_logs
+
+# Control 4.2.1.3
+# ubtu18cis_rsyslog_ansible_managed will toggle ansible automated configurations of rsyslog
+# You should set rsyslog to your site-specific needs. This toggle will use the example from
+# page 347 to set rsyslog logging based on those configuration suggestions. Settings can be seen
+# in control 4.2.1.3
+ubtu18cis_rsyslog_ansible_managed: true
+
+# Control 4.2.1.5
+# ubtu18cis_remote_log_server is the remote logging server
+ubtu18cis_remote_log_server: 192.168.2.100
+
+# Control 4.2.1.6
+ubtu18cis_system_is_log_server: true
+
+# Control 4.3
+# ubtu18cis_logrotate is the log rotate frequency. Options are daily, weekly, monthly, and yearly
+ubtu18cis_logrotate: "daily"
+
+# Section 5 Control Variables
+# Control 5.2.1
+# ubtu18cis_sudo_package is the name of the sudo package to install
+# The possible values are "sudo" or "sudo-ldap"
+ubtu18cis_sudo_package: "sudo"
+
+# Control 5.2.3
+# ubtu18cis_sudo_logfile is the path and file name of the sudo log file (an illustrative sudoers entry follows below)
+ubtu18cis_sudo_logfile: "/var/log/sudo.log"
+
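+# NOTE (illustrative only, not taken from this role's tasks): control 5.2.3 is normally met by a
+# sudoers entry pointing at the file configured above, added to /etc/sudoers or a drop-in under
+# /etc/sudoers.d/, for example:
+#   Defaults logfile="/var/log/sudo.log"
+# Always edit sudoers content with visudo (or visudo -f for drop-ins) so a syntax error cannot
+# break sudo entirely.
+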
+# ubtu18cis_sshd will contain all sshd variables. The task association and variable descriptions for each section are listed below
+# Control 5.3.4
+# allow_users is the users allowed to ssh into the system
+# allow_groups is the groups allowed to ssh into the system
+# deny_users is the users to deny from ssh'ing into the system
+# deny_groups is the groups to deny from ssh'ing into the system
+# Control 5.3.5
+# log_level is the log level variable. This needs to be set to VERBOSE or INFO to conform to CIS standards
+# Control 5.3.7
+# max_auth_tries is the max number of authentication attempts per connection.
+# This value should be 4 or less to conform to CIS standards
+# Control 5.3.13
+# ciphers is a comma-separated list of site approved ciphers
+# ONLY USE STRONG CIPHERS. Weak ciphers are listed below
+# DO NOT USE: 3des-cbc, aes128-cbc, aes192-cbc, aes256-cbc, arcfour, arcfour128, arcfour256, blowfish-cbc, cast128-cbc, rijndael-cbc@lysator.liu.se
+# Control 5.3.14
+# MACs is the comma-separated list of site approved MAC algorithms that SSH can use during communication
+# ONLY USE STRONG ALGORITHMS. Weak algorithms are listed below
+# DO NOT USE: hmac-md5, hmac-md5-96, hmac-ripemd160, hmac-sha1, hmac-sha1-96, umac-64@openssh.com, umac-128@openssh.com, hmac-md5-etm@openssh.com,
+# hmac-md5-96-etm@openssh.com, hmac-ripemd160-etm@openssh.com, hmac-sha1-etm@openssh.com, hmac-sha1-96-etm@openssh.com, umac-64-etm@openssh.com, umac-128-etm@openssh.com
+# Control 5.3.15
+# kex_algorithms is a comma-separated list of the algorithms for key exchange methods
+# ONLY USE STRONG ALGORITHMS. Weak algorithms are listed below
+# DO NOT USE: diffie-hellman-group1-sha1, diffie-hellman-group14-sha1, diffie-hellman-group-exchange-sha1
+# Control 5.3.16
+# client_alive_interval is the amount of idle time before the ssh session is terminated. Set to 300 or less to conform to CIS standards
+# client_alive_count_max will send client alive messages at the configured interval. Set to 3 or less to conform to CIS standards
+# Control 5.3.17
+# login_grace_time is the time allowed for successful authentication to the SSH server. This needs to be set to 60 seconds or less to conform to CIS standards
+# Control 5.3.22
+# max_sessions is the max number of open sessions permitted. Set the value to 10 or less to conform to CIS standards
+ubtu18cis_sshd:
+ log_level: "INFO"
+ max_auth_tries: 4
+ ciphers: "chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr"
+ macs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256"
+ kex_algorithms: "curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256"
+ client_alive_interval: 300
+ client_alive_count_max: 0
+ login_grace_time: 60
+ max_sessions: 10
+ # WARNING: make sure you understand the precedence when working with these values!!
+ # allow_users and allow_groups can be a single user/group or multiple users/groups. For multiple entries, list them with a space separating them (an illustrative sshd_config rendering follows below)
+ allow_users: "vagrant ubuntu"
+ allow_groups: "vagrant ubuntu"
+ # deny_users:
+ # deny_groups:
+
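+# NOTE (illustrative only, assuming these values are templated into /etc/ssh/sshd_config):
+# the dictionary above typically renders as directives such as:
+#   LogLevel INFO
+#   MaxAuthTries 4
+#   ClientAliveInterval 300
+#   ClientAliveCountMax 0
+#   LoginGraceTime 60
+#   MaxSessions 10
+#   AllowUsers vagrant ubuntu
+#   AllowGroups vagrant ubuntu
+# Validate with 'sshd -t' (or inspect the effective config with 'sshd -T') before restarting sshd,
+# and make sure your own administrative users are covered by allow_users/allow_groups first.
+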
+# Control 5.4.3
+# ubtu18cis_pamd_pwhistory_remember is the number of previous passwords remembered, so a user cannot re-use them
+# This needs to be 5 or more to conform to CIS standards
+ubtu18cis_pamd_pwhistory_remember: 5
+
+
+# ubtu18cis_pass will be password based variables
+# Control 5.5.1.1
+# min_days is the min number of days allowed between changing passwords. Set to 1 or more to conform to CIS standards
+# Control 5.5.1.2
+# max_days forces passwords to expire in the configured number of days. Set to 365 or less to conform to CIS standards
+# Control 5.5.1.3
+# warn_age is how many days before pw expiry the user will be warned. Set to 7 or more to conform to CIS standards
+# Control 5.5.1.4
+# inactive is the number of days of inactivity before the account will lock. Set to 30 days or less to conform to CIS standards
+ubtu18cis_pass:
+ max_days: 365
+ min_days: 1
+ warn_age: 7
+ inactive: 30
+
+# Control 5.5.4
+# ubtu18cis_bash_umask is the umask to set in /etc/bash.bashrc and /etc/profile.
+# The value needs to be 027 or more restrictive to comply with CIS standards
+ubtu18cis_bash_umask: '027'
+
+# Control 5.5.5
+# Session timeout setting file (the TMOUT setting can be set in multiple files)
+# Timeout value is in seconds. Set the value to 900 seconds or less
+ubtu18cis_shell_session_timeout:
+ file: /etc/profile.d/tmout.sh
+ timeout: 900
+
+# Control 5.7
+# ubtu18cis_su_group is the su group to use with pam_wheel
+ubtu18cis_su_group: "wheel"
+
+# Section 6 Control Variables
+# Control 6.1.10
+# ubtu18cis_no_world_write_adjust will toggle the automated fix to remove world-writable perms from all files
+# Setting to true will remove all world-writable permissions, and false will leave them as-is
+ubtu18cis_no_world_write_adjust: true
+
+# Control 6.1.11
+# ubtu18cis_unowned_owner is the owner to set on files that have no owner
+ubtu18cis_unowned_owner: root
+# ubtu18cis_no_owner_adjust will toggle the automated fix to give an owner to unowned files/directories
+# true will give the owner from ubtu18cis_unowned_owner to all unowned files/directories and false will skip
+ubtu18cis_no_owner_adjust: true
+# Control 6.1.12
+# ubtu18cis_ungrouped_group is the group to set on files that have no group
+ubtu18cis_ungrouped_group: root
+# ubtu18cis_no_group_adjust will toggle the automated fix to give a group to ungrouped files/directories
+# true will give the group from ubtu18cis_ungrouped_group to all ungrouped files/directories and false will skip
+ubtu18cis_no_group_adjust: true
+
+# Control 6.1.13
+# ubtu18cis_suid_adjust is the toggle to remove the SUID bit from all files on all mounts
+# Set to true this role will remove that bit, set to false we will just warn about the files (see the illustrative audit commands below)
+ubtu18cis_suid_adjust: false
+
+# Control 6.2.5
+# ubtu18cis_int_gid is the UID for interactive users to start at
+ubtu18cis_int_gid: 1000
+
+# Control 6.2.7
+# This control toggles automation to set all users' dot files to not be group or world writable
+# A value of true will change file permissions, a value of false will skip the automation changing permissions.
+ubtu18cis_dotperm_ansibleManaged: true
+
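+# NOTE (illustrative manual checks, not tasks from this role): before relying on the section 6
+# permission toggles above, the CIS-style audit commands below can be used to see what would be
+# touched; they mirror the sticky-bit search this role uses for control 1.1.22:
+#   df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type f -perm -0002  # world-writable files
+#   df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type f -perm -4000  # SUID files
+# Review the output before leaving ubtu18cis_no_world_write_adjust enabled or setting ubtu18cis_suid_adjust to true.
+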
+# Control 6.2.9 Allow ansible to adjust world-writable files. False will just display world-writable files, True will remove the world-writable permission
+ubtu18cis_passwd_label: "{{ (this_item | default(item)).id }}: {{ (this_item | default(item)).dir }}"
+
+
+#### Audit Configuration Settings ####
+
+### Audit binary settings ###
+audit_bin_version:
+ release: v0.3.16
+ checksum: 'sha256:827e354b48f93bce933f5efcd1f00dc82569c42a179cf2d384b040d8a80bfbfb'
+audit_bin_path: /usr/local/bin/
+audit_bin: "{{ audit_bin_path }}goss"
+audit_format: json
+
+# if get_audit_binary_method == download, change accordingly
+audit_bin_url: "https://github.com/aelsabbahy/goss/releases/download/{{ audit_bin_version.release }}/goss-linux-amd64"
+
+## if get_audit_binary_method == copy, the following needs to be updated for your environment
+## it is expected that it will be copied from somewhere accessible to the control node
+## e.g. copied from the ansible control node to the remote host
+audit_bin_copy_location: /some/accessible/path
+
+### Goss Audit Benchmark file ###
+## managed by the control audit_content
+# git
+audit_file_git: "https://github.com/ansible-lockdown/{{ benchmark }}-Audit.git"
+audit_git_version: main
+
+# archive or copy:
+audit_conf_copy: "some path to copy from"
+
+# get_url:
+audit_files_url: "some url maybe s3?"
+
+## Goss configuration information
+# Where the goss configs and outputs are stored
+audit_out_dir: '/var/tmp'
+# Where the goss audit configuration will be stored
+audit_conf_dir: "{{ audit_out_dir }}/{{ benchmark }}-Audit/"
+
+# If changed these can affect other products
+pre_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_pre_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}"
+post_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_post_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}"
+
+## The following should not need changing
+audit_control_file: "{{ audit_conf_dir }}goss.yml"
+audit_vars_path: "{{ audit_conf_dir }}/vars/{{ ansible_hostname }}.yml"
+audit_results: |
+ The pre remediation results are: {{ pre_audit_summary }}.
+ The post remediation results are: {{ post_audit_summary }}.
+ Full breakdown can be found in {{ audit_out_dir }} \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/files/.DS_Store b/Linux/ansible-lockdown/UBUNTU18-CIS/files/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..e412cbf315ed784cd3fe292626826499981feea3 GIT binary patch literal 6148 zcmeH~I}XA?3`A|9K%z-WxeW*41}g+7-~xzG7bykM@6j258Ui(n&{(qH+8Zm$6xmor zboab&MLH2#!i}=AFfv6xmzzA~a=l;r^Dy*Qa+@Ws0Pke9pW6f#paN8Y3Qz$mFd+r< zAYZN~^h|sdDnJFMp@4lK3fx$eE$E*P1Rnvw1lA49C} z?O@64YO)2RT{MRe%{!}2F))pG(Sih~)xkgosK7{pdE}j){|ETD`G3^HlnPLRKT|*# z`{RCxm&&vC + +/usr/bin/ssh { + #include + + /lib/x86_64-linux-gnu/ld-*.so mr, + /usr/bin/ssh mr, + +} \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/.DS_Store b/Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..c7f175f42326c4f01a07f5d1e40f837a2f64205f GIT binary patch literal 6148 zcmeHKISv9b4733uBpOP}e1RWC2wuPk5Jv~101CRTco$D&d=#LC4hqm%awc&+i894{ zEh0KSZzdum5gEV@w=YMzz)z2S+n`Ya zDnJFO02QDDA6Fnt>|prulX)N&paQ?HfPEhd+^{A#f&S^h;4J{Khp-#w-b(G z{r@wGD=I(*{z?Ju%@(sMo|Lt<^*F1w1-^t^&JAvcxl=HBIR<(;#=^?+*pni!*c|&c Vu?ciK;!X$hXTWr!QGstOa0e=g6|evR literal 0 HcmV?d00001 diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/system/tmp.mount b/Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/system/tmp.mount new file mode 100644 index 0000000..47ca662 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/files/etc/systemd/system/tmp.mount @@ -0,0 +1,25 @@ +# This file is part of systemd. +# +# systemd is free software; you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. 
+ +[Unit] +Description=Temporary Directory +Documentation=man:hier(7) +Documentation=http://www.freedesktop.org/wiki/Software/systemd/APIFileSystems +ConditionPathIsSymbolicLink=!/tmp +DefaultDependencies=no +Conflicts=umount.target +Before=local-fs.target umount.target + +[Mount] +What=tmpfs +Where=/tmp +Type=tmpfs +Options=mode=1777,strictatime,noexec,nodev,nosuid + +# Make 'systemctl enable tmp.mount' work: +[Install] +WantedBy=local-fs.target diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/handlers/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/handlers/main.yml new file mode 100644 index 0000000..f489b02 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/handlers/main.yml @@ -0,0 +1,54 @@ +--- +- name: grub update + command: update-grub + failed_when: false + +- name: restart postfix + service: + name: postfix + state: restarted + +- name: restart exim4 + service: + name: exim4 + state: restarted + +- name: sysctl flush ipv4 route table + sysctl: + name: net.ipv4.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: sysctl flush ipv6 route table + sysctl: + name: net.ipv6.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: reload ufw + ufw: + state: reloaded + +- name: restart auditd + service: + name: auditd + state: restarted + when: + - not ubtu18cis_skip_for_travis + tags: + - skip_ansible_lint + +- name: restart rsyslog + service: + name: rsyslog + state: restarted + +- name: restart sshd + service: + name: sshd + state: restarted + +- name: reload gdm + command: dpkg-reconfigure gdm3 diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/library/goss.py b/Linux/ansible-lockdown/UBUNTU18-CIS/library/goss.py new file mode 100644 index 0000000..d4dfbc7 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/library/goss.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python +# FROM: https://github.com/indusbox/goss-ansible +import os +from ansible.module_utils.basic import * + +DOCUMENTATION = ''' +--- +module: goss +author: Mathieu Corbin +short_description: Launch goss (https://github.com/aelsabbahy/goss) tests +description: + - Launch goss tests. + This module always returns `changed = false` for idempotence. +options: + path: + required: true + description: + - Test file to validate. + The test file must be on the remote machine. + goss_path: + required: false + description: + - Path location for the goss executable. + Default is "goss" (ie.`no absolute path, goss executable must be available in $PATH). + vars_path: + required: false + description: + - Path location for a variables YAML/JSON file to use as templating inputs. + format: + required: false + description: + - Output goss format. + Goss format list : goss v --format => [documentation json junit nagios nagios_verbose rspecish tap silent]. + Default is "rspecish". 
+ output_file: + required: false + description: + - Save the result of the goss command in a file whose path is output_file +examples: + - name: run goss against the gossfile /path/to/file.yml + goss: + path: "/path/to/file.yml" + - name: run goss against the gossfile /path/to/file.yml with nagios output + goss: + path: "/path/to/file.yml" + format: "nagios" + - name: run /usr/local/bin/goss against the gossfile /path/to/file.yml + goss: + path: "/path/to/file.yml" + goss_path: "/usr/local/bin/goss" + - name: run /usr/local/bin/goss with a variables file + goss: + vars_path: "/path/to/file.yml" + - name: run goss against multiple gossfiles and write the result in JSON format to /my/output/ for each file + goss: + path: "{{ item }}" + format: json + output_file : /my/output/{{ item }} + with_items: "{{ goss_files }}" +''' + + +# launch goss validate command on the file +def check(module, test_file_path, output_format, goss_path, vars_path): + cmd = "{0} --gossfile {1}".format(goss_path, test_file_path) + # goss parent command flags + if vars_path is not None: + cmd += " --vars {0}".format(vars_path) + + # validate sub-command flags + cmd += " validate" + if output_format is not None: + cmd += " --format {0}".format(output_format) + + return module.run_command(cmd) + + +# write goss result to output_file_path +def output_file(output_file_path, out): + if output_file_path is not None: + with open(output_file_path, 'w') as output_file: + output_file.write(out) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + path=dict(required=True, type='str'), + format=dict(required=False, type='str'), + output_file=dict(required=False, type='str'), + vars_path=dict(required=False, type='str'), + goss_path=dict(required=False, default='goss', type='str'), + ), + supports_check_mode=False + ) + + test_file_path = module.params['path'] # test file path + output_format = module.params['format'] # goss output format + output_file_path = module.params['output_file'] + goss_path = module.params['goss_path'] + vars_path = module.params['vars_path'] + + if test_file_path is None: + module.fail_json(msg="test file path is null") + + test_file_path = os.path.expanduser(test_file_path) + + # test if access to test file is ok + if not os.access(test_file_path, os.R_OK): + module.fail_json(msg="Test file %s not readable" % (test_file_path)) + + # test if test file is not a dir + if os.path.isdir(test_file_path): + module.fail_json(msg="Test file must be a file ! : %s" % (test_file_path)) + + (rc, out, err) = check(module, test_file_path, output_format, goss_path, vars_path) + + if output_file_path is not None: + output_file_path = os.path.expanduser(output_file_path) + # check if output_file is a file + if output_file_path.endswith(os.sep): + module.fail_json(msg="output_file must be a file. 
Actually : %s " + % (output_file_path)) + + output_dirname = os.path.dirname(output_file_path) + + # check if output directory exists + if not os.path.exists(output_dirname): + module.fail_json(msg="directory %s does not exists" % (output_dirname)) + + # check if writable + if not os.access(os.path.dirname(output_file_path), os.W_OK): + module.fail_json(msg="Destination %s not writable" % (os.path.dirname(output_file_path))) + # write goss result on the output file + output_file(output_file_path, out) + + if rc is not None and rc != 0: + error_msg = "err : {0} ; out : {1}".format(err, out) + module.fail_json(msg=error_msg) + + result = {} + result['stdout'] = out + result['changed'] = False + + module.exit_json(**result) + +main() \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/meta/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/meta/main.yml new file mode 100644 index 0000000..de5adc4 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/meta/main.yml @@ -0,0 +1,20 @@ +galaxy_info: + author: "George Nalen, Mark Bolwell, and DFed" + description: "Apply the Ubuntu 18 CIS" + company: "MindPoint Group" + license: MIT + min_ansible_version: 2.9.0 + + platforms: + - name: Ubuntu + versions: + - bionic + + galaxy_tags: + - system + - security + - ubuntu1804 + - cis + - hardening + +dependencies: [] diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/site.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/site.yml new file mode 100644 index 0000000..471a7b5 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/site.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + become: true + vars: + is_container: false + + roles: + + - role: "{{ playbook_dir }}" + ubtu18cis_system_is_container: "{{ is_container | default(false) }}" + ubtu18cis_skip_for_travis: false + ubtu18cis_oscap_scan: yes diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/.DS_Store b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0=') + tags: + - always + +- import_tasks: prelim.yml + tags: + - prelim_tasks + - run_audit + +- import_tasks: pre_remediation_audit.yml + when: + - run_audit + tags: run_audit + +- include: parse_etc_password.yml + when: + - ubtu18cis_section5_patch or + ubtu18cis_section6_patch + +- name: Gather the package facts + package_facts: + manager: auto + tags: + - always + +- name: Include section 1 patches + include: section_1/main.yml + when: ubtu18cis_section1_patch + tags: + - section1 + +- name: Include section 2 patches + import_tasks: section_2/main.yml + when: ubtu18cis_section2_patch + tags: + - section2 + +- name: Include section 3 patches + import_tasks: section_3/main.yml + when: ubtu18cis_section3_patch + tags: + - section3 + +- name: Include section 4 patches + import_tasks: section_4/main.yml + when: ubtu18cis_section4_patch + tags: + - section4 + +- name: Include section 5 patches + import_tasks: section_5/main.yml + when: ubtu18cis_section5_patch + tags: + - section5 + +- name: Include section 6 patches + import_tasks: section_6/main.yml + when: ubtu18cis_section6_patch | bool + tags: + - section6 + +- name: flush handlers + meta: flush_handlers + +- name: reboot system + block: + - name: reboot system if not skipped + reboot: + when: + - not ubtu18_skip_reboot + + - name: Warning a 
reboot required but skip option set + debug: + msg: "Warning!! changes have been made that require a reboot to be implemented but skip reboot was set - Can affect compliance check results" + changed_when: true + when: + - ubtu18_skip_reboot + +- import_tasks: post_remediation_audit.yml + when: + - run_audit + +- name: Show Audit Summary + debug: + msg: "{{ audit_results.split('\n') }}" + when: + - run_audit diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/parse_etc_password.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/parse_etc_password.yml new file mode 100644 index 0000000..a9c8764 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/parse_etc_password.yml @@ -0,0 +1,32 @@ +--- +- name: "PRELIM | {{ ubtu18cis_passwd_tasks }} | Parse /etc/passwd" + block: + - name: "PRELIM | {{ ubtu18cis_passwd_tasks }} | Parse /etc/passwd" + command: cat /etc/passwd + changed_when: false + check_mode: false + register: ubtu18cis_passwd_file_audit + + - name: "PRELIM | {{ ubtu18cis_passwd_tasks }} | Split passwd entries" + set_fact: + ubtu18cis_passwd: "{{ ubtu18cis_passwd_file_audit.stdout_lines | map('regex_replace', ld_passwd_regex, ld_passwd_yaml) | map('from_yaml') | list }}" + + with_items: "{{ ubtu18cis_passwd_file_audit.stdout_lines }}" + vars: + ld_passwd_regex: >- + ^(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*) + ld_passwd_yaml: | + id: >-4 + \g + password: >-4 + \g + uid: \g + gid: \g + gecos: >-4 + \g + dir: >-4 + \g + shell: >-4 + \g + tags: + - always diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/post_remediation_audit.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/post_remediation_audit.yml new file mode 100644 index 0000000..17ef3f8 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/post_remediation_audit.yml @@ -0,0 +1,43 @@ +--- + +- name: "Post Audit | Run post_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ post_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Post Audit | ensure audit files readable by users + file: + path: "{{ item }}" + mode: 0644 + state: file + loop: + - "{{ post_audit_outfile }}" + - "{{ pre_audit_outfile }}" + +- name: Post Audit | Capture audit data if json format + block: + - name: "capture data {{ post_audit_outfile }}" + command: "cat {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Post Audit | Capture audit data if documentation format + block: + - name: "Post Audit | capture data {{ post_audit_outfile }}" + command: "tail -2 {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Post Audit | Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/pre_remediation_audit.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/pre_remediation_audit.yml new file mode 100644 index 0000000..6df2db2 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/pre_remediation_audit.yml @@ -0,0 +1,118 @@ +--- + +- name: Audit Binary Setup | Setup the LE audit + include_tasks: LE_audit_setup.yml + when: + - setup_audit + tags: + - setup_audit + +- name: "Pre Audit Setup | Ensure {{ audit_conf_dir }} exists" + file: + path: "{{ 
audit_conf_dir }}" + state: directory + mode: '0755' + +- name: Pre Audit Setup | If using git for content set up + block: + - name: Pre Audit Setup | Install git (rh8 python3) + package: + name: git + state: present + when: ansible_distribution_major_version == '8' + + - name: Pre Audit Setup | Install git (rh7 python2) + package: + name: git + state: present + vars: + ansible_python_interpreter: "{{ python2_bin }}" + when: ansible_distribution_major_version == '7' + + - name: Pre Audit Setup | retrieve audit content files from git + git: + repo: "{{ audit_file_git }}" + dest: "{{ audit_conf_dir }}" + version: "{{ audit_git_version }}" + when: + - audit_content == 'git' + +- name: Pre Audit Setup | copy to audit content files to server + copy: + src: "{{ audit_local_copy }}" + dest: "{{ audit_conf_dest }}" + mode: 0644 + when: + - audit_content == 'copy' + +- name: Pre Audit Setup | unarchive audit content files on server + unarchive: + src: "{{ audit_conf_copy }}" + dest: "{{ audit_conf_dest }}" + when: + - audit_content == 'archived' + +- name: Pre Audit Setup | get audit content from url + get_url: + url: "{{ audit_files_url }}" + dest: "{{ audit_conf_dir }}" + when: + - audit_content == 'get_url' + +- name: Pre Audit Setup | Check Goss is available + block: + - name: Pre Audit Setup | Check for goss file + stat: + path: "{{ audit_bin }}" + register: goss_available + + - name: Pre Audit Setup | If audit ensure goss is available + assert: + msg: "Audit has been selected: unable to find goss binary at {{ audit_bin }}" + when: + - not goss_available.stat.exists + when: + - run_audit + +- name: Pre Audit Setup | Copy ansible default vars values to test audit + template: + src: ansible_vars_goss.yml.j2 + dest: "{{ audit_vars_path }}" + mode: 0600 + when: + - run_audit + tags: + - goss_template + +- name: "Pre Audit | Run pre_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ pre_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Pre Audit | Capture audit data if json format + block: + - name: "capture data {{ pre_audit_outfile }}" + command: "cat {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Pre Audit | Capture audit data if documentation format + block: + - name: "capture data {{ pre_audit_outfile }}" + command: "tail -2 {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/prelim.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/prelim.yml new file mode 100644 index 0000000..c5802c3 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/prelim.yml @@ -0,0 +1,50 @@ +--- +# List users in order to look files inside each home directory +- name: "PRELIM | List users accounts" + command: "awk -F: '{print $1}' /etc/passwd" + changed_when: false + register: ubtu18cis_users + when: + - ubtu18cis_rule_6_2_8 or + ubtu18cis_rule_6_2_9 or + ubtu18cis_rule_6_2_10 + +- name: "PRELIM | Check for autofs service" + shell: "systemctl show autofs | grep LoadState | cut -d = -f 2" + register: ubtu18cis_autofs_service_status + changed_when: false + 
check_mode: false + when: + - ubtu18cis_rule_1_1_23 + tags: + - skip_ansible_lint + +- name: "PRELIM | Run apt update" + apt: + update_cache: yes + when: + - ubtu18cis_rule_1_3_1 + +- name: "PRELIM | Check for avahi-daemon service" + shell: "systemctl show avahi-daemon | grep LoadState | cut -d = -f 2" + register: avahi_service_status + changed_when: false + check_mode: false + tags: + - skip_ansible_lint + +- name: "PRELIM | Install gdm3" + apt: + name: gdm3 + state: present + when: + - ubtu18cis_desktop_required + - ubtu18cis_install_gdm3 + - ubtu18cis_rule_1_8_2 or + ubtu18cis_rule_1_8_3 + +- name: "PRELIM | Install nftables" + apt: + name: nftables + state: present + when: ubtu18cis_firewall_package == "nftables" diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.1.x.yml new file mode 100644 index 0000000..3ba62bc --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.1.x.yml @@ -0,0 +1,496 @@ +--- +- name: "AUTOMATED | 1.1.1.1 | PATCH | Ensure mounting of cramfs filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/cramfs.conf + regexp: "^(#)?install cramfs(\\s|$)" + line: install cramfs /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Disable cramfs" + modprobe: + name: cramfs + state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.1 + - cramfs + +- name: "AUTOMATED | 1.1.1.2 | PATCH | Ensure mounting of freevxfs filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.2 | PATCH | Ensure mounting of freevxfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/freevxfs.conf + regexp: "^(#)?install freevxfs(\\s|$)" + line: install freevxfs /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.2 | PATCH | Ensure mounting of freevxfs filesystems is disabled | Disable freevxfs" + modprobe: + name: freevxfs + state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.2 + - freevxfs + +- name: "AUTOMATED | 1.1.1.3 | PATCH | Ensure mounting of jffs2 filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.3 | PATCH | Ensure mounting of jffs2 filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/jffs2.conf + regexp: "^(#)?install jffs2(\\s|$)" + line: install jffs2 /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.3 | PATCH | Ensure mounting of jffs2 filesystems is disabled | Disable jffs2" + modprobe: + name: jffs2 + state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.3 + - jffs2 + +- name: "AUTOMATED | 1.1.1.4 | PATCH | Ensure mounting of hfs filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.4 | PATCH | Ensure mounting of hfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/hfs.conf + regexp: "^(#)?install hfs(\\s|$)" + line: install hfs /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.4 | PATCH | Ensure mounting of hfs filesystems is disabled | Disable hfs" + modprobe: + name: hfs + 
state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.4 + - hfs + +- name: "AUTOMATED | 1.1.1.5 | PATCH | Ensure mounting of hfsplus filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.5 | PATCH | Ensure mounting of hfsplus filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/hfsplus.conf + regexp: "^(#)?install hfsplus(\\s|$)" + line: install hfsplus /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.5 | PATCH | Ensure mounting of hfsplus filesystems is disabled | Disable hfsplus" + modprobe: + name: hfsplus + state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.5 + - hfsplus + +- name: "AUTOMATED | 1.1.1.6 | PATCH | Ensure mounting of udf filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.6 | PATCH | Ensure mounting of udf filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/udf.conf + regexp: "^(#)?install udf(\\s|$)" + line: install udf /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.6 | PATCH | Ensure mounting of udf filesystems is disabled | Disable udf" + modprobe: + name: udf + state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.6 + - udf + +- name: "AUTOMATED | 1.1.2 | PATCH | Ensure /tmp is configured" + mount: + path: /tmp + src: /tmp + state: mounted + fstype: tmpfs + opts: "{{ ubtu18cis_tmp_fstab_options }}" + when: + - ubtu18cis_rule_1_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.2 + - tmp + +- name: | + "AUTOMATED | 1.1.3 | PATCH | Ensure nodev option set on /tmp partition" + "AUTOMATED | 1.1.4 | PATCH | Ensure nosuid option set on /tmp partition" + "AUTOMATED | 1.1.5 | PATCH | Ensure noexec option set on /tmp partition" + mount: + name: /tmp + src: /tmp + state: remounted + fstype: tmpfs + opts: "{{ ubtu18cis_tmp_fstab_options }}" + when: + - ubtu18cis_rule_1_1_3 or + ubtu18cis_rule_1_1_4 or + ubtu18cis_rule_1_1_5 + - ubtu18cis_vartmp['enabled'] + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.3 + - rule_1.1.4 + - rule_1.1.5 + - tmp + +- name: "AUTOMATED | 1.1.6 | PATCH | Ensure /dev/shm is configured" + mount: + path: /dev/shm + src: /dev/shm + state: mounted + fstype: tmpfs + opts: "{{ ubtu18cis_shm_fstab_options }}" + when: + - ubtu18cis_rule_1_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.6 + - tmp + +- name: | + "AUTOMATED | 1.1.7 | PATCH | Ensure nodev option set on /dev/shm partition" + "AUTOMATED | 1.1.8 | PATCH | Ensure nosuid option set on /dev/shm partition" + "AUTOMATED | 1.1.9 | PATCH | Ensure noexec option set on /dev/shm partition" + mount: + name: /dev/shm + src: tmpfs + state: mounted + fstype: tmpfs + opts: "{{ ubtu18cis_shm_fstab_options }}" + when: + - ubtu18cis_rule_1_1_7 or + ubtu18cis_rule_1_1_8 or + ubtu18cis_rule_1_1_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.7 + - rule_1.1.8 + - rule_1.1.9 + - /dev/shm + +- name: "AUTOMATED | 1.1.10 | AUDIT | Ensure separate partition exists for /var" + block: + - name: "AUTOMATED | 1.1.10 | AUDIT | Ensure separate partition exists for /var | Gather /var partition" + shell: mount | grep 
"on /var " + changed_when: false + failed_when: false + args: + warn: false + register: ubtu18cis_1_1_10_var_mounted + + - name: "AUTOMATED | 1.1.10 | AUDIT | Ensure separate partition exists for /var | Alert if /var partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /var" + - "Please create a separate partition for /var" + when: ubtu18cis_1_1_10_var_mounted.stdout | length == 0 + when: + - ubtu18cis_rule_1_1_10 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.10 + - var + +- name: "AUTOMATED | 1.1.11 | AUDIT | Ensure separate partition exists for /var/tmp" + block: + - name: "AUTOMATED | 1.1.11 | AUDIT | Ensure separate partition exists for /var/tmp | Gather /var/tmp partition" + shell: mount | grep "on /var/tmp " + changed_when: false + failed_when: false + args: + warn: false + register: ubtu18cis_1_1_11_var_tmp_mounted + + - name: "AUTOMATED | 1.1.7 | AUDIT | Ensure separate partition exists for /var/tmp | Alert if /var/tmp partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /var/tmp" + - "Please create a separate partition for /var/tmp" + when: ubtu18cis_1_1_11_var_tmp_mounted.stdout | length == 0 + when: + - ubtu18cis_rule_1_1_11 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.11 + - var/tmp + +- name: | + "AUTOMATED | 1.1.12 | PATCH | Ensure nodev option set on /var/tmp partition" + "AUTOMATED | 1.1.13 | PATCH | Ensure nosuid option set on /var/tmp partition" + "AUTOMATED | 1.1.14 | PATCH | Ensure noexec option set on /var/tmp partition" + mount: + name: /var/tmp + src: "{{ ubtu18cis_vartmp['source'] }}" + state: present + fstype: "{{ ubtu18cis_vartmp['fstype'] }}" + opts: "{{ ubtu18cis_vartmp['opts'] }}" + + when: + - ubtu18cis_rule_1_1_12 or + ubtu18cis_rule_1_1_13 or + ubtu18cis_rule_1_1_14 + - ubtu18cis_vartmp['enabled'] + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.12 + - rule_1.1.13 + - rule_1.1.14 + - var/tmp + +- name: "AUTOMATED | 1.1.15 | AUDIT | Ensure separate partition exists for /var/log" + block: + - name: "AUTOMATED | 1.1.15 | AUDIT | Ensure separate partition exists for /var/log | Gather /var/log partition" + shell: mount | grep "on /var/log " + changed_when: false + failed_when: false + register: ubtu18cis_1_1_15_var_log_mounted + args: + warn: false + + - name: "AUTOMATED | 1.1.15 | AUDIT | Ensure separate partition exists for /var/log | Alert if /var/log partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /var/log" + - "Please create a separate partition for /var/log" + when: ubtu18cis_1_1_15_var_log_mounted.stdout | length == 0 + when: + - ubtu18cis_rule_1_1_15 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.15 + - var/log + +- name: "AUTOMATED | 1.1.16 | AUDIT | Ensure separate partition exists for /var/log/audit" + block: + - name: "AUTOMATED | 1.1.16 | AUDIT | Ensure separate partition exists for /var/log/audit | Gather /var/log/audit" + shell: mount | grep "on /var/log/audit " + changed_when: false + failed_when: false + register: ubtu18cis_1_1_16_var_log_audit_mounted + args: + warn: false + + - name: "AUTOMATED | 1.1.16 | AUDIT | Ensure separate partition exists for /var/log/audit | Alert if /var/log/audit partition does not exist" + debug: + msg: + - "ALERT!!!! 
There is no separate partition for /var/log/audit" + - "Please create a separate partition for /var/log/audit" + when: ubtu18cis_1_1_16_var_log_audit_mounted.stdout | length == 0 + when: + - ubtu18cis_rule_1_1_16 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.16 + - var/log/audit + +- name: "AUTOMATED | 1.1.17 | AUDIT | Ensure separate partition exists for /home" + block: + - name: "AUTOMATED | 1.1.17 | AUDIT | Ensure separate partition exists for /home | Gather /home" + shell: mount | grep "on /home" + changed_when: false + failed_when: false + register: ubtu18cis_1_1_17_home_mounted + args: + warn: false + + - name: "AUTOMATED | 1.1.17 | AUDIT | Ensure separate partition exists for /home | Alert if /home partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /home" + - "Please create a separate partition for /home" + when: ubtu18cis_1_1_17_home_mounted.stdout | length == 0 + when: + - ubtu18cis_rule_1_1_17 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.17 + - /home + +- name: "AUTOMATED | 1.1.18 | PATCH | Ensure /home partition includes the nodev option" + mount: + name: "/home" + src: "{{ item.device }}" + state: mounted + fstype: "{{ item.fstype }}" + opts: "nodev" + with_items: "{{ ansible_mounts }}" + when: + - ubtu18cis_rule_1_1_18 + - item.mount == "/home" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.18 + - /home + +- name: "MANUAL | 1.1.19 | AUDIT | Ensure nodev option set on removable media partitions" + debug: + msg: "Warning!!!! Not relevant control" + when: + - ubtu18cis_rule_1_1_19 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.1.19 + - removable_media + +- name: "MANUAL | 1.1.20 | AUDIT | Ensure nosuid option set on removable media partitions" + debug: + msg: "Warning!!!! Not relevant control" + when: + - ubtu18cis_rule_1_1_20 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.1.20 + - removable_media + +- name: "MANUAL | 1.1.21 | AUDIT | Ensure noexec option set on removable media partitions" + debug: + msg: "Warning!!!! Not relevant control" + when: + - ubtu18cis_rule_1_1_21 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.1.21 + - removable_media + +- name: "AUTOMATED | 1.1.22 | PATCH | Ensure sticky bit is set on all world-writable directories" + shell: df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type d \( -perm -0002 -a ! 
-perm -1000 \) 2>/dev/null | xargs -I '{}' chmod a+t '{}' + failed_when: ubtu18cis_1_1_22_status.rc>0 + register: ubtu18cis_1_1_22_status + when: + - ubtu18cis_rule_1_1_22 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.22 + - sticky_bit + +- name: "AUTOMATED | 1.1.23 | PATCH | Disable Automounting" + service: + name: autofs + state: stopped + enabled: no + when: + - ubtu18cis_rule_1_1_23 + - ubtu18cis_autofs_service_status.stdout == "loaded" + - not ubtu18cis_allow_autofs + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.23 + - automounting + +- name: "AUTOMATED | 1.1.24 | PATCH | Disable USB Storage" + block: + - name: "AUTOMATED | 1.1.24 | PATCH | Disable USB Storage | Set modprobe config" + lineinfile: + path: /etc/modprobe.d/usb_storage.conf + regexp: '^install usb-storage' + line: 'install usb-storage /bin/true' + create: yes + + - name: "AUTOMATED | 1.1.24 | PATCH | Disable USB Storage | Remove usb-storage module" + modprobe: + name: usb-storage + state: absent + when: ansible_connection != 'docker' + when: + - ubtu18cis_rule_1_1_24 + tags: + - level1-server + - level2-workstation + - automated + - patch + - rule_1.1.24 + - usb_storage diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.2.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.2.x.yml new file mode 100644 index 0000000..e54c7a4 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.2.x.yml @@ -0,0 +1,50 @@ +--- +- name: "MANUAL | 1.2.1 | AUDIT | Ensure package manager repositories are configured" + block: + - name: "MANUAL | 1.2.1 | AUDIT | Ensure package manager repositories are configured | Get repositories" + command: apt-cache policy + changed_when: false + failed_when: false + register: ubtu18cis_1_2_1_apt_policy + + - name: "MANUAL | 1.2.1 | AUDIT | Ensure package manager repositories are configured | Message out repository configs" + debug: + msg: + - "Alert!!!! Below are the apt package repositories" + - "Please review to make sure they conform to your sites policies" + - "{{ ubtu18cis_1_2_1_apt_policy.stdout_lines }}" + when: + - ubtu18cis_rule_1_2_1 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.2.1 + - apt + +- name: "MANUAL | 1.2.2 | AUDIT | Ensure GPG keys are configured" + block: + - name: "MANUAL | 1.2.2 | AUDIT | Ensure GPG keys are configured | Get apt gpg keys" + command: apt-key list + changed_when: false + failed_when: false + register: ubtu18cis_1_2_2_apt_gpgkeys + + - name: "MANUAL | 1.2.2 | AUDIT | Ensure GPG keys are configured | Message out apt gpg keys" + debug: + msg: + - "Alert!!!! 
Below are the apt gpg kyes configured" + - "Please review to make sure they are configured" + - "in accordance with site policy" + - "{{ ubtu18cis_1_2_2_apt_gpgkeys.stdout_lines }}" + when: + - ubtu18cis_rule_1_2_2 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.2.2 + - gpg + - keys diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.3.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.3.x.yml new file mode 100644 index 0000000..1f889a6 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.3.x.yml @@ -0,0 +1,35 @@ +--- +- name: "AUTOMATED | 1.3.1 | PATCH | Ensure AIDE is installed" + apt: + name: ['aide', 'aide-common'] + state: present + when: + - ubtu18cis_rule_1_3_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.3.1 + - aide + +- name: "AUTOMATED | 1.3.2 | PATCH | Ensure filesystem integrity is regularly checked" + cron: + name: Run AIDE integrity check + cron_file: "{{ ubtu18cis_aide_cron['cron_file'] }}" + user: "{{ ubtu18cis_aide_cron['cron_user'] }}" + minute: "{{ ubtu18cis_aide_cron['aide_minute'] | default('0') }}" + hour: "{{ ubtu18cis_aide_cron['aide_hour'] | default('5') }}" + day: "{{ ubtu18cis_aide_cron['aide_day'] | default('*') }}" + month: "{{ ubtu18cis_aide_cron['aide_month'] | default('*') }}" + weekday: "{{ ubtu18cis_aide_cron['aide_weekday'] | default('*') }}" + job: "{{ ubtu18cis_aide_cron['aide_job'] }}" + when: + - ubtu18cis_rule_1_3_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.3.2 + - cron diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.4.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.4.x.yml new file mode 100644 index 0000000..4ad7811 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.4.x.yml @@ -0,0 +1,84 @@ +--- +- name: "AUTOMATED | 1.4.1 | PATCH | Ensure permissions on bootloader config are not overridden" + block: + - name: "AUTOMATED | 1.4.1 | PATCH | Ensure permissions on bootloader config are not overridden | Change chmod setting" + replace: + path: /usr/sbin/grub-mkconfig + regexp: 'chmod\s\d\d\d\s\${grub_cfg}.new' + replace: 'chmod 400 ${grub_cfg}.new' + + - name: "AUTOMATED | 1.4.1 | PATCH | Ensure permissions on bootloader config are not overridden | Remove check on password" + lineinfile: + path: /usr/sbin/grub-mkconfig + regexp: 'if \[ \"x\$\{grub_cfg\}\" != "x" \] && ! grep "\^password" \${grub_cfg}.new' + line: if [ "x${grub_cfg}" != "x" ]; then + when: + - ubtu18cis_rule_1_4_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.1 + - grub + +# --------------- +# --------------- +# The RHEL7 based control for this does not use a valid module +# I need to research best way to set grub pw for Ubuntu using the +# grub-mkpasswd-pbkdf2 command and passing the data at the same time. 
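+# As an interim manual approach (illustrative only, this role does not currently do this):
+#   1. Generate a hash on the host:       grub-mkpasswd-pbkdf2
+#   2. Append to /etc/grub.d/40_custom:   set superusers="root"
+#                                         password_pbkdf2 root <grub.pbkdf2.sha512.10000....hash>
+#   3. Regenerate the bootloader config:  update-grub
+# The ubtu18cis_bootloader_password_hash default appears intended to hold such a hash once this
+# control is implemented.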
+# --------------- +# --------------- +- name: "AUTOMATED | 1.4.2 | PATCH | Ensure bootloader password is set" + command: /bin/true + changed_when: false + failed_when: false + when: + - ubtu18cis_rule_1_4_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.2 + - grub + - notimplemented + +- name: "AUTOMATED | 1.4.3 | PATCH | Ensure permissions on bootloader config are configured" + block: + - name: "AUTOMATED | 1.4.3 | AUDIT | Ensure permissions on bootloader config are configured | Check for Grub file" + stat: + path: /boot/grub/grub.cfg + register: ubtu18cis_1_4_3_grub_cfg_status + + - name: "AUTOMATED | 1.4.3 | PATCH | Ensure permissions on bootloader config are configured | Set permissions" + file: + path: /boot/grub/grub.cfg + owner: root + group: root + mode: 0400 + when: + - ubtu18cis_1_4_3_grub_cfg_status.stat.exists + when: + - ubtu18cis_rule_1_4_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.3 + - grub + +- name: "AUTOMATED | 1.4.4 | PATCH | Ensure authentication required for single user mode" + user: + name: root + password: "{{ ubtu18cis_root_pw }}" + when: + - ubtu18cis_rule_1_4_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.4 + - passwd diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.5.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.5.x.yml new file mode 100644 index 0000000..6d7efe8 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.5.x.yml @@ -0,0 +1,94 @@ +--- +- name: "MANUAL | 1.5.1 | AUDIT | Ensure XD/NX support is enabled" + block: + - name: "MANUAL | 1.5.1 | AUDIT | Ensure XD/NX support is enabled | Find status of XD/NX" + shell: "journalctl | grep 'protection: active'" + changed_when: false + failed_when: false + register: ubtu18cis_1_5_1_xdnx_status + + - name: "MANUAL | 1.5.1 | AUDIT | Ensure XD/NX support is enabled | Alert if XD/NX is not enabled" + debug: + msg: + - "ALERT!!!!You do not have XD/NX (Execute Disable/No Execute) enabled" + - "To conform to CIS standards this needs to be enabled" + when: "'active' not in ubtu18cis_1_5_1_xdnx_status.stdout" + when: + - ubtu18cis_rule_1_5_1 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.5.1 + - xd/nx + +- name: "AUTOMATED | 1.5.2 | PATCH | Ensure address space layout randomization (ASLR) is enabled" + block: + - name: "AUTOMATED | 1.5.2 | PATCH | Ensure address space layout randomization (ASLR) is enabled | Set ASLR settings" + lineinfile: + path: /etc/sysctl.conf + regexp: '^kernel.randomize_va_space' + line: 'kernel.randomize_va_space = 2' + + - name: "AUTOMATED | 1.5.2 | PATCH | Ensure address space layout randomization (ASLR) is enabled | Set active kernel parameter" + sysctl: + name: kernel.randomize_va_space + value: '2' + when: + - ubtu18cis_rule_1_5_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.5.2 + - aslr + +- name: "AUTOMATED | 1.5.3 | PATCH | Ensure prelink is disabled" + block: + - name: "AUTOMATED | 1.5.3 | PATCH | Ensure prelink is disabled | Restore binaries to normal" + command: prelink -ua + changed_when: false + failed_when: false + + - name: "AUTOMATED | 1.5.3 | PATCH | Ensure prelink is disabled | Remove prelink package" + apt: + name: prelink + state: absent + when: + - ubtu18cis_rule_1_5_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.5.3 + - prelink + +- name: "AUTOMATED | 1.5.4 | PATCH | Ensure core 
dumps are restricted" + block: + - name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted" + lineinfile: + path: /etc/security/limits.conf + regexp: '^#?\\*.*core' + line: '* hard core 0' + insertbefore: '^# End of file' + + - name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted" + sysctl: + name: fs.suid_dumpable + value: '0' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + when: + - ubtu18cis_rule_1_5_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.5.4 + - coredump diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.6.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.6.x.yml new file mode 100644 index 0000000..5f79dcf --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.6.x.yml @@ -0,0 +1,81 @@ +--- +- name: "AUTOMATED | 1.6.1.1 | PATCH | Ensure AppArmor is installed" + apt: + name: ['apparmor', 'apparmor-utils'] + state: present + when: + - ubtu18cis_rule_1_6_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.6.1.1 + - apparmor + +- name: "AUTOMATED | 1.6.1.2 | PATCH | Ensure AppArmor is enabled in the bootloader configuration" + block: + - name: "AUTOMATED | 1.6.1.2 | AUDIT | Ensure AppArmor is enabled in the bootloader configuration | Get current settings" + shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | cut -f2 -d'"' + changed_when: false + failed_when: false + register: ubtu18cis_1_6_1_2_cmdline_settings + + - name: "AUTOMATED | 1.6.1.2 | PATCH | Ensure AppArmor is enabled in the bootloader configuration | Set apparmor settings if none exist" + lineinfile: + path: /etc/default/grub + regexp: '^GRUB_CMDLINE_LINUX' + line: 'GRUB_CMDLINE_LINUX="apparmor=1 security=apparmor {{ ubtu18cis_1_6_1_2_cmdline_settings.stdout }}"' + insertafter: '^GRUB_' + when: + - "'apparmor' not in ubtu18cis_1_6_1_2_cmdline_settings.stdout" + - "'security' not in ubtu18cis_1_6_1_2_cmdline_settings.stdout" + notify: grub update + + - name: "AUTOMATED | 1.6.1.2 | PATCH | Ensure AppArmor is enabled in the bootloader configuration | Set apparmor settings if none exist | Replace apparmor settings when exists" + replace: + path: /etc/default/grub + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: 'apparmor=\S+', replace: 'apparmor=1' } + - { regexp: 'security=\S+', replace: 'security=apparmor' } + when: + - "'apparmor' in ubtu18cis_1_6_1_2_cmdline_settings.stdout" + - "'security' in ubtu18cis_1_6_1_2_cmdline_settings.stdout" + notify: grub update + when: + - ubtu18cis_rule_1_6_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.6.1.2 + - apparmor + +- name: "AUTOMATED | 1.6.1.3 | PATCH | Ensure all AppArmor Profiles are in enforce or complain mode" + command: aa-enforce /etc/apparmor.d/* + failed_when: false + when: + - ubtu18cis_rule_1_6_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.1.3 + - apparmor + +- name: "AUTOMATED | 1.6.1.4 | PATCH | Ensure all AppArmor Profiles are enforcing" + command: aa-enforce /etc/apparmor.d/* + failed_when: false + when: + - ubtu18cis_rule_1_6_1_4 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_1.6.1.4 + - apparmor diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.7.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.7.x.yml new file mode 100644 index 0000000..95118a6 --- /dev/null +++ 
b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.7.x.yml @@ -0,0 +1,93 @@ +--- +- name: "AUTOMATED | 1.7.1 | PATCH | Ensure message of the day is configured properly" + template: + src: etc/motd.j2 + dest: /etc/motd + when: + - ubtu18cis_rule_1_7_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.1 + - motd + +- name: "AUTOMATED | 1.7.2 | PATCH | Ensure permissions on /etc/issue.net are configured" + file: + path: /etc/issue.net + owner: root + group: root + mode: 0644 + when: + - ubtu18cis_rule_1_7_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.2 + - permissions + - banner + +- name: "AUTOMATED | 1.7.3 | PATCH | Ensure permissions on /etc/issue are configured" + file: + path: /etc/issue + owner: root + group: root + mode: 0644 + when: + - ubtu18cis_rule_1_7_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.3 + - permissions + - banner + +- name: "AUTOMATED | 1.7.4 | PATCH | Ensure permissions on /etc/motd are configured" + file: + path: /etc/motd + owner: root + group: root + mode: 0644 + when: + - ubtu18cis_rule_1_7_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.4 + - permissions + - motd + +- name: "AUTOMATED | 1.7.5 | PATCH | Ensure remote login warning banner is configured properly" + template: + src: etc/issue.net.j2 + dest: /etc/issue.net + when: + - ubtu18cis_rule_1_7_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.5 + - banner + +- name: "AUTOMATED | 1.7.6 | PATCH | Ensure local login warning banner is configured properly" + template: + src: etc/issue.j2 + dest: /etc/issue + when: + - ubtu18cis_rule_1_7_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.6 + - banner diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.8.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.8.x.yml new file mode 100644 index 0000000..9c8b157 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.8.x.yml @@ -0,0 +1,77 @@ +--- +- name: "MANUAL | 1.8.1 | PATCH | Ensure GNOME Display Manager is removed" + apt: + name: gdm3 + state: absent + when: + - ubtu18cis_rule_1_8_1 + - not ubtu18cis_desktop_required + tags: + - level2-server + - manual + - patch + - rule_1.8.1 + - gnome + +- name: "AUTOMATED | 1.8.2 | PATCH | Ensure GDM login banner is configured" + lineinfile: + path: /etc/gdm3/greeter.dconf-defaults + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + create: yes + owner: root + group: root + mode: 0644 + notify: reload gdm + with_items: + - { regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]', insertafter: EOF } + - { regexp: 'banner-message-enable', line: 'banner-message-enable=true', insertafter: '\[org\/gnome\/login-screen\]'} + - { regexp: 'banner-message-text', line: 'banner-message-text={{ ubtu18cis_warning_banner }}', insertafter: 'banner-message-enable' } + when: + - ubtu18cis_rule_1_8_2 + - ubtu18cis_desktop_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.8.2 + - banner + +- name: "AUTOMATED | 1.8.3 | PATCH | Ensure disable-user-list is enabled" + lineinfile: + path: /etc/gdm3/greeter.dconf-defaul + regexp: '^disable-user-list=' + line: 'disable-user-list=true' + insertafter: 'banner-message-text=' + create: yes + owner: root + group: root + mode: 0644 + notify: reload gdm + when: + 
- ubtu18cis_rule_1_8_3 + - ubtu18cis_desktop_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.8.3 + - gdm3 + +- name: "AUTOMATED | 1.8.4 | PATCH | Ensure XDCMP is not enabled" + lineinfile: + path: /etc/gdm3/custom.conf + regexp: '^Enable=true' + state: absent + when: + - ubtu18cis_rule_1_8_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.8.4 + - xdcmp diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.9.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.9.yml new file mode 100644 index 0000000..8f3d0bf --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/cis_1.9.yml @@ -0,0 +1,14 @@ +--- +- name: "MANUAL | 1.9 | PATCH | Ensure updates, patches, and additional security software are installed" + apt: + name: "*" + state: latest + when: + - ubtu18cis_rule_1_9 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_1.9 + - patching diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/main.yml new file mode 100644 index 0000000..0ac3943 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_1/main.yml @@ -0,0 +1,27 @@ +--- +- name: "SECTION | 1.1 | Disable Unused Filesystems" + include: cis_1.1.x.yml + +- name: "SECTION | 1.2 | Configure Software Updates" + include: cis_1.2.x.yml + +- name: "SECTION | 1.3. | Filesystem Integrity Checking" + include: cis_1.3.x.yml + +- name: "SECTION | 1.4 | Secure Boot Settings" + include: cis_1.4.x.yml + +- name: "SECTION | 1.5 | Additional Process Hardening" + include: cis_1.5.x.yml + +- name: "SECTION | 1.6 | Mandatory Access Control" + include: cis_1.6.x.yml + +- name: "SECTION | 1.7 | Command Line Warning Banners" + include: cis_1.7.x.yml + +- name: "SECTION | 1.8 | GNOME Display Manager" + include: cis_1.8.x.yml + +- name: "SECTION | 1.9 | Ensure updates, patches, and additional security software are installed" + include: cis_1.9.yml diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.1.x.yml new file mode 100644 index 0000000..c25c7f9 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.1.x.yml @@ -0,0 +1,407 @@ +--- +- name: "AUTOMATED | 2.1.1.1 | PATCH | Ensure time synchronization is in use" + apt: + name: "{{ ubtu18cis_time_sync_tool }}" + state: present + when: + - ubtu18cis_rule_2_1_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.1.1 + - chrony + +- name: "MANUAL | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured" + block: + - name: "MANUAL | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Set configuration for systemd-timesyncd" + lineinfile: + path: /etc/systemd/timesyncd.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^\[Time\]', line: '[Time]', insertafter: EOF } + - { regexp: '^#NTP|^NTP', line: 'NTP=0.ubuntu.pool.ntp.org 1.ubuntu.pool.ntp.org 2.ubuntu.pool.ntp.org', insertafter: '\[Time\]' } + - { regexp: '^#FallbackNTP|^FallbackNTP', line: 'FallbackNTP=ntp.ubuntu.com 3.ubuntu.pool.ntp.org', insertafter: '\[Time\]' } + - { regexp: '^#RootDistanceMaxSec|^RootDistanceMaxSec', line: 'RootDistanceMaxSec=1', insertafter: '\[Time\]'} + + - name: "MANUAL | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Start and enable the systemd-timesyncd 
service" + service: + name: systemd-timesyncd.service + state: started + enabled: yes + + - name: "MANUAL | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Set timedatectl to ntp" + command: timedatectl set-ntp true + when: + - ubtu18cis_rule_2_1_1_2 + tags: + - level1-server + - level1-workstation + - notscored + - patch + - rule_2.1.1.2 + - systemd-timesyncd + +- name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured" + block: + - name: "AUTOMATED | 2.1.1.3 | AUDIT | Ensure chrony is configured | Check for chrony user" + shell: grep chrony /etc/passwd + changed_when: false + failed_when: false + register: ubtu18cis_2_1_1_3_chrony_user_status + + - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Set chrony.conf file" + template: + src: chrony.conf.j2 + dest: /etc/chrony/chrony.conf + owner: root + group: root + mode: 0644 + + - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Create chrony user" + user: + name: chrony + shell: /usr/sbin/nologin + system: true + when: ubtu18cis_2_1_1_3_chrony_user_status.stdout | length > 0 + + - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Set option to use chrony user" + lineinfile: + path: /etc/default/chrony + regexp: '^DAEMON_OPTS' + line: 'DAEMON_OPTS="-u chrony"' + when: + - ubtu18cis_rule_2_1_1_3 + - ubtu18cis_time_sync_tool == "chrony" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.1.3 + - chrony + +- name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured" + block: + - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Set ntp.conf settings" + template: + src: ntp.conf.j2 + dest: /etc/ntp.conf + owner: root + group: root + mode: 0644 + + - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Modify sysconfig/ntpd" + lineinfile: + path: /etc/sysconfig/ntpd + regexp: "{{ item.regexp }}" + line: "{{ item. 
line }}" + create: yes + with_items: + - { regexp: '^OPTIONS', line: 'OPTIONS="-u ntp:ntp"'} + - { regexp: '^NTPD_OPTIONS', line: 'NTPD_OPTIONS="-u ntp:ntp"' } + + - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Modify /etc/init.d/npt" + lineinfile: + path: /etc/init.d/ntp + regexp: '^RUNAUSER' + line: 'RUNAUSER=npt' + when: + - ubtu18cis_rule_2_1_1_4 + - ubtu18cis_time_sync_tool == "ntp" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.1.4 + - ntp + +- name: "AUTOMATED | 2.1.2 | PATCH | Ensure X Window System is not installed" + apt: + name: xserver-xorg* + state: absent + when: + - ubtu18cis_rule_2_1_2 + - not ubtu18cis_desktop_required + tags: + - level1-server + - automated + - patch + - rule_2.1.2 + - xwindows + +- name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed" + block: + - name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed | Stop and disable Avahi service" + service: + name: avahi-daemon + state: stopped + enabled: no + when: avahi_service_status.stdout == "loaded" + + - name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed | Uninstall Avahi service" + apt: + name: avahi-daemon + state: absent + when: + - ubtu18cis_rule_2_1_3 + - not ubtu18cis_avahi_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.3 + - avahi + - services + +- name: "AUTOMATED | 2.1.4 | PATCH | Ensure CUPS is not installed" + apt: + name: cups + state: absent + when: + - ubtu18cis_rule_2_1_4 + - not ubtu18cis_cups_server + tags: + - level1-server + - level2-workstation + - automated + - patch + - rule_2.1.4 + - cups + - services + +- name: "AUTOMATED | 2.1.5 | PATCH | Ensure DHCP Server is not installed" + apt: + name: isc-dhcp-server + state: absent + when: + - ubtu18cis_rule_2_1_5 + - not ubtu18cis_dhcp_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.5 + - dhcp + - services + +- name: "AUTOMATED | 2.1.6 | PATCH | Ensure LDAP server is not installed" + apt: + name: slapd + state: absent + when: + - ubtu18cis_rule_2_1_6 + - not ubtu18cis_ldap_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.6 + - ldap + - services + +- name: "AUTOMATED | 2.1.7 | PATCH | Ensure NFS is not installed" + apt: + name: nfs-kernel-server + state: absent + when: + - ubtu18cis_rule_2_1_7 + - not ubtu18cis_nfs_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.7 + - nfs + - services + +- name: "AUTOMATED | 2.1.8 | PATCH | Ensure DNS Server is not installed" + apt: + name: bind9 + state: absent + when: + - ubtu18cis_rule_2_1_8 + - not ubtu18cis_dns_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.8 + - dns + - service + +- name: "AUTOMATED | 2.1.9 | PATCH | Ensure FTP Server is not installed" + apt: + name: vsftpd + state: absent + when: + - ubtu18cis_rule_2_1_9 + - not ubtu18cis_vsftpd_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.9 + - ftp + - service + +- name: "AUTOMATED | 2.1.10 | PATCH | Ensure HTTP server is not installed" + apt: + name: apache2 + state: absent + when: + - ubtu18cis_rule_2_1_10 + - not ubtu18cis_httpd_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.10 + - httpd + - service + +- name: "AUTOMATED | 2.1.11 | PATCH | Ensure IMAP and POP3 server are not installed" + apt: + name: ['dovecot-imapd', 
'dovecot-pop3d'] + state: absent + when: + - ubtu18cis_rule_2_1_11 + - not ubtu18cis_dovecot_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.11 + - dovecot + - service + +- name: "AUTOMATED | 2.1.12 | PATCH | Ensure Samba is not installed" + apt: + name: samba + state: absent + when: + - ubtu18cis_rule_2_1_12 + - not ubtu18cis_smb_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.12 + - samba + - service + +- name: "AUTOMATED | 2.1.13 | PATCH | Ensure HTTP Proxy Server is not installed" + apt: + name: squid + state: absent + when: + - ubtu18cis_rule_2_1_13 + - not ubtu18cis_squid_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.13 + - http_proxy + - service + +- name: "AUTOMATED | 2.1.14 | PATCH | Ensure SNMP Server is not installed" + apt: + name: snmpd + state: absent + when: + - ubtu18cis_rule_2_1_14 + - not ubtu18cis_snmp_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.14 + - snmp + - service + +- name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode" + block: + - name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode | For postfix" + lineinfile: + path: /etc/postfix/main.cf + regexp: '^(#)?inet_interfaces' + line: 'inet_interfaces = loopback-only' + notify: restart postfix + when: "'postfix' in ansible_facts.packages" + + - name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode | for exim4" + lineinfile: + path: /etc/exim4/update-exim4.conf.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + notify: restart exim4 + with_items: + - { regexp: '^dc_eximconfig_configtype=', line: dc_eximconfig_configtype='local' } + - { regexp: '^dc_local_interfaces=', line: "dc_local_interfaces='127.0.0.1 ; ::1'" } + - { regexp: '^dc_readhost=', line: dc_readhost='' } + - { regexp: '^dc_relay_domains=', line: dc_relay_domains='' } + - { regexp: '^dc_minimaldns=', line: dc_minimaldns='false' } + - { regexp: '^dc_relay_nets=', line: dc_relay_nets='' } + - { regexp: '^dc_smarthost=', line: dc_smarthost='' } + - { regexp: '^dc_use_split_config=', line: dc_use_split_config='false' } + - { regexp: '^dc_hide_mailname=', line: dc_hide_mailname='' } + - { regexp: '^dc_mailname_in_oh=', line: dc_mailname_in_oh='true' } + - { regexp: '^dc_localdelivery=', line: dc_localdelivery='mail_spool' } + when: "'exim4' in ansible_facts.packages" + when: + - ubtu18cis_rule_2_1_15 + - ubtu18cis_mail_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.15 + - postfix + +- name: "AUTOMATED | 2.1.16 | PATCH | Ensure rsync service is not installed" + apt: + name: rsync + state: absent + purge: yes + when: + - ubtu18cis_rule_2_1_16 + - not ubtu18cis_rsync_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.16 + - rsync + +- name: "AUTOMATED | 2.1.17 | PATCH | Ensure NIS Server is not enabled" + apt: + name: nis + state: absent + when: + - ubtu18cis_rule_2_1_17 + - not ubtu18cis_nis_server + tags: + - level1-server + - level1-workstation + - automated + - rule_2.1.17 + - nis + - service diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.2.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.2.x.yml new file mode 100644 index 0000000..bdca487 --- /dev/null +++ 
b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.2.x.yml @@ -0,0 +1,91 @@ +--- +- name: "AUTOMATED | 2.2.1 | PATCH | Ensure NIS Client is not installed" + apt: + name: nis + state: absent + when: + - ubtu18cis_rule_2_2_1 + - not ubtu18cis_nis_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.1 + - nis + +- name: "AUTOMATED | 2.2.2 | PATCH | Ensure rsh client is not installed" + apt: + name: rsh-client + state: absent + when: + - ubtu18cis_rule_2_2_2 + - not ubtu18cis_rsh_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.2 + - rsh + +- name: "AUTOMATED | 2.2.3 | PATCH | Ensure talk client is not installed" + apt: + name: talk + state: absent + when: + - ubtu18cis_rule_2_2_3 + - not ubtu18cis_talk_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.3 + - talk + +- name: "AUTOMATED | 2.2.4 | PATCH | Ensure telnet client is not installed" + apt: + name: telnet + state: absent + when: + - ubtu18cis_rule_2_2_4 + - not ubtu18cis_telnet_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.4 + - telnet + +- name: "AUTOMATED | 2.2.5 | PATCH | Ensure LDAP client is not installed" + apt: + name: ldap-utils + state: absent + when: + - ubtu18cis_rule_2_2_5 + - not ubtu18cis_ldap_clients_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.5 + - ldap + +- name: "AUTOMATED | 2.2.6 | PATCH | Ensure RPC is not installed" + apt: + name: rpcbind + state: absent + when: + - ubtu18cis_rule_2_2_6 + - not ubtu18cis_rpc_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.7 + - rpc + - services diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.3.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.3.yml new file mode 100644 index 0000000..1f8e5cf --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/cis_2.3.yml @@ -0,0 +1,23 @@ +--- +- name: "MANUAL | 2.3 | AUDIT | Ensure nonessential services are removed or masked" + block: + - name: "MANUAL | 2.3 | AUDIT | Ensure nonessential services are removed or masked | Get list of all services" + shell: lsof -i -P -n | grep -v "(ESTABLISHED)" + changed_when: false + failed_when: false + register: ubtu18cis_2_3_services + + - name: "MANUAL | 2.3 | AUDIT | Ensure nonessential services are removed or masked | Display services" + debug: + msg: + - "Alert! Below are the list of services. 
Please make sure all are required and remove any non-required services" + - "{{ ubtu18cis_2_3_services.stdout_lines }}" + when: + - ubtu18cis_rule_2_3 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_2.3 + - nonessential_services diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/main.yml new file mode 100644 index 0000000..92bdbeb --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_2/main.yml @@ -0,0 +1,9 @@ +--- +- name: "SECTION | 2.1 | Special Purpose Services" + include: cis_2.1.x.yml + +- name: "SECTION | 2.2 | Service Clients" + include: cis_2.2.x.yml + +- name: "SECTION | 2.3 | Ensure nonessential services are removed or masked" + include: cis_2.3.yml diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.1.x.yml new file mode 100644 index 0000000..e6ebcac --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.1.x.yml @@ -0,0 +1,49 @@ +--- +- name: "MANUAL | 3.1.1 | PATCH | Disable IPv6" + lineinfile: + path: /etc/default/grub + regexp: '^(GRUB_CMDLINE_LINUX=.*(?!.*ipv6\.disable=1)\"[^\"]+)(\".*)' + line: '\1 ipv6.disable=1\2' + backrefs: yes + notify: grub update + when: + - ubtu18cis_rule_3_1_1 + - not ubtu18cis_ipv6_required + tags: + - level2-server + - level2-workstation + - manual + - patch + - rule_3.1.1 + - ipv6 + +- name: "AUTOMATED | 3.1.2 | PATCH | Ensure wireless interfaces are disabled" + block: + - name: "AUTOMATED | 3.1.2 | AUDIT | Ensure wireless interfaces are disabled | Check if nmcli command is available" + command: dpkg -s network-manager + args: + warn: no + check_mode: false + changed_when: false + register: ubtu18cis_nmcli_available + failed_when: no + + - name: "AUTOMATED | 3.1.2 | AUDIT | Ensure wireless interfaces are disabled | Check if wifi is enabled" + command: nmcli radio wifi + register: ubtu18cis_wifi_enabled + check_mode: false + changed_when: ubtu18cis_wifi_enabled.stdout != "disabled" + when: ubtu18cis_nmcli_available.rc == 0 + + - name: "AUTOMATED | 3.1.2 | PATCH | Ensure wireless interfaces are disabled | Disable wifi if enabled" + command: nmcli radio wifi off + when: ubtu18cis_wifi_enabled is changed + when: + - ubtu18cis_rule_3_1_2 + tags: + - level1-server + - level2-workstation + - automated + - patch + - rule_3.1.2 + - wireless diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.2.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.2.x.yml new file mode 100644 index 0000000..40820ac --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.2.x.yml @@ -0,0 +1,60 @@ +--- +- name: "AUTOMATED | 3.2.1 | PATCH | Ensure packet redirect sending is disabled" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.send_redirects + - net.ipv4.conf.default.send_redirects + notify: sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_2_1 + - not ubtu18cis_is_router + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.2.1 + - packet_redirect + - sysctl + +- name: "AUTOMATED | 3.2.2 | PATCH | Ensure IP forwarding is disabled" + block: + - name: "AUTOMATED | 3.2.2 | PATCH | Ensure IP forwarding is disabled | IPv4 settings" + sysctl: + name: net.ipv4.ip_forward + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + 
notify: + - sysctl flush ipv4 route table + + - name: "AUTOMATED | 3.2.2 | PATCH | Ensure IP forwarding is disabled | IPv6 settings" + sysctl: + name: net.ipv6.conf.all.forwarding + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: + - sysctl flush ipv6 route table + when: ubtu18cis_ipv6_required + when: + - ubtu18cis_rule_3_2_2 + - not ubtu18cis_is_router + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.2.2 + - ip_forwarding + - sysctl diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.3.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.3.x.yml new file mode 100644 index 0000000..002949b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.3.x.yml @@ -0,0 +1,233 @@ +--- +- name: "AUTOMATED | 3.3.1 | PATCH | Ensure source routed packets are not accepted" + block: + - name: "AUTOMATED | 3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv4 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.accept_source_route + - net.ipv4.conf.default.accept_source_route + notify: sysctl flush ipv4 route table + + - name: "AUTOMATED | 3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv6 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv6.conf.all.accept_source_route + - net.ipv6.conf.default.accept_source_route + notify: sysctl flush ipv6 route table + when: ubtu18cis_ipv6_required + when: + - ubtu18cis_rule_3_3_1 + - not ubtu18cis_is_router + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.1 + - routed_packets + - sysctl + +- name: "AUTOMATED | 3.3.2 | PATCH | Ensure ICMP redirects are not accepted" + block: + - name: "AUTOMATED | 3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv4 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.accept_redirects + - net.ipv4.conf.default.accept_redirects + notify: sysctl flush ipv4 route table + + - name: "AUTOMATED | 3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv6 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv6.conf.all.accept_redirects + - net.ipv6.conf.default.accept_redirects + notify: sysctl flush ipv6 route table + when: ubtu18cis_ipv6_required + when: + - ubtu18cis_rule_3_3_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.2 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.3 | PATCH | Ensure secure ICMP redirects are not accepted" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.secure_redirects + - net.ipv4.conf.default.secure_redirects + notify: sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_3_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.3 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.4 | PATCH | Ensure suspicious packets are logged" + sysctl: + name: "{{ item }}" + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.log_martians + - net.ipv4.conf.default.log_martians + notify: 
sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_3_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.4 + - suspicious_packets + - sysctl + +- name: "AUTOMATED | 3.3.5 | PATCH | Ensure broadcast ICMP requests are ignored" + sysctl: + name: net.ipv4.icmp_echo_ignore_broadcasts + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_3_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.5 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.6 | PATCH | Ensure bogus ICMP responses are ignored" + sysctl: + name: net.ipv4.icmp_ignore_bogus_error_responses + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_3_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.6 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled" + sysctl: + name: "{{ item }}" + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.rp_filter + - net.ipv4.conf.default.rp_filter + notify: sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_3_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.7 + - reverse_path_filtering + - sysctl + +- name: "AUTOMATED | 3.3.8 | PATCH | Ensure TCP SYN Cookies is enabled" + sysctl: + name: net.ipv4.tcp_syncookies + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - ubtu18cis_rule_3_3_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.8 + - tcp_syn_cookies + - sysctl + +- name: "AUTOMATED | 3.3.9 | PATCH | Ensure IPv6 router advertisements are not accepted" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv6.conf.all.accept_ra + - net.ipv6.conf.default.accept_ra + notify: sysctl flush ipv6 route table + when: + - ubtu18cis_rule_3_3_9 + - ubtu18cis_ipv6_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.9 + - ipv6 + - router_advertisements + - sysctl diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.4.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.4.x.yml new file mode 100644 index 0000000..c20ab1a --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.4.x.yml @@ -0,0 +1,64 @@ +--- +- name: "SCORED | 3.4.1 | PATCH | Ensure DCCP is disabled" + lineinfile: + path: /etc/modprobe.d/dccp.conf + regexp: '^(#)?install dccp(\\s|$)' + line: 'install dccp /bin/true' + create: yes + when: + - ubtu18cis_rule_3_4_1 + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_3.4.1 + - DCCP + +- name: "SCORED | 3.4.2 | PATCH | Ensure SCTP is disabled" + lineinfile: + path: /etc/modprobe.d/sctp.conf + regexp: "^(#)?install sctp(\\s|$)" + line: 'install sctp /bin/true' + create: yes + when: + - ubtu18cis_rule_3_4_2 + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_3.4.2 + - sctp + +- name: "SCORED | 3.4.3 | PATCH | Ensure RDS is disabled" + lineinfile: + path: /etc/modprobe.d/rds.conf + regexp: '^(#)?install rds(\\s|$)' + line: 'install rds /bin/true' + create: yes + when: + - ubtu18cis_rule_3_4_3 + tags: + - 
level2-server + - level2-workstation + - scored + - patch + - rule_3.4.3 + - rds + +- name: "SCORED | 3.4.4 | PATCH | Ensure TIPC is disabled" + lineinfile: + path: /etc/modprobe.d/tipc.conf + regexp: '^(#)?install tipc(\\s|$)' + line: 'install tipc /bin/true' + create: yes + when: + - ubtu18cis_rule_3_4_4 + tags: + - level2-server + - level2-workstation + - scored + - patch + - rule_3.4.4 + - tipc diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.5.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.5.x.yml new file mode 100644 index 0000000..5f45132 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/cis_3.5.x.yml @@ -0,0 +1,798 @@ +--- +- name: "AUTOMATED | 3.5.1.1 | PATCH | Ensure ufw is installed" + block: + - name: "AUTOMATED | 3.5.1.1 | PATCH | Ensure ufw is installed | Install firewall package" + apt: + name: ufw + state: present + + - name: "AUTOMATED | 3.5.1.1 | PATCH | Ensure ufw is installed | Adjust sysctl.conf for UFW" + lineinfile: + path: /etc/default/ufw + regexp: '^IPT_SYSCTL=' + line: 'IPT_SYSCTL=/etc/sysctl.conf' + when: ubtu18cis_ufw_system_sysctlconf + when: + - ubtu18cis_rule_3_5_1_1 + - ubtu18cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.1 + - apt + - firewall + +- name: "AUTOMATED | 3.5.1.2 | PATCH | Ensure iptables-persistent is not installed with ufw" + apt: + name: iptables-persistent + state: absent + when: + - ubtu18cis_rule_3_5_1_2 + - ubtu18cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.2 + - apt + - firewall + + +# Adding the allow OpenSSH rule while enabling ufw to allow ansible to run after enabling +- name: "AUTOMATED | 3.5.1.3 | PATCH | Ensure ufw service is enabled" + ufw: + rule: allow + name: OpenSSH + state: enabled + when: + - ubtu18cis_rule_3_5_1_3 + - ubtu18cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.3 + - ufw + +- name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured" + block: + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set allow ufw rules" + ufw: + rule: allow + direction: in + interface: lo + notify: reload ufw + + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set deny ufw rules IPv4" + ufw: + rule: deny + direction: in + from_ip: 127.0.0.0/8 + notify: reload ufw + + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set deny ufw rules IPv6" + ufw: + rule: deny + direction: in + from_ip: "::1" + notify: reload ufw + when: ubtu18cis_ipv6_required + when: + - ubtu18cis_rule_3_5_1_4 + - ubtu18cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.4 + - ufw + +- name: "MANUAL | 3.5.1.5 | PATCH | Ensure outbound connections are configured" + block: + - name: "MANUAL | 3.5.1.5 | PATCH | Ensure outbound connections are configured | Custom ports" + ufw: + rule: allow + direction: out + to_port: '{{ item }}' + with_items: + - "{{ ubtu18cis_ufw_allow_out_ports }}" + notify: reload ufw + when: ubtu18cis_ufw_allow_out_ports != "all" + + - name: "MANUAL | 3.5.1.5 | PATCH | Ensure outbound connections are configured | Allow all" + ufw: + default: allow + direction: outgoing + notify: reload ufw + when: "'all' in ubtu18cis_ufw_allow_out_ports" + when: + - ubtu18cis_rule_3_5_1_5 + - ubtu18cis_firewall_package == 
"ufw" + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_3.5.1.5 + - ufw + +- name: "MANUAL | 3.5.1.6 | AUDIT | Ensure firewall rules exist for all open ports" + block: + - name: "MANUAL | 3.5.1.6 | AUDIT | Ensure firewall rules exist for all open ports | Get list of open ports" + command: ss -4tuln + changed_when: false + failed_when: false + register: ubtu18cis_3_5_1_6_open_listen_ports + + - name: "MANUAL | 3.5.1.6 | AUDIT | Ensure firewall rules exist for all open ports | Get list of firewall rules" + command: ufw status + changed_when: false + failed_when: false + register: ubtu18cis_3_5_1_6_firewall_rules + + - name: "MANUAL | 3.5.1.6 | AUDIT | Ensure firewall rules exist for all open ports | Message out settings" + debug: + msg: + - "ALERT!!!!Below are the listening ports and firewall rules" + - "Please create firewall rule for any open ports if not already done" + - "*****---Open Listen Ports---*****" + - "{{ ubtu18cis_3_5_1_6_open_listen_ports.stdout_lines }}" + - "*****---Firewall Rules---*****" + - "{{ ubtu18cis_3_5_1_6_firewall_rules.stdout_lines }}" + when: + - ubtu18cis_rule_3_5_1_6 + - ubtu18cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_3.5.1.6 + - ufw + +- name: "AUTOMATED | 3.5.1.7 | PATCH | Ensure ufw default deny firewall policy" + ufw: + default: deny + direction: "{{ item }}" + notify: reload ufw + with_items: + - incoming + - outgoing + - routed + when: + - ubtu18cis_rule_3_5_1_7 + - ubtu18cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.7 + - ufw + +# # --------------- +# # --------------- +# # NFTables is unsupported with this role. However I have the actions commented out as a guide +# # --------------- +# # --------------- +- name: "AUTOMATED | 3.5.2.1 | AUDIT | Ensure nftables is installed" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + # apt: + # name: nftables + # state: present + when: + - ubtu18cis_rule_3_5_2_1 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.1 + - nftables + - firewall + - notimplemented + +- name: "AUTOMATED | 3.5.2.2 | AUDIT | Ensure ufw is uninstalled or disabled with nftables" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + # apt: + # name: ufw + # state: absent + when: + - ubtu18cis_rule_3_5_2_2 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.2 + - nftables + - firewall + - notimplemented + +- name: "MANUAL | 3.5.2.3 | PATCH | Ensure iptables are flushed" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + # iptables: + # flush: yes + + when: + - ubtu18cis_rule_3_5_2_3 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_3.5.2.3 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.2.4 | PATCH | Ensure a nftables table exists" + debug: + msg: "Warning: NFTables is not supported in this role. 
Please us UFW or iptables" + # command: "nft create table {{ ubtu18cis_nftables_table_name }}" + # changed_when: ubtu18cis_3_5_2_4_new_table.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_2_4_new_table + when: + - ubtu18cis_rule_3_5_2_4 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.4 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure nftables base chains exist" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + # block: + # - name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure base chains exist | Input entry" + # shell: 'nft create chain inet {{ ubtu18cis_nftables_table_name }} input { type filter hook input priority 0 \; }' + # changed_when: ubtu18cis_3_5_2_5_base_chains_input.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_2_5_base_chains_input + + # - name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure base chains exist | Forward entry" + # shell: 'nft create chain inet {{ ubtu18cis_nftables_table_name }} forward { type filter hook forward priority 0 \; }' + # changed_when: ubtu18cis_3_5_2_5_base_chains_forward.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_2_5_base_chains_forward + + # - name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure base chains exist | Output entry" + # shell: 'nft create chain inet {{ ubtu18cis_nftables_table_name }} output { type filter hook output priority 0 \; }' + # changed_when: ubtu18cis_3_5_2_5_base_chains_output.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_2_5_base_chains_output + when: + - ubtu18cis_rule_3_5_2_5 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.5 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.2.6 | PATCH | Ensure nftables loopback traffic is configured" + debug: + msg: "Warning: NFTables is not supported in this role. 
Please us UFW or iptables" + # block: + # - name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured | Get input iif lo accept status" + # shell: nft list ruleset | awk '/hook input/,/}/' | grep 'iif "lo" accept' + # changed_when: false + # failed_when: false + # register: ubtu18cis_3_5_2_6_loopback_iif_status + + # - name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured | Get input iif lo accept status" + # shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip saddr' + # changed_when: false + # failed_when: false + # register: ubtu18cis_3_5_2_6_loopback_input_drop_status + + # - name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured | Get input iif lo accept status" + # shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip6 saddr' + # changed_when: false + # failed_when: false + # register: ubtu18cis_3_5_2_6_loopback_ipv6_drop_status + + # - name: "AUTOMATED | 3.5.2.6 | PATCH | Ensure nftables loopback traffic is configured | Loopback iif lo accept" + # command: 'nft add rule inet {{ ubtu18cis_nftables_table_name }} input iif lo accept' + # changed_when: ubtu18cis_3_5_3_4_loopback_iif.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_3_4_loopback_iif + # when: "'iif \"lo\" accept' not in ubtu18cis_3_5_3_4_loopback_iif_status.stdout" + + # - name: "AUTOMATED | 3.5.2.6 | PATCH | Ensure nftables loopback traffic is configured | Loopback input drop" + # command: 'nft add rule inet {{ ubtu18cis_nftables_table_name }} input ip saddr 127\.0\.0\.0\/8 counter drop' + # changed_when: ubtu18cis_3_5_2_6_loopback_input_drop.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_2_6_loopback_input_drop + # when: + # - "'ip saddr 127.0.0.0/8' not in ubtu18cis_3_5_3_4_loopback_input_drop_status.stdout" + # - "'drop' not in ubtu18cis_3_5_2_6_loopback_input_drop_status.stdout" + + # - name: "AUTOMATED | 3.5.2.6 | PATCH | Ensure nftables loopback traffic is configured | Loopback ipv6 drop" + # command: 'nft add rule inet {{ ubtu18cis_nftables_table_name }} input ip6 saddr ::1 counter drop' + # changed_when: ubtu18cis_3_5_2_6_loopback_ipv6_drop.rc == 0 + # failed_when: false + # register: ubtu18cis_3_5_2_6_loopback_ipv6_drop + # when: + # - "'ip6 saddr' not in ubtu18cis_3_5_2_6_loopback_ipv6_drop_status.stdout" + # - "'drop' not in ubtu18cis_3_5_2_6_loopback_ipv6_drop_status.stdout" + when: + - ubtu18cis_rule_3_5_2_6 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.6 + - nftables + - notimplemented + +- name: "MANUAL | 3.5.2.7 | PATCH | Ensure nftables outbound and established connections are configured" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + when: + - ubtu18cis_rule_3_5_2_7 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_3.5.2.7 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.2.8 | PATCH | Ensure nftables default deny firewall policy" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + when: + - ubtu18cis_rule_3_5_2_7 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.8 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.2.9 | PATCH | Ensure nftables service is enabled" + debug: + msg: "Warning: NFTables is not supported in this role. 
Please us UFW or iptables" + # service: + # name: nftables + # state: started + # enabled: yes + when: + - ubtu18cis_rule_3_5_2_9 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.9 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.2.10 | PATCH | Ensure nftables rules are permanent" + debug: + msg: "Warning: NFTables is not supported in this role. Please us UFW or iptables" + when: + - ubtu18cis_rule_3_5_2_10 + - ubtu18cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.10 + - nftables + - notimplemented + +- name: "AUTOMATED | 3.5.3.1.1 | PATCH | Ensure iptables packages are installed" + apt: + name: ['iptables', 'iptables-persistent'] + state: present + when: + - ubtu18cis_rule_3_5_3_1_1 + - ubtu18cis_firewall_package == "iptables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.1.1 + - iptables + - firewall + +- name: "AUTOMATED | 3.5.3.1.2 | PATCH | Ensure nftables is not installed with iptables" + apt: + name: nftables + state: absent + when: + - ubtu18cis_rule_3_5_3_1_2 + - ubtu18cis_firewall_package == "iptables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.1.2 + - iptables + - firewall + +- name: "AUTOMATED | 3.5.3.1.3 | PATCH | Ensure ufw is uninstalled or disabled with iptables" + apt: + name: ufw + state: absent + when: + - ubtu18cis_rule_3_5_3_1_3 + - ubtu18cis_firewall_package == "iptables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.1.3 + - iptables + - firewall + +- name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables default deny firewall policy" + block: + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables default deny firewall policy | Configure SSH to be allowed in" + iptables: + chain: INPUT + protocol: tcp + destination_port: 22 + jump: ACCEPT + ctstate: 'NEW,ESTABLISHED' + + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables default deny firewall policy | Configure SSH to be allowed out" + iptables: + chain: OUTPUT + protocol: tcp + source_port: 22 + jump: ACCEPT + ctstate: 'NEW,ESTABLISHED' + + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables default deny firewall policy | Enable apt traffic" + iptables: + chain: INPUT + ctstate: 'ESTABLISHED' + jump: ACCEPT + + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables default deny firewall policy | Set drop items" + iptables: + policy: DROP + chain: "{{ item }}" + with_items: + - INPUT + - FORWARD + - OUTPUT + when: + - ubtu18cis_rule_3_5_3_2_1 + - ubtu18cis_firewall_package == "iptables" + - not ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.2.1 + - iptables + - firewall + +- name: "AUTOMATED | 3.5.3.2.2 | PATCH | Ensure iptables loopback traffic is configured" + block: + - name: "AUTOMATED | 3.5.3.2.2 | PATCH | Ensure iptables loopback traffic is configured | INPUT loopback ACCEPT" + iptables: + action: append + chain: INPUT + in_interface: lo + jump: ACCEPT + + - name: "AUTOMATED | 3.5.3.2.2 | PATCH | Ensure iptables loopback traffic is configured | OUTPUT loopback ACCEPT" + iptables: + action: append + chain: OUTPUT + out_interface: lo + jump: ACCEPT + + - name: "AUTOMATED | 3.5.3.2.2 | PATCH | Ensure iptables loopback traffic is configured | OUTPUT loopback ACCEPT" + iptables: + action: append + chain: INPUT + source: 127.0.0.0/8 + jump: DROP 
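+    # Illustrative note (not part of the upstream control): a read-only audit of
+    # the resulting ruleset could be appended here, for example
+    #
+    #   - name: "AUDIT | 3.5.3.2.2 | List INPUT chain to verify loopback rules"
+    #     command: iptables -L INPUT -v -n
+    #     changed_when: false
+    #     register: ubtu18cis_3_5_3_2_2_input_rules
+    #
+    # which is expected to show the ACCEPT rule on interface lo ahead of the DROP
+    # rule for traffic sourced from 127.0.0.0/8, since the rules above are
+    # appended in that order and loopback traffic must be accepted first.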
+ when: + - ubtu18cis_rule_3_5_3_2_2 + - ubtu18cis_firewall_package == "iptables" + - not ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.2.2 + - iptables + - firewall + +- name: "MANUAL | 3.5.3.2.3 | PATCH | Ensure iptables outbound and established connections are configured" + iptables: + action: append + chain: '{{ item.chain }}' + protocol: '{{ item.protocol }}' + match: state + ctstate: '{{ item.ctstate }}' + jump: ACCEPT + with_items: + - { chain: OUTPUT, protocol: tcp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: udp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: icmp, ctstate: 'NEW,ESTABLISHED' } + - { chain: INPUT, protocol: tcp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: udp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: icmp, ctstate: 'ESTABLISHED' } + when: + - ubtu18cis_rule_3_5_3_2_3 + - ubtu18cis_firewall_package == "iptables" + - not ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_3.5.3.2.3 + - iptables + - firewall + +- name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure iptables firewall rules exist for all open ports" + block: + - name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure firewall rules exist for all open ports | Get list of open ports" + command: ss -4tuln + changed_when: false + failed_when: false + register: ubtu18cis_3_5_3_2_4_open_ports + + - name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure firewall rules exist for all open ports | Get list of rules" + command: iptables -L INPUT -v -n + changed_when: false + failed_when: false + register: ubtu18cis_3_5_3_2_4_current_rules + + - name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure firewall rules exist for all open ports | Alert about settings" + debug: + msg: + - "ALERT!!!!Below is the list the open ports and current rules" + - "Please create a rule for any open port that does not have a current rule" + - "Open Ports:" + - "{{ ubtu18cis_3_5_3_2_4_open_ports.stdout_lines }}" + - "Current Rules:" + - "{{ ubtu18cis_3_5_3_2_4_current_rules.stdout_lines }}" + when: + - ubtu18cis_rule_3_5_3_2_4 + - ubtu18cis_firewall_package == "iptables" + - not ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.3.2.4 + - iptables + +# --------------- +# --------------- +# This is not a control however using the iptables module only writes to memery +# if a reboot occurs that means changes can revert. 
This task will make the +# above iptables settings permanent +# --------------- +# --------------- +- name: "Make IPTables persistent | Not a control" + block: + - name: "Make IPTables persistent | Install iptables-persistent" + apt: + name: iptables-persistent + state: present + + - name: "Make IPTables persistent | Save to persistent files" + shell: bash -c "iptables-save > /etc/iptables/rules.v4" + changed_when: ubtu18cis_iptables_save.rc == 0 + failed_when: ubtu18cis_iptables_save.rc > 0 + register: ubtu18cis_iptables_save + when: + - ubtu18cis_firewall_package == "iptables" + - not ubtu18cis_iptables_v6 + - ubtu18cis_save_iptables_cis_rules + - ubtu18cis_rule_3_5_3_2_1 or + ubtu18cis_rule_3_5_3_2_2 or + ubtu18cis_rule_3_5_3_2_3 or + ubtu18cis_rule_3_5_3_2_4 + + +- name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables default deny firewall policy" + block: + - name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables default deny firewall policy | Configure SSH to be allowed out" + iptables: + chain: OUTPUT + protocol: tcp + source_port: 22 + jump: ACCEPT + ctstate: 'NEW,ESTABLISHED' + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables default deny firewall policy | Enable apt traffic" + iptables: + chain: INPUT + ctstate: 'ESTABLISHED' + jump: ACCEPT + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.1| PATCH | Ensure ip6tables default deny firewall policy | Set drop items" + iptables: + policy: DROP + chain: "{{ item }}" + ip_version: ipv6 + with_items: + - INPUT + - FORWARD + - OUTPUT + when: + - ubtu18cis_rule_3_5_3_3_1 + - ubtu18cis_firewall_package == "iptables" + - ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.3.1 + - ip6tables + +- name: "AUTOMATED | 3.5.3.3.2 | PATCH | Ensure ip6tables loopback traffic is configured" + block: + - name: "AUTOMATED | 3.5.3.3.2 | PATCH | Ensure ip6tables loopback traffic is configured | INPUT loopback ACCEPT" + iptables: + action: append + chain: INPUT + in_interface: lo + jump: ACCEPT + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.2 | PATCH | Ensure ip6tables loopback traffic is configured | OUTPUT loopback ACCEPT" + iptables: + action: append + chain: OUTPUT + out_interface: lo + jump: ACCEPT + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.2 | PATCH | Ensure ip6tables loopback traffic is configured | INPUT loopback drop" + iptables: + action: append + chain: INPUT + source: ::1 + jump: DROP + ip_version: ipv6 + when: + - ubtu18cis_rule_3_5_3_3_2 + - ubtu18cis_firewall_package == "iptables" + - ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.3.2 + - ip6tables + +- name: "MANUAL | 3.5.3.3.3 | PATCH | Ensure ip6tables outbound and established connections are configured" + iptables: + action: append + chain: '{{ item.chain }}' + protocol: '{{ item.protocol }}' + match: state + ctstate: '{{ item.ctstate }}' + jump: ACCEPT + ip_version: ipv6 + with_items: + - { chain: OUTPUT, protocol: tcp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: udp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: icmp, ctstate: 'NEW,ESTABLISHED' } + - { chain: INPUT, protocol: tcp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: udp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: icmp, ctstate: 'ESTABLISHED' } + when: + - ubtu18cis_rule_3_5_3_3_3 + - ubtu18cis_firewall_package == "iptables" + - ubtu18cis_iptables_v6 + tags: + - level1-server + - level1-workstation + - manual 
+    - patch
+    - rule_3.5.3.3.3
+    - ip6tables
+
+- name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports"
+  block:
+    - name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Get list of open ports"
+      command: ss -6tuln
+      changed_when: false
+      failed_when: false
+      register: ubtu18cis_3_5_3_3_4_open_ports
+
+    - name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Get list of rules"
+      command: ip6tables -L INPUT -v -n
+      changed_when: false
+      failed_when: false
+      register: ubtu18cis_3_5_3_3_4_current_rules
+
+    - name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Alert about settings"
+      debug:
+        msg:
+          - "ALERT!!!! Below is the list of open ports and current rules"
+          - "Please create a rule for any open port that does not have a current rule"
+          - "Open Ports:"
+          - "{{ ubtu18cis_3_5_3_3_4_open_ports.stdout_lines }}"
+          - "Current Rules:"
+          - "{{ ubtu18cis_3_5_3_3_4_current_rules.stdout_lines }}"
+  when:
+    - ubtu18cis_rule_3_5_3_3_4
+    - ubtu18cis_firewall_package == "iptables"
+    - ubtu18cis_iptables_v6
+  tags:
+    - level1-server
+    - level1-workstation
+    - automated
+    - audit
+    - rule_3.5.3.3.4
+    - ip6tables
+
+# ---------------
+# ---------------
+# This is not a control; however, the ip6tables module only writes to memory,
+# so if a reboot occurs those changes can revert. This task makes the
+# ip6tables settings above permanent.
+# ---------------
+# ---------------
+- name: "Make IP6Tables persistent | Not a control"
+  block:
+    - name: "Make IP6Tables persistent | Install iptables-persistent"
+      apt:
+        name: iptables-persistent
+        state: present
+
+    - name: "Make IP6Tables persistent | Save to persistent files"
+      shell: bash -c "ip6tables-save > /etc/iptables/rules.v6"
+      changed_when: ubtu18cis_ip6tables_save.rc == 0
+      failed_when: ubtu18cis_ip6tables_save.rc > 0
+      register: ubtu18cis_ip6tables_save
+  when:
+    - ubtu18cis_firewall_package == "iptables"
+    - ubtu18cis_iptables_v6
+    - ubtu18cis_save_iptables_cis_rules
+    - ubtu18cis_rule_3_5_3_3_1 or
+      ubtu18cis_rule_3_5_3_3_2 or
+      ubtu18cis_rule_3_5_3_3_3 or
+      ubtu18cis_rule_3_5_3_3_4
diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/main.yml
new file mode 100644
index 0000000..67d6812
--- /dev/null
+++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_3/main.yml
@@ -0,0 +1,15 @@
+---
+- name: "SECTION | 3.1 | Disable unused network protocols and devices"
+  include: cis_3.1.x.yml
+
+- name: "SECTION | 3.2 | Network Parameters Host Only"
+  include: cis_3.2.x.yml
+
+- name: "SECTION | 3.3 | Network Parameters Host and Router"
+  include: cis_3.3.x.yml
+
+- name: "SECTION | 3.4 | Uncommon Network Protocols"
+  include: cis_3.4.x.yml
+
+- name: "SECTION | 3.5 | Firewall Configuration"
+  include: cis_3.5.x.yml
diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.1.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.1.1.x.yml
new file mode 100644
index 0000000..1a5c3f1
--- /dev/null
+++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.1.1.x.yml
@@ -0,0 +1,81 @@
+---
+- name: "AUTOMATED | 4.1.1.1 | PATCH | Ensure auditd is installed"
+  apt:
+    name: ['auditd', 'audispd-plugins']
+    state: present
+  when:
+    - ubtu18cis_rule_4_1_1_1
+  tags:
+    - level2-server
+    - level2-workstation
+    - automated
+    - patch
+    - rule_4.1.1.1
+    - auditd
+
+- name: "AUTOMATED |
4.1.1.2 | PATCH | Ensure auditd service is enabled" + service: + name: auditd + state: started + enabled: yes + when: + - ubtu18cis_rule_4_1_1_2 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.1.2 + - auditd + +- name: "AUTOMATED | 4.1.1.3 | PATCH | Ensure auditing for processes that start prior to auditd is enabled" + block: + - name: "AUTOMATED | 4.1.1.3 | AUDIT | Ensure auditing for processes that start prior to auditd is enabled | Get GRUB_CMDLINE_LINUX" + shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | cut -f2 -d'"' + changed_when: false + failed_when: false + register: ubtu18cis_4_1_1_3_cmdline_settings + + - name: "AUTOMATED | 4.1.1.3 | PATCH | Ensure auditing for processes that start prior to auditd is enabled | Add setting if doesn't exist" + lineinfile: + path: /etc/default/grub + regexp: '^GRUB_CMDLINE_LINUX=' + line: 'GRUB_CMDLINE_LINUX="{{ ubtu18cis_4_1_1_3_cmdline_settings.stdout }} audit=1"' + when: "'audit=' not in ubtu18cis_4_1_1_3_cmdline_settings.stdout" + notify: grub update + + - name: "AUTOMATED | 4.1.1.3 | PATCH | Ensure auditing for processes that start prior to auditd is enabled | Update setting if exists" + replace: + dest: /etc/default/grub + regexp: 'audit=([0-9]+)' + replace: 'audit=1' + after: '^GRUB_CMDLINE_LINUX="' + before: '"' + notify: grub update + when: "'audit=' in ubtu18cis_4_1_1_3_cmdline_settings.stdout" + when: + - ubtu18cis_rule_4_1_1_3 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4_1_1_3 + - auditd + +- name: "AUTOMATED | 4.1.1.4 | PATCH | Ensure audit_backlog_limit is sufficient" + replace: + dest: /etc/default/grub + regexp: '(^GRUB_CMDLINE_LINUX\s*\=\s*)(?:")(.+)(?/dev/null; done + register: priv_procs + changed_when: no + check_mode: no + + - name: "AUTOMATED | 4.1.11 | PATCH | Ensure use of privileged commands is collected | Set privileged rules" + template: + src: audit/ubtu18cis_4_1_11_privileged.rules.j2 + dest: /etc/audit/rules.d/privileged.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu18cis_rule_4_1_11 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.11 + - auditd + +- name: "AUTOMATED | 4.1.12 | PATCH | Ensure successful file system mounts are collected" + template: + src: audit/ubtu18cis_4_1_12_audit.rules.j2 + dest: /etc/audit/rules.d/audit.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + ubtu18cis_rule_4_1_12 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.12 + - auditd + +- name: "AUTOMATED | 4.1.13 | PATCH | Ensure file deletion events by users are collected" + template: + src: audit/ubtu18cis_4_1_13_delete.rules.j2 + dest: /etc/audit/rules.d/delete.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu18cis_rule_4_1_13 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.13 + - auditd + +- name: "AUTOMATED | 4.1.14 | PATCH | Ensure changes to system administration scope (sudoers) is collected" + template: + src: audit/ubtu18cis_4_1_14_scope.rules.j2 + dest: /etc/audit/rules.d/scope.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu18cis_rule_4_1_14 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.14 + - auditd + +- name: "AUTOMATED | 4.1.15 | PATCH | Ensure system administrator command executions (sudo) are collected" + template: + src: 
audit/ubtu18cis_4_1_15_actions.rules.j2 + dest: /etc/audit/rules.d/actions.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu18cis_rule_4_1_15 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.15 + - auditd + +- name: "AUTOMATED | 4.1.16 | PATCH | Ensure kernel module loading and unloading is collected" + template: + src: audit/ubtu18cis_4_1_16_modules.rules.j2 + dest: /etc/audit/rules.d/modules.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu18cis_rule_4_1_16 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.16 + - auditd + +- name: "AUTOMATED | 4.1.17 | PATCH | Ensure the audit configuration is immutable" + template: + src: audit/ubtu18cis_4_1_17_99finalize.rules.j2 + dest: /etc/audit/rules.d/99-finalize.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu18cis_rule_4_1_17 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.17 + - auditd diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.1.x.yml new file mode 100644 index 0000000..a010ce4 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.1.x.yml @@ -0,0 +1,151 @@ +--- +- name: "AUTOMATED | 4.2.1.1 | PATCH | Ensure rsyslog is installed" + apt: + name: rsyslog + state: present + when: + - ubtu18cis_rule_4_2_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.1 + - rsyslog + - apt + +- name: "AUTOMATED | 4.2.1.2 | PATCH | Ensure rsyslog Service is enabled" + service: + name: rsyslog + enabled: yes + when: + - ubtu18cis_rule_4_2_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.2 + - rsyslog + +- name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured" + block: + - name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured | Find configuration file" + shell: grep -r "*.emerg" /etc/* | cut -f1 -d":" + changed_when: false + failed_when: false + register: ubtu18cis_4_2_1_3_rsyslog_config_path + + - name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured | Gather rsyslog current config" + command: "cat {{ ubtu18cis_4_2_1_3_rsyslog_config_path.stdout }}" + changed_when: false + failed_when: false + register: ubtu18cis_4_2_1_3_rsyslog_config + + - name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured | Message out config" + debug: + msg: + - "Alert!!!Below is the current logging configurations for rsyslog, please review" + - "{{ ubtu18cis_4_2_1_3_rsyslog_config.stdout_lines }}" + when: not ubtu18cis_rsyslog_ansible_managed + + - name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured | Automated rsyslog configuration" + lineinfile: + path: "{{ ubtu18cis_4_2_1_3_rsyslog_config_path.stdout }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^\*.emerg', line: '*.emerg :omusrmsg:*', insertafter: '^# Emergencies are sent to everybody logged in' } + - { regexp: '^auth,authpriv.\*', line: 'auth,authpriv.* /var/log/auth.log', insertafter: '^# First some standard log files. 
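All of the 4.1.x rule files land in /etc/audit/rules.d/ and are merged when the restart auditd handler fires; since 99-finalize.rules normally ends with the -e 2 immutable flag, the merged set can be inspected but not changed again until a reboot. A short sanity check with the standard audit tooling:

    augenrules --check           # reports whether rules.d differs from what is loaded
    auditctl -l                  # list the rules currently loaded in the kernel
    auditctl -s | grep enabled   # "enabled 2" means the immutable flag took effect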
Log by facility' } + - { regexp: '^mail.\*|^#mail.\*', line: 'mail.* -/var/log/mail', insertafter: '^# First some standard log files' } + - { regexp: '^mail.info|^#mail.info', line: 'mail.info -/var/log/mail.info', insertafter: '^# Logging for the mail system' } + - { regexp: '^mail.warn|^#mail.warn', line: 'mail.warn -/var/log/mail.warn', insertafter: '^# Logging for the mail system.' } + - { regexp: '^mail.err|^#mail.err', line: 'mail.err /var/log/mail.err', insertafter: '^# Logging for the mail system.' } + - { regexp: '^news.crit|^#news.crit', line: 'news.crit -/var/log/news/news.crit', insertafter: '^# First some standard log files'} + - { regexp: '^news.err|^#news.err', line: 'news.err -/var/log/news/news.err', insertafter: '^# First some standard log files' } + - { regexp: '^news.notice|^#news.notice', line: 'news.notice -/var/log/news/news.notice', insertafter: '^# First some standard log files' } + - { regexp: '^\*.=warning;\*.=err|^#\*.=warning;\*.=err', line: '*.=warning;*.=err -/var/log/warn', insertafter: '^# First some standard log files' } + - { regexp: '^\*.crit|^#\*.crit', line: '*.crit /var/log/warn', insertafter: '^# First some standard log files' } + - { regexp: '^\*.\*;mail.none;news.none|^#\*.\*;mail.none;news.none', line: '*.*;mail.none;news.none -/var/log/messages', insertafter: '^# First some standard log files' } + - { regexp: '^local0,local1.\*|^#local0,local1.\*', line: 'local0,local1.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + - { regexp: '^local2,local3.\*|^#local2,local3.\*', line: 'local2,local3.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + - { regexp: '^local4,local5.\*|^#local4,local5.\*', line: 'local4,local5.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + - { regexp: '^local6,local7.\*|^#local6,local7.\*', line: 'local6,local7.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + notify: restart rsyslog + when: ubtu18cis_rsyslog_ansible_managed + when: + - ubtu18cis_rule_4_2_1_3 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_4.2.1.3 + - rsyslog + +- name: "AUTOMATED | 4.2.1.4 | PATCH | Ensure rsyslog default file permissions configured" + lineinfile: + path: /etc/rsyslog.conf + regexp: '^\$FileCreateMode|^#\$FileCreateMode' + line: '$FileCreateMode 0640' + notify: restart rsyslog + when: + - ubtu18cis_rule_4_2_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.4 + - rsyslog + +- name: "AUTOMATED | 4.2.1.5 | PATCH | Ensure rsyslog is configured to send logs to a remote log host" + blockinfile: + path: /etc/rsyslog.conf + block: | + ##Enable sending of logs over TCP add the following line: + *.* @@{{ ubtu18cis_remote_log_server }} + insertafter: EOF + when: + - ubtu18cis_rule_4_2_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.5 + - rsyslog + +- name: "MANUAL | 4.2.1.6 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts" + block: + - name: "MANUAL | 4.2.1.6 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts | When not a log host" + replace: + path: /etc/rsyslog.conf + regexp: '({{ item }})' + replace: '#\1' + with_items: + - '^(\$ModLoad)' + - '^(\$InputTCPServerRun)' + notify: restart rsyslog + when: not ubtu18cis_system_is_log_server + + - name: "MANUAL | 4.2.1.6 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts | When 
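In rsyslog's legacy selector syntax a single @ forwards over UDP while @@ forwards over TCP, and a port can be appended after a colon. With ubtu18cis_remote_log_server pointing at, say, loghost.example.com (a placeholder), the block written by 4.2.1.5 renders roughly as:

    ##Enable sending of logs over TCP add the following line:
    *.* @@loghost.example.com

It could equally be written as *.* @@loghost.example.com:514 to make the target port explicit.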
a log server" + lineinfile: + path: /etc/rsyslog.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^\$ModLoad|^#\$ModLoad', line: '$ModLoad imtcp' } + - { regexp: '^\$InputTCPServerRun|^#\$InputTCPServerRun', line: '$InputTCPServerRun 514' } + notify: restart rsyslog + when: ubtu18cis_system_is_log_server + when: + - ubtu18cis_rule_4_2_1_6 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_4.2.1.6 + - rsyslog diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.2.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.2.x.yml new file mode 100644 index 0000000..3163e62 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.2.x.yml @@ -0,0 +1,51 @@ +--- +- name: "AUTOMATED | 4.2.2.1 | PATCH | Ensure journald is configured to send logs to rsyslog" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^ForwardToSyslog|^#ForwardToSyslog' + line: 'ForwardToSyslog=yes' + insertafter: '\[Journal\]' + when: + - ubtu18cis_rule_4_2_2_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.2.1 + - rsyslog + - journald + +- name: "AUTOMATED | 4.2.2.2 | PATCH | Ensure journald is configured to compress large log files" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^Compress|^#Compress' + line: 'Compress=yes' + insertafter: '\[Journal\]' + when: + - ubtu18cis_rule_4_2_2_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.2.2 + - rsyslog + - journald + +- name: "AUTOMATED | 4.2.2.3 | PATCH | Ensure journald is configured to write logfiles to persistent disk" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^Storage|^#Storage' + line: 'Storage=persistent' + insertafter: '\[Journal\]' + when: + - ubtu18cis_rule_4_2_2_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.2.3 + - rsyslog + - journald diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.3.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.3.x.yml new file mode 100644 index 0000000..bcbea6c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.2.3.x.yml @@ -0,0 +1,15 @@ +--- +- name: "AUTOMATED | 4.2.3 | PATCH | Ensure permissions on all logfiles are configured" + command: find /var/log -type f -exec chmod g-wx,o-rwx "{}" + -o -type d -exec chmod g-w,o-rwx "{}" + + changed_when: ubtu18cis_4_2_3_logfile_perms_status.rc == 0 + register: ubtu18cis_4_2_3_logfile_perms_status + when: + - ubtu18cis_rule_4_2_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.3 + - logfiles + - permissions diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.3.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.3.yml new file mode 100644 index 0000000..627b6ad --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.3.yml @@ -0,0 +1,25 @@ +--- +- name: "MANUAL | 4.3 | PATCH | Ensure logrotate is configured" + block: + - name: "MANUAL | 4.3 | PATCH | Ensure logrotate is configured | Get logrotate files" + find: + paths: /etc/logrotate.d/ + register: ubtu18cis_4_3_logrotate_files + + - name: "MANUAL | 4.3 | PATCH | Ensure logrotate is configured | Set rotation configurations" + replace: + path: "{{ item.path }}" + regexp: '^(\s*)(daily|weekly|monthly|yearly)$' + replace: "\\1{{ ubtu18cis_logrotate }}" + with_items: + - "{{ ubtu18cis_4_3_logrotate_files.files }}" + - { 
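journald only re-reads /etc/systemd/journald.conf on restart, and none of the 4.2.2.x tasks notify a handler as written, so the settings apply after the daemon is bounced. The resulting [Journal] section looks like:

    [Journal]
    ForwardToSyslog=yes
    Compress=yes
    Storage=persistent

    systemctl restart systemd-journald   # apply the settings on a running host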
path: "/etc/logrotate.conf" } + when: + - ubtu18cis_rule_4_3 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_4.3 + - logrotate diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.4.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.4.yml new file mode 100644 index 0000000..aa487dc --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/cis_4.4.yml @@ -0,0 +1,15 @@ +--- +- name: "AUTOMATED | 4.4 | PATCH | Ensure logrotate assigns appropriate permissions" + lineinfile: + path: /etc/logrotate.conf + regex: '^create' + line: 'create 0640 root utmp' + when: + - ubtu18cis_rule_4_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.4 + - logrotate diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/main.yml new file mode 100644 index 0000000..cc6e82c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_4/main.yml @@ -0,0 +1,24 @@ +--- +- name: "SECTION | 4.1.1 | Configure System Accounting" + include: cis_4.1.1.x.yml + +- name: "SECTION | 4.1.2 | Configure Data Retention" + include: cis_4.1.2.x.yml + +- name: "SECTION | 4.1.x | Login Settings" + include: cis_4.1.x.yml + +- name: "SECTION | 4.2.1 | Configure rsyslog" + include: cis_4.2.1.x.yml + +- name: "SECTION | 4.2.2 | Configure journald" + include: cis_4.2.2.x.yml + +- name: "SECTION | 4.2.3 | Ensure permissions on all logfiles are configured" + include: cis_4.2.3.x.yml + +- name: "SECTION | 4.3 | Ensure logrotate is configured" + include: cis_4.3.yml + +- name: "SECTION | 4.4 | Ensure assigns appropriate permissions" + include: cis_4.4.yml diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.1.x.yml new file mode 100644 index 0000000..ec447b2 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.1.x.yml @@ -0,0 +1,158 @@ +--- +- name: "AUTOMATED | 5.1.1 | PATCH | Ensure cron daemon is enabled" + service: + name: cron + enabled: yes + when: + - ubtu18cis_rule_5_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.1 + - cron + +- name: "AUTOMATED | 5.1.2 | PATCH | Ensure permissions on /etc/crontab are configured" + file: + path: /etc/crontab + owner: root + group: root + mode: 0600 + when: + - ubtu18cis_rule_5_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.2 + - cron + +- name: "AUTOMATED | 5.1.3 | PATCH | Ensure permissions on /etc/cron.hourly are configured" + file: + path: /etc/cron.hourly + owner: root + group: root + mode: 0700 + when: + - ubtu18cis_rule_5_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.3 + - cron + +- name: "AUTOMATED | 5.1.4 | PATCH | Ensure permissions on /etc/cron.daily are configured" + file: + path: /etc/cron.daily + owner: root + group: root + mode: 0700 + when: + - ubtu18cis_rule_5_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.4 + - cron + +- name: "AUTOMATED | 5.1.5 | PATCH | Ensure permissions on /etc/cron.weekly are configured" + file: + path: /etc/cron.weekly + owner: root + group: root + mode: 0700 + when: + - ubtu18cis_rule_5_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.5 + - cron + +- name: "AUTOMATED | 5.1.6 | PATCH | Ensure permissions on /etc/cron.monthly are configured" + file: + path: 
/etc/cron.monthly + owner: root + group: root + mode: 0700 + when: + - ubtu18cis_rule_5_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.6 + - cron + +- name: "AUTOMATED | 5.1.7 | PATCH | Ensure permissions on /etc/cron.d are configured" + file: + path: /etc/cron.d + owner: root + group: root + mode: 0700 + when: + - ubtu18cis_rule_5_1_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.7 + - cron + +- name: "AUTOMATED | 5.1.8 | PATCH | Ensure cron is restricted to authorized users" + block: + - name: "AUTOMATED | 5.1.8 | PATCH | Ensure cron is restricted to authorized users | Remove deny configs" + file: + path: /etc/cron.deny + state: absent + + - name: "AUTOMATED | 5.1.8 | PATCH | Ensure cron is restricted to authorized users | Create allow files" + file: + path: /etc/cron.allow + owner: root + group: root + mode: 0640 + state: touch + when: + - ubtu18cis_rule_5_1_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.8 + - cron + +- name: "AUTOMATED | 5.1.9 | PATCH | Ensure at is restricted to authorized users" + block: + - name: "AUTOMATED | 5.1.9 | PATCH | Ensure at is restricted to authorized users | Remove deny configs" + file: + path: /etc/at.deny + state: absent + + - name: "AUTOMATED | 5.1.9 | PATCH | Ensure at is restricted to authorized users | Create allow files" + file: + path: /etc/at.allow + owner: root + group: root + mode: 0640 + state: touch + when: + - ubtu18cis_rule_5_1_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.9 + - cron diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.2.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.2.x.yml new file mode 100644 index 0000000..ed828f1 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.2.x.yml @@ -0,0 +1,46 @@ +--- +- name: "AUTOMATED | 5.2.1 | PATCH | Ensure sudo is installed" + apt: + name: "{{ ubtu18cis_sudo_package }}" + state: present + when: + - ubtu18cis_rule_5_2_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.2.1 + - sudo + +- name: "AUTOMATED | 5.2.2 | PATCH | Ensure sudo commands use pty" + lineinfile: + path: /etc/sudoers + regexp: '^Defaults use_' + line: 'Defaults use_pty' + insertafter: '^Defaults' + when: + - ubtu18cis_rule_5_2_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.2.2 + - sudo + +- name: "AUTOMATED | 5.2.3 | PATCH | Ensure sudo log file exists" + lineinfile: + path: /etc/sudoers + regexp: '^Defaults logfile' + line: 'Defaults logfile="{{ ubtu18cis_sudo_logfile }}"' + insertafter: '^Defaults' + when: + - ubtu18cis_rule_5_2_3 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_5.2.3 + - sudo diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.3.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.3.x.yml new file mode 100644 index 0000000..a50c1be --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.3.x.yml @@ -0,0 +1,413 @@ +--- +- name: "AUTOMATED | 5.3.1 | PATCH | Ensure permissions on /etc/ssh/sshd_config are configured" + file: + path: /etc/ssh/sshd_config + owner: root + group: root + mode: 0600 + when: + - ubtu18cis_rule_5_3_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.1 + - ssh + +- name: "AUTOMATED | 5.3.2 | PATCH | Ensure permissions on SSH private host key files are configured" + 
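Both 5.2.x edits write /etc/sudoers in place, and a syntax error in that file can disable sudo for everyone. A more defensive variant of the same task (illustrative only, not what the role ships) would let visudo vet the result before it is saved:

    - name: "Example | Ensure sudo commands use pty (validated)"
      lineinfile:
        path: /etc/sudoers
        regexp: '^Defaults use_'
        line: 'Defaults use_pty'
        insertafter: '^Defaults'
        validate: '/usr/sbin/visudo -cf %s'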
block: + - name: "AUTOMATED | 5.3.2 | AUDIT | Ensure permissions on SSH private host key files are configured | Find ssh_host private keys" + find: + paths: /etc/ssh + patterns: 'ssh_host_*_key' + register: ubtu18cis_5_3_2_ssh_host_priv_keys + + - name: "AUTOMATED | 5.3.2 | PATCH | Ensure permissions on SSH private host key files are configured | Set permissions" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0600 + with_items: + - "{{ ubtu18cis_5_3_2_ssh_host_priv_keys.files }}" + when: + - ubtu18cis_rule_5_3_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.2 + - ssh + +- name: "AUTOMATED | 5.3.3 | PATCH | Ensure permissions on SSH public host key files are configured" + block: + - name: "AUTOMATED | 5.3.3 | PATCH | Ensure permissions on SSH public host key files are configured | Find ssh_host public keys" + find: + paths: /etc/ssh + patterns: 'ssh_host_*_key.pub' + register: ubtu18cis_5_3_3_ssh_host_pub_keys + + - name: "AUTOMATED | 5.3.3 | PATCH | Ensure permissions on SSH public host key files are configured | Set permissions" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0644 + with_items: + - "{{ ubtu18cis_5_3_3_ssh_host_pub_keys.files }}" + when: + - ubtu18cis_rule_5_3_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.3 + - ssh + +- name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited" + block: + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add allowed users" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^AllowUsers|^#AllowUsers' + line: 'AllowUsers {{ ubtu18cis_sshd.allow_users }}' + notify: restart sshd + when: "ubtu18cis_sshd['allow_users']|default('') != ''" + + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add allowed groups" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^AllowGroups|^#AllowGroups' + line: 'AllowGroups {{ ubtu18cis_sshd.allow_groups }}' + notify: restart sshd + when: "ubtu18cis_sshd['allow_groups']|default('') != ''" + + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add deny users" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^DenyUsers|^#DenyUsers' + line: 'DenyUsers {{ ubtu18cis_sshd.deny_users }}' + notify: restart sshd + when: "ubtu18cis_sshd['deny_users']|default('') != ''" + + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add deny groups" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^DenyGroups|^#DenyGroups' + line: 'DenyGroups {{ ubtu18cis_sshd.deny_groups }}' + notify: restart sshd + when: "ubtu18cis_sshd['deny_groups']|default('') != ''" + when: + - ubtu18cis_rule_5_3_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.4 + - ssh + +- name: "AUTOMATED | 5.3.5 | PATCH | Ensure SSH LogLevel is appropriate" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^LogLevel|^#LogLevel' + line: 'LogLevel {{ ubtu18cis_sshd.log_level }}' + insertafter: '^# Logging' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.5 + - ssh + +- name: "AUTOMATED | 5.3.6 | PATCH | Ensure SSH X11 forwarding is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^X11Forwarding|^#X11Forwarding' + line: 'X11Forwarding no' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_6 + tags: + - level2-server + - level1-workstation + - automated + - patch + - rule_5.3.6 + - ssh + +- name: "AUTOMATED | 
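Because every 5.3.x control is a regexp-anchored lineinfile edit, it is worth confirming what sshd will actually enforce rather than trusting the file text; OpenSSH can report both a syntax check and the effective configuration:

    sshd -t                                                      # syntax check before the restart handler runs
    sshd -T | grep -Ei 'loglevel|x11forwarding|permitrootlogin'  # effective values, including defaults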
5.3.7 | PATCH | Ensure SSH MaxAuthTries is set to 4 or less" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MaxAuthTries|^#MaxAuthTries' + line: 'MaxAuthTries {{ ubtu18cis_sshd.max_auth_tries }}' + insertafter: '^# Authentication' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.7 + - ssh + +- name: "AUTOMATED | 5.3.8 | PATCH | Ensure SSH IgnoreRhosts is enabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^IgnoreRhosts|^#IgnoreRhosts' + line: 'IgnoreRhosts yes' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.8 + - ssh + +- name: "AUTOMATED | 5.3.9 | PATCH | Ensure SSH HostbasedAuthentication is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^HostbasedAuthentication|^#HostbasedAuthentication' + line: 'HostbasedAuthentication no' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.9 + - ssh + +- name: "AUTOMATED | 5.3.10 | PATCH | Ensure SSH root login is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^PermitRootLogin|^#PermitRootLogin' + line: 'PermitRootLogin no' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_10 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.10 + - ssh + +- name: "AUTOMATED | 5.3.11 | PATCH | Ensure SSH PermitEmptyPasswords is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^PermitEmptyPasswords|^#PermitEmptyPasswords' + line: 'PermitEmptyPasswords no' + insertafter: '# To disable tunneled clear text passwords' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_11 + tags: + - level1-server + - level1-workstation + - scored + - patch + - rule_5.3.11 + - ssh + +- name: "AUTOMATED | 5.3.12 | PATCH | Ensure SSH PermitUserEnvironment is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^PermitUserEnvironment|^#PermitUserEnvironment' + line: 'PermitUserEnvironment no' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_12 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.12 + - ssh + +- name: "AUTOMATED | 5.3.13 | PATCH | Ensure only strong Ciphers are used" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^Ciphers|^#Ciphers' + line: 'Ciphers {{ ubtu18cis_sshd.ciphers }}' + insertafter: '^# Ciphers and keying' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_13 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.13 + - ssh + +- name: "AUTOMATED | 5.3.14 | PATCH | Ensure only strong MAC algorithms are used" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MACs|^#MACs' + line: 'MACs {{ ubtu18cis_sshd.macs }}' + insertafter: '^# Ciphers and keying' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_14 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.14 + - ssh + +- name: "AUTOMATED | 5.3.15 | PATCH | Ensure only strong Key Exchange algorithms are used" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^KexAlgorithms|^#KexAlgorithms' + line: 'KexAlgorithms {{ ubtu18cis_sshd.kex_algorithms }}' + insertafter: '^# Ciphers and keying' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_15 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.15 + - ssh + +- name: "AUTOMATED | 5.3.16 | PATCH | Ensure SSH Idle Timeout 
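These tasks read their values from an ubtu18cis_sshd dictionary supplied alongside the role (typically in its defaults). The values below are illustrative only, roughly the CIS-suggested numbers, but they show the shape the tasks expect:

    ubtu18cis_sshd:
      log_level: VERBOSE
      max_auth_tries: 4
      client_alive_interval: 300
      client_alive_count_max: 3
      login_grace_time: 60
      max_sessions: 10
      ciphers: chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr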
Interval is configured" + lineinfile: + path: /etc/ssh/sshd_config + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ClientAliveInterval|^#ClientAliveInterval', line: 'ClientAliveInterval {{ ubtu18cis_sshd.client_alive_interval }}' } + - { regexp: '^ClientAliveCountMax|^#ClientAliveCountMax', line: 'ClientAliveCountMax {{ ubtu18cis_sshd.client_alive_count_max }}' } + notify: restart sshd + when: + - ubtu18cis_rule_5_3_16 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.16 + - sshd + +- name: "AUTOMATED | 5.3.17 | PATCH | Ensure SSH LoginGraceTime is set to one minute or less" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^LoginGraceTime|^#LoginGraceTime' + line: 'LoginGraceTime {{ ubtu18cis_sshd.login_grace_time }}' + insertafter: '^# Authentication' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_17 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.17 + - ssh + +- name: "AUTOMATED | 5.3.18 | PATCH | Ensure SSH warning banner is configured" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^Banner|^#Banner' + line: Banner /etc/issue.net + insertafter: '^# no default banner path' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_18 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.18 + - ssh + +- name: "AUTOMATED | 5.3.19 | PATCH | Ensure SSH PAM is enabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^UsePAM|^#UsePAM' + line: 'UsePAM yes' + insertafter: '^# and ChallengeResponseAuthentication' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_19 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.19 + - ssh + - pam + +- name: "AUTOMATED | 5.3.20 | PATCH | Ensure SSH AllowTcpForwarding is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^AllowTcpForwarding|^#AllowTcpForwarding' + line: 'AllowTcpForwarding no' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_20 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_5.3.20 + - ssh + +- name: "AUTOMATED | 5.3.21 | PATCH | Ensure SSH MaxStartups is configured" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MaxStartups|^#MaxStartups' + line: 'MaxStartups 10:30:60' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_21 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.21 + - ssh + +- name: "AUTOMATED | 5.3.22 | PATCH | Ensure SSH MaxSessions is limited" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MaxSessions|^#MaxSessions' + line: 'MaxSessions {{ ubtu18cis_sshd.max_sessions }}' + insertafter: '^# Authentication' + notify: restart sshd + when: + - ubtu18cis_rule_5_3_22 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.22 + - ssh diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.4.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.4.x.yml new file mode 100644 index 0000000..441ace5 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.4.x.yml @@ -0,0 +1,191 @@ +--- +- name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured" + block: + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Install pam_pwquality module" + apt: + name: libpam-pwquality + state: present + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Add minlen" + 
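The 5.4.1 block that follows installs libpam-pwquality and then pins the policy with two lineinfile edits (minlen = 14, minclass = 4) plus a retry=3 argument on the pam_pwquality entry. The quickest way to confirm the enforced policy afterwards is to read it back:

    grep -E '^(minlen|minclass)' /etc/security/pwquality.conf
    grep pam_pwquality /etc/pam.d/common-password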
lineinfile: + path: /etc/security/pwquality.conf + regexp: '^minlen|^# minlen' + line: minlen = 14 + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Add minclass" + lineinfile: + path: /etc/security/pwquality.conf + regexp: '^minclass|^# minclass' + line: 'minclass = 4' + + - name: "AUTOMATED | 5.4.1 | AUDIT | Ensure password creation requirements are configured | Confirm pwquality module in common-password" + command: grep 'password.*requisite.*pam_pwquality.so' /etc/pam.d/common-password + changed_when: false + failed_when: false + register: ubtu18cis_5_4_1_pam_pwquality_state + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Set retry to 3 if pwquality exists" + pamd: + name: common-password + type: password + control: requisite + module_path: pam_pwquality.so + module_arguments: 'retry=3' + state: args_present + when: ubtu18cis_5_4_1_pam_pwquality_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Set retry to 3 if pwquality does not exist" + pamd: + name: common-password + type: password + control: required + module_path: pam_permit.so + new_type: password + new_control: requisite + new_module_path: pam_pwquality.so + module_arguments: 'retry=3' + state: after + when: ubtu18cis_5_4_1_pam_pwquality_state.stdout | length == 0 + when: + - ubtu18cis_rule_5_4_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.1 + - pam + +# ------------- +# ------------- +# There is a bug in pam_tally2.so where the use of the audit keyword may log credentials in the case of user error during authentication. +# To work around this bug the CIS documentation has you setting pam_tally2 to the account section. +# Once bug is fixed please set pam_tally2 to the auth sections. 
We have those commented out in the task +# ------------- +# ------------- + +# ------------- +# ------------- +# figure out why pam_deny kills vagrant user +# ------------- +# ------------- +- name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured" + block: + - name: "AUTOMATED | 5.4.2 | AUDIT | Ensure lockout for failed password attempts is configured | Confirm pam_tally2.so module in common-password" + # command: grep 'auth.*required.*pam_tally2.so' /etc/pam.d/common-password + command: grep 'auth.*required.*pam_tally2.so' /etc/pam.d/common-account + changed_when: false + failed_when: false + register: ubtu18cis_5_4_2_pam_tally2_state + + - name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Set pam_tally2.so settings if exists" + pamd: + # name: common-auth + name: common-account + # type: auth + type: account + control: required + module_path: pam_tally2.so + module_arguments: 'onerr=fail + audit + silent + deny=5 + unlock_time=900' + when: ubtu18cis_5_4_2_pam_tally2_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Set pam_tally2.so settings if does not exist" + lineinfile: + # path: /etc/pam.d/common-auth + path: /etc/pam.d/common-account + # line: 'account required pam_tally2.so onerr=fail audit silent deny=5 unlock_time=900' + line: 'account required pam_tally2.so onerr=fail audit silent deny=5 unlock_time=900' + insertafter: '^# end of pam-auth-update config' + when: ubtu18cis_5_4_2_pam_tally2_state.stdout | length == 0 + + - name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Set pam_deny.so and pam_tally.so" + lineinfile: + path: /etc/pam.d/common-account + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: '^# end of pam-auth-update config' + with_items: + # - { regexp: '^accout.*requisite.*pam_deny.so', line: 'account requisite pam_george.so' } + - { regexp: '^account.*required.*pam_tally.so', line: 'account required pam_tally.so' } + when: + - ubtu18cis_rule_5_4_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.2 + - pamd + - notimplemented + +- name: "AUTOMATED | 5.4.3 | PATCH | Ensure password reuse is limited" + block: + - name: "AUTOMATED | 5.4.3 | AUDIT | Ensure password reuse is limited | Confirm pam_pwhistory.so in common-password" + command: grep 'password.*required.*pam_pwhistory.so' /etc/pam.d/common-password + changed_when: false + failed_when: false + register: ubtu18cis_5_4_3_pam_pwhistory_state + + - name: "AUTOMATED | 5.4.3 | PATCH | Ensure password reuse is limited | Set remember value if pam_pwhistory exists" + pamd: + name: common-password + type: password + control: required + module_path: pam_pwhistory.so + module_arguments: 'remember={{ ubtu18cis_pamd_pwhistory_remember }}' + state: args_present + when: ubtu18cis_5_4_3_pam_pwhistory_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.3 | PATCH | Ensure password reuse is limited | Set remember value if pam_pwhistory does no exist" + lineinfile: + path: /etc/pam.d/common-password + line: 'password required pam_pwhistory.so remember={{ ubtu18cis_pamd_pwhistory_remember }}' + insertbefore: 'pam_unix.so' + when: ubtu18cis_5_4_3_pam_pwhistory_state.stdout | length == 0 + when: + - ubtu18cis_rule_5_4_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.3 + - pamd + +- name: "AUTOMATED | 5.4.4 | PATCH | Ensure password 
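Once 5.4.2 and 5.4.3 are in place, the relevant PAM lines look roughly like the pair below, and a locked-out account's failure counter can be inspected or cleared with the pam_tally2 utility (the user name is a placeholder):

    account   required   pam_tally2.so onerr=fail audit silent deny=5 unlock_time=900   # /etc/pam.d/common-account
    password  required   pam_pwhistory.so remember=5                                    # /etc/pam.d/common-password; remember comes from ubtu18cis_pamd_pwhistory_remember

    pam_tally2 --user alice           # show the current failure count
    pam_tally2 --user alice --reset   # clear it after a legitimate lockout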
hashing algorithm is SHA-512" + block: + - name: "AUTOMATED | 5.4.4 | PATCH | Ensure password hashing algorithm is SHA-512 | Confirm pam_unix.so" + shell: grep -E '^\s*password\s+(\S+\s+)+pam_unix\.so\s+(\S+\s+)*sha512\s*(\S+\s*)*(\s+#.*)?$' /etc/pam.d/common-password + changed_when: false + failed_when: false + register: ubtu18cis_5_4_4_pam_unix_state + + - name: "AUTOMATED | 5.4.4 | PATCH | Ensure password hashing algorithm is SHA-512 | Set hashing if pam_unix.so exists" + pamd: + name: common-password + type: password + control: '[success=1 default=ignore]' + module_path: pam_unix.so + module_arguments: sha512 + state: args_present + when: ubtu18cis_5_4_4_pam_unix_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.4 | PATCH | Ensure password hashing algorithm is SHA-512 | Set hashing if pam_unix.so does not exist" + lineinfile: + path: /etc/pam.d/common-password + line: 'password [success=1 default=ignore] pam_unix.so sha512' + insertbefore: '^# end of pam-auth-update config' + when: ubtu18cis_5_4_4_pam_unix_state.stdout | length == 0 + when: + - ubtu18cis_rule_5_4_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.4 + - pamd diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.1.x.yml new file mode 100644 index 0000000..268d331 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.1.x.yml @@ -0,0 +1,139 @@ +--- +- name: "AUTOMATED | 5.5.1.1 | PATCH | Ensure minimum days between password changes is configured" + block: + - name: "AUTOMATED | 5.5.1.1 | PATCH | Ensure minimum days between password changes is configured | Set /etc/login.defs PASS_MIN_DAYS" + lineinfile: + path: /etc/login.defs + regexp: '^PASS_MIN_DAYS|^#PASS_MIN_DAYS' + line: 'PASS_MIN_DAYS {{ ubtu18cis_pass.min_days }}' + + - name: "AUTOMATED | 5.5.1.1 | PATCH | Ensure minimum days between password changes is configured | Set existing users PASS_MIN_DAYS" + command: chage --mindays {{ ubtu18cis_pass.min_days }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu18cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu18cis_disruption_high + when: + - ubtu18cis_rule_5_5_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.1 + - user + - login + +- name: "AUTOMATED | 5.5.1.2 | PATCH | Ensure password expiration is 365 days or less" + block: + - name: "AUTOMATED | 5.5.1.2 | PATCH | Ensure password expiration is 365 days or less | Set /etc/login.defs PASS_MAX_DAYS" + lineinfile: + path: /etc/login.defs + regexp: '^PASS_MAX_DAYS|^#PASS_MAX_DAYS' + line: 'PASS_MAX_DAYS {{ ubtu18cis_pass.max_days }}' + insertafter: '# Password aging controls' + + - name: "AUTOMATED | 5.5.1.2 | PATCH | Ensure password expiration is 365 days or less | Set existing users PASS_MAX_DAYS" + command: chage --maxdays {{ ubtu18cis_pass.max_days }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu18cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu18cis_disruption_high + when: + - ubtu18cis_rule_5_5_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.2 + - user + - login + +- name: "AUTOMATED | 5.5.1.3 | PATCH | Ensure password expiration warning days is 7 or more" + block: + - name: "AUTOMATED | 5.5.1.3 | PATCH | Ensure password expiration warning days is 7 or more | Set /etc/login.defs PASS_WARN_AGE" + lineinfile: + path: /etc/login.defs 
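PASS_MIN_DAYS and PASS_MAX_DAYS in login.defs only govern accounts created afterwards, which is why each of these controls also loops chage over existing UIDs of 1000 and above when ubtu18cis_disruption_high is set. The per-user result is easy to confirm:

    chage -l alice          # "alice" is a placeholder; shows min/max/warn/inactive ages
    grep -E '^PASS_(MIN|MAX)_DAYS|^PASS_WARN_AGE' /etc/login.defs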
+ regexp: '^PASS_WARN_AGE|^#PASS_WARN_AGE' + line: 'PASS_WARN_AGE {{ ubtu18cis_pass.warn_age }}' + + - name: "AUTOMATED | 5.5.1.3 | PATCH | Ensure password expiration warning days is 7 or more | Set existing users PASS_WARN_AGE" + command: chage --warndays {{ ubtu18cis_pass.warn_age }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu18cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu18cis_disruption_high + when: + - ubtu18cis_rule_5_5_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.3 + - user + - login + +- name: "AUTOMATED | 5.5.1.4 | PATCH | Ensure inactive password lock is 30 days or less" + block: + - name: "AUTOMATED | 5.5.1.4 | PATCH | Ensure inactive password lock is 30 days or less | Set inactive period for new users" + command: useradd -D -f {{ ubtu18cis_pass.inactive }} + failed_when: false + + - name: "AUTOMATED | 5.5.1.4 | PATCH | Ensure inactive password lock is 30 days or less | Set inactive period for existing users" + command: chage --inactive {{ ubtu18cis_pass.inactive }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu18cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu18cis_disruption_high + when: + - ubtu18cis_rule_5_5_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.4 + - user + - login + +- name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past" + block: + - name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past | Get current date in Unix Time" + shell: echo $(($(date --utc --date "$1" +%s)/86400)) + changed_when: false + failed_when: false + register: ubtu18cis_5_5_1_5_current_time + + - name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past | Get list of users with last changed PW date in future" + shell: "cat /etc/shadow | awk -F: '{if($3>{{ ubtu18cis_5_5_1_5_current_time.stdout }})print$1}'" + changed_when: false + failed_when: false + register: ubtu18cis_5_5_1_5_user_list + + - name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past | Warn about users" + debug: + msg: + - "WARNING!!!!The following accounts have the last PW change date in the future" + - "{{ ubtu18cis_5_5_1_5_user_list.stdout_lines }}" + when: ubtu18cis_5_5_1_5_user_list.stdout | length > 0 + + - name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past | Lock accounts with furtre PW changed dates" + command: passwd --expire {{ item }} + failed_when: false + with_items: + - "{{ ubtu18cis_5_5_1_5_user_list.stdout_lines }}" + when: + - ubtu18cis_disruption_high + - ubtu18cis_5_5_1_5_user_list.stdout | length > 0 + when: + - ubtu18cis_rule_5_5_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.5 + - user + - login diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.x.yml new file mode 100644 index 0000000..b057cfe --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.5.x.yml @@ -0,0 +1,100 @@ +--- +- name: "AUTOMATED | 5.5.2 | PATCH | Ensure system accounts are secured" + block: + - name: "AUTOMATED | 5.5.2 | PATCH | Ensure system accounts are secured | Set system accounts to login" + user: + name: "{{ item }}" + shell: /sbin/nologin + with_items: + - "{{ ubtu18cis_passwd | 
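The 5.5.1.5 check above works in days since the epoch, the unit used by field 3 of /etc/shadow: the current UNIX time in seconds is divided by 86400, and any account whose last-change field is larger than that is flagged as having a change date in the future. For example:

    date --utc +%s               # 1644105600 at 2022-02-06 00:00 UTC
    echo $((1644105600/86400))   # 19029; any /etc/shadow field 3 above this is a future date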
selectattr('uid', '<', 1000) | map(attribute='id') | list }}" + when: + - item != "root" + - item != "sync" + - item != "shutdown" + - item != "halt" + + - name: "AUTOMATED | 5.5.2 | PATCH | Ensure system accounts are secured | Lock non-root system accounts" + user: + name: "{{ item }}" + password_lock: true + with_items: + - "{{ ubtu18cis_passwd | selectattr('uid', '<', 1000) | map(attribute='id') | list }}" + when: + - item != "root" + when: + - ubtu18cis_rule_5_5_2 + - ubtu18cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.2 + - user + - system + +- name: "AUTOMATED | 5.5.3 | PATCH | Ensure default group for the root account is GID 0" + block: + - name: "AUTOMATED | 5.5.3 | PATCH | Ensure default group for the root account is GID 0 | Set root group to GUID 0" + group: + name: root + gid: 0 + + - name: "AUTOMATED | 5.5.3 | PATCH | Ensure default group for the root account is GID 0 | Set root user to root group" + user: + name: root + group: root + when: + - ubtu18cis_rule_5_5_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.3 + - user + - system + +- name: "AUTOMATED | 5.5.4 | PATCH | Ensure default user umask is 027 or more restrictive" + lineinfile: + path: "{{ item }}" + regexp: '^umask ' + line: "umask {{ ubtu18cis_bash_umask }}" + with_items: + - /etc/bash.bashrc + - /etc/profile + when: + - ubtu18cis_rule_5_5_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.4 + - user + +- name: "AUTOMATED | 5.5.5 | PATCH | Ensure default user shell timeout is 900 seconds or less" + blockinfile: + create: yes + mode: 0644 + dest: "{{ item.dest }}" + state: "{{ item.state }}" + marker: "# {mark} ANSIBLE MANAGED" + block: | + # Set session timeout - CIS ID 5.5.5 + TMOUT={{ ubtu18cis_shell_session_timeout.timeout }} + readonly TMOUT + export TMOUT + with_items: + - { dest: "{{ ubtu18cis_shell_session_timeout.file }}", state: present } + - { dest: /etc/profile, state: "{{ (ubtu18cis_shell_session_timeout.file == '/etc/profile') | ternary('present', 'absent') }}" } + - { dest: /etc/bash.bashrc, state: present } + when: + - ubtu18cis_rule_5_5_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.5 + - user diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.6.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.6.yml new file mode 100644 index 0000000..4718bf1 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.6.yml @@ -0,0 +1,24 @@ +--- +- name: "MANUAL | 5.6 | AUDIT | Ensure root login is restricted to system console" + block: + - name: "MANUAL | 5.6 | AUDIT | Ensure root login is restricted to system console | Get list of all terminals" + command: cat /etc/securetty + changed_when: false + failed_when: false + register: ubtu18cis_5_6_terminal_list + + - name: "MANUAL | 5.6 | AUDIT | Ensure root login is restricted to system console | Message out list" + debug: + msg: + - "WARNING!!!!Below is the list of conoles with root login access" + - "Please review for any conoles that are not in a physically secure location" + - "{{ ubtu18cis_5_6_terminal_list.stdout_lines }}" + when: + - ubtu18cis_rule_5_6 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_5.6 + - user diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.7.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.7.yml new file mode 100644 index 0000000..ea9f4ff --- 
/dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/cis_5.7.yml @@ -0,0 +1,39 @@ +--- +- name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted" + block: + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Check for pam_wheel.so module" + command: grep '^auth.*required.*pam_wheel' /etc/pam.d/su + changed_when: false + failed_when: false + register: ubtu18cis_5_7_pam_wheel_status + + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Create empty sugroup" + group: + name: "{{ ubtu18cis_su_group }}" + + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Set pam_wheel if exists" + pamd: + name: su + type: auth + control: required + module_path: pam_wheel.so + module_arguments: 'use_uid group={{ ubtu18cis_su_group }}' + when: ubtu18cis_5_7_pam_wheel_status.stdout | length > 0 + + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Set pam_wheel if does not exist" + lineinfile: + path: /etc/pam.d/su + regex: (#)?auth\s+required\s+pam_wheel\.so\n|(#)?auth\s+required\s+pam_wheel\.so(.*?)use_uid + line: 'auth required pam_wheel.so use_uid group={{ ubtu18cis_su_group }}' + create: yes + insertafter: '(#)?auth\s+sufficient\s+pam_rootok' + when: ubtu18cis_5_7_pam_wheel_status.stdout | length == 0 + when: + - ubtu18cis_rule_5_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.7 + - user diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/main.yml new file mode 100644 index 0000000..e88e37d --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_5/main.yml @@ -0,0 +1,24 @@ +--- +- name: "SECTION | 5.1 | Configure time-based job schedulers" + include: cis_5.1.x.yml + +- name: "SECTION | 5.2 | Configure sudo" + include: cis_5.2.x.yml + +- name: "SECTION | 5.3 | Configure SSH Server" + include: cis_5.3.x.yml + +- name: "SECTION | 5.4.x | User PAM" + include: cis_5.4.x.yml + +- name: "SECTION | 5.5.1.x | User Accounts and Enironment part 1" + include: cis_5.5.1.x.yml + +- name: "SECTION | 5.5.x | User Accounts and Enironment part 2" + include: cis_5.5.x.yml + +- name: "SECTION | 5.6 | Ensure root login is restricted to system console" + include: cis_5.6.yml + +- name: "SECTION | 5.7 | Ensure access to the su command is restricted" + include: cis_5.7.yml diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.1.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.1.x.yml new file mode 100644 index 0000000..7559841 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.1.x.yml @@ -0,0 +1,348 @@ +--- +- name: "MANUAL | 6.1.1 | AUDIT | Audit system file permissions" + block: + - name: "MANUAL | 6.1.1 | AUDIT | Audit system file permissions | Register package list" + command: ls -a /bin/ + changed_when: false + failed_when: false + register: ubtu18cis_6_1_1_packages + + # - name: "MANUAL | 6.1.1 | AUDIT | Audit system file permissions | Audit the packages" + # command: dpkg --verify {{ item }} + # changed_when: false + # failed_when: false + # with_items: + # - "{{ ubtu18cis_6_1_1_packages.stdout_lines }}" + # register: ubtu18cis_6_1_1_packages_audited + + - name: "MANUAL | 6.1.1 | AUDIT | Audit system file permissions | Message out packages results for review" + debug: + msg: + - "ALERT!!!! Below are the packages that need to be reviewed." 
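For the 6.1.1 review, dpkg --verify stays silent for a package whose files match the database and otherwise prints rpm-style flag lines, along the lines of the sketch below, where a "5" in the flag field marks a checksum mismatch and "c" marks a conffile:

    dpkg --verify openssh-server
    ??5?????? c /etc/ssh/sshd_config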
+ - "You can run dpkg --verify and if nothing is returned the package is installed correctly" + - "{{ ubtu18cis_6_1_1_packages.stdout_lines }}" + when: + - ubtu18cis_rule_6_1_1 + tags: + - level2-server + - level2-workstation + - manual + - audit + - rule_6.1.1 + - permissions + +- name: "AUTOMATED | 6.1.2 | PATCH | Ensure permissions on /etc/passwd are configured" + file: + path: /etc/passwd + owner: root + group: root + mode: 0644 + when: + - ubtu18cis_rule_6_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.2 + - permissions + +- name: "AUTOMATED | 6.1.3 | PATCH | Ensure permissions on /etc/passwd- are configured" + file: + path: /etc/passwd- + owner: root + group: root + mode: 0600 + when: + - ubtu18cis_rule_6_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.3 + - permissions + +- name: "AUTOMATED | 6.1.4 | PATCH | Ensure permissions on /etc/group are configured" + file: + path: /etc/group + owner: root + group: root + mode: 0644 + when: + - ubtu18cis_rule_6_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.4 + - permissions + +- name: "AUTOMATED | 6.1.5 | PATCH | Ensure permissions on /etc/group- are configured" + file: + path: /etc/group- + owner: root + group: root + mode: 0644 + when: + - ubtu18cis_rule_6_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.5 + - permissions + +- name: "AUTOMATED | 6.1.6 | PATCH | Ensure permissions on /etc/shadow are configured" + file: + path: /etc/shadow + owner: root + group: shadow + mode: 0640 + when: + - ubtu18cis_rule_6_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.6 + - permissions + +- name: "AUTOMATED | 6.1.7 | PATCH | Ensure permissions on /etc/shadow- are configured" + file: + path: /etc/shadow- + owner: root + group: shadow + mode: 0600 + when: + - ubtu18cis_rule_6_1_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.7 + - permissions + +- name: "AUTOMATED | 6.1.8 | PATCH | Ensure permissions on /etc/gshadow are configured" + file: + path: /etc/gshadow + owner: root + group: shadow + mode: 0640 + when: + - ubtu18cis_rule_6_1_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.8 + - permissions + +- name: "AUTOMATED | 6.1.9 | PATCH | Ensure permissions on /etc/gshadow- are configured" + file: + path: /etc/gshadow- + owner: root + group: shadow + mode: 0640 + when: + - ubtu18cis_rule_6_1_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.9 + - permissions + +- name: "AUTOMATED | 6.1.10 | PATCH | Ensure no world writable files exist" + block: + - name: "AUTOMATED | 6.1.10 | PATCH | Ensure no world writable files exist | Get list of world-writable files" + shell: find {{ item.mount }} -xdev -type f -perm -0002 + changed_when: false + failed_when: false + register: ubtu18cis_6_1_10_wwf + with_items: + - "{{ ansible_mounts }}" + + - name: "AUTOMATED | 6.1.10 | PATCH | Ensure no world writable files exist | Adjust world-writable files if they exist" + file: + path: "{{ item }}" + mode: o-w + with_items: + - "{{ ubtu18cis_6_1_10_wwf.results | map(attribute='stdout_lines') | flatten }}" + when: ubtu18cis_no_world_write_adjust + when: + - ubtu18cis_rule_6_1_10 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.10 + - permissions + +- name: "AUTOMATED | 6.1.11 | PATCH | Ensure no unowned files or 
directories exist" + block: + - name: "AUTOMATED | 6.1.11 | AUDIT | Ensure no unowned files or directories exist | Get unowned files or directories" + shell: find {{ item.mount }} -xdev -nouser + changed_when: false + failed_when: false + register: ubtu18cis_6_1_11_no_user_items + with_items: + - "{{ ansible_mounts }}" + + - name: "AUTOMATED | 6.1.11 | PATCH | Ensure no unowned files or directories exist | Flatten no_user_items results for easier use" + set_fact: + ubtu18cis_6_1_11_no_user_items_flatten: "{{ ubtu18cis_6_1_11_no_user_items.results | map(attribute='stdout_lines') | flatten }}" + + - name: "AUTOMATED | 6.1.11 | AUDIT | Ensure no unowned files or directories exist | Alert on unowned files and directories" + debug: + msg: + - "ALERT!!!You have unowned files and are configured to not auto-remediate for this task" + - "Please review the files/directories below and assign an owner" + - "{{ ubtu18cis_6_1_11_no_user_items_flatten }}" + when: + - not ubtu18cis_no_owner_adjust + - ubtu18cis_6_1_11_no_user_items_flatten | length > 0 + + - name: "AUTOMATED | 6.1.11 | PATCH | Ensure no unowned files or directories exist | Set unowned files/directories to configured owner" + file: + path: "{{ item }}" + owner: "{{ ubtu18cis_unowned_owner }}" + with_items: + - "{{ ubtu18cis_6_1_11_no_user_items_flatten }}" + when: + - ubtu18cis_no_owner_adjust + - ubtu18cis_6_1_11_no_user_items_flatten | length > 0 + when: + - ubtu18cis_rule_6_1_11 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.11 + - permissions + +- name: "AUTOMATED | 6.1.12 | PATCH | Ensure no ungrouped files or directories exist" + block: + - name: "AUTOMATED | 6.1.12 | PATCH | Ensure no ungrouped files or directories exist | Get ungrouped fiels or directories" + shell: find {{ item.mount }} -xdev -nogroup + changed_when: false + failed_when: false + register: ubtu18cis_6_1_12_ungrouped_items + with_items: + - "{{ ansible_mounts }}" + + - name: "AUTOMATED | 6.1.12 | PATCH | Ensure no ungrouped files or directories exist | Flatten ungrouped_items results for easier use" + set_fact: + ubtu18cis_6_1_12_ungrouped_items_flatten: "{{ ubtu18cis_6_1_12_ungrouped_items.results | map(attribute='stdout_lines') | flatten }}" + + - name: "AUTOMATED | 6.1.12 | PATCH | Ensure no ungrouped files or directories exist | Alert on ungrouped files and directories" + debug: + msg: + - "ALERT!!!!You have ungrouped files/directories and are configured to not auto-remediate for this task" + - "Please review the files/directories below and assign a group" + - "{{ ubtu18cis_6_1_12_ungrouped_items_flatten }}" + when: + - not ubtu18cis_no_group_adjust + - ubtu18cis_6_1_12_ungrouped_items_flatten | length > 0 + + - name: "AUTOMATED | 6.1.12 | PATCH | Ensure no ungrouped files or directories exist | Set ungrouped files/directories to configured group" + file: + path: "{{ item }}" + group: "{{ ubtu18cis_ungrouped_group }}" + with_items: + - "{{ ubtu18cis_6_1_12_ungrouped_items_flatten }}" + when: + - ubtu18cis_no_group_adjust + - ubtu18cis_6_1_12_ungrouped_items_flatten | length > 0 + when: + - ubtu18cis_rule_6_1_12 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.12 + - permissions + +- name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables" + block: + - name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables | Find SUID executables" + # shell: df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type f -perm -4000 + shell: find {{ item.mount }} -xdev -type f -perm 
-4000 + changed_when: false + failed_when: false + register: ubtu18cis_6_1_13_suid_executables + with_items: + - "{{ ansible_mounts }}" + + - name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables | Flatten suid_executables results for easier use" + set_fact: + ubtu18cis_6_1_13_suid_executables_flatten: "{{ ubtu18cis_6_1_13_suid_executables.results | map(attribute='stdout_lines') | flatten }}" + + - name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables | Alert SUID executables exist" + debug: + msg: + - "ALERT!!!!You have SUID executables" + - "The files are listed below, please confirm the integrity of these binaries" + - "{{ ubtu18cis_6_1_13_suid_executables_flatten }}" + when: + - ubtu18cis_6_1_13_suid_executables_flatten | length > 0 + - not ubtu18cis_suid_adjust + + - name: "MANUAL | 6.1.13 | PATCH | Audit SUID executables | Remove SUID bit" + file: + path: "{{ item }}" + mode: 'u-s' + with_items: + - "{{ ubtu18cis_6_1_13_suid_executables_flatten }}" + when: + - ubtu18cis_suid_adjust + - ubtu18cis_6_1_13_suid_executables_flatten | length > 0 + when: + - ubtu18cis_rule_6_1_13 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_6.1.13 + - permissions + +- name: "MANUAL | 6.1.14 | AUDIT | Audit SGID executables" + block: + - name: "MANUAL | 6.1.14 | PATCH | Audit SGID executables | Find SGID executables" + shell: find {{ item }} -xdev -type f -perm -2000 + changed_when: false + failed_when: false + register: ubtu18cis_6_1_14_sgid_executables + with_items: + - "{{ ansible_mounts }}" + + - name: "MANUAL | 6.1.14 | AUDIT | Audit SGID executables | Flatten sgid_executables results for easier use" + set_fact: + ubtu18cis_6_1_14_sgid_executables_flatten: "{{ ubtu18cis_6_1_14_sgid_executables.results | map(attribute='stdout_lines') | flatten }}" + + - name: "MANUAL | 6.1.14 | AUDIT | Audit SGID executables | Alert SGID executables exist" + debug: + msg: + - "ALERT!!!!You have SGID executables" + - "The files are listed below, please review the integrity of these binaries" + - "{{ ubtu18cis_6_1_14_sgid_executables_flatten }}" + when: ubtu18cis_6_1_14_sgid_executables_flatten | length > 0 + when: + - ubtu18cis_rule_6_1_14 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_6.1.14 + - permissions diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.2.x.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.2.x.yml new file mode 100644 index 0000000..5059e75 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/cis_6.2.x.yml @@ -0,0 +1,545 @@ +--- +- name: "AUTOMATED | 6.2.1 | AUDIT | Ensure accounts in /etc/passwd use shadowed passwords" + block: + - name: "AUTOMATED | 6.2.1 | AUDIT | Ensure accounts in /etc/passwd use shadowed passwords | Get users not using shadowed passwords" + command: awk -F':' '($2 != "x" ) { print $1}' /etc/passwd + changed_when: false + failed_when: false + register: ubtu18cis_6_2_1_nonshadowed_users + + - name: "AUTOMATED | 6.2.1 | AUDIT | Ensure accounts in /etc/passwd use shadowed passwords | Alert on findings" + debug: + msg: + - "ALERT! You have users that are not using a shadowed password. 
Please convert the below accounts to use a shadowed password" + - "{{ ubtu18cis_6_2_1_nonshadowed_users.stdout_lines }}" + when: + - ubtu18cis_6_2_1_nonshadowed_users.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_1 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.1 + - user_accounts + +- name: "AUTOMATED | 6.2.2 | PATCH | Ensure password fields are not empty" + block: + - name: "AUTOMATED | 6.2.2 | AUDIT | Ensure password fields are not empty | Find users with no password" + command: awk -F":" '($2 == "" ) { print $1 }' /etc/shadow + changed_when: false + failed_when: false + check_mode: false + register: ubtu18cis_6_2_2_empty_password_acct + + - name: "AUTOMATED | 6.2.2 | PATCH | Ensure password fields are not empty | Lock users with empty password" + user: + name: "{{ item }}" + password_lock: yes + with_items: + - "{{ ubtu18cis_6_2_2_empty_password_acct.stdout_lines }}" + when: ubtu18cis_6_2_2_empty_password_acct.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.2 + - user + - permissions + +- name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group" + block: + - name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Check /etc/passwd entries" + shell: pwck -r | grep 'no group' | awk '{ gsub("[:\47]",""); print $2}' + changed_when: false + failed_when: false + register: ubtu18cis_6_2_3_passwd_gid_check + + - name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print message that all groups match between passwd and group files" + debug: + msg: "Good News! There are no users that have non-existent GIDs (Groups)" + when: ubtu18cis_6_2_3_passwd_gid_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print warning about users with GIDs missing from /etc/group" + debug: + msg: "WARNING!!!! The following users have non-existent GIDs (Groups): {{ ubtu18cis_6_2_3_passwd_gid_check.stdout_lines | join(', ') }}" + when: ubtu18cis_6_2_3_passwd_gid_check.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_3 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.3 + - groups + +- name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist" + block: + - name: capture audit task for missing homedirs + block: &u18s_homedir_audit + - name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist | Find users missing home directories" + shell: pwck -r | grep -P {{ ld_regex | quote }} + check_mode: false + register: ubtu18cis_users_missing_home + changed_when: ubtu18cis_6_2_4_audit | length > 0 + # failed_when: 0: success, 1: no grep match, 2: pwck found something + failed_when: ubtu18cis_users_missing_home.rc not in [0,1,2] + + ### NOTE: due to https://github.com/ansible/ansible/issues/24862 This is a shell command, and is quite frankly less than ideal.
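+      ### For reference, ld_regex (defined under vars below) matches pwck -r output lines of the form:
+      ###     user 'ftp': directory '/srv/ftp' does not exist
+      ### (illustrative example only), so the user name and home directory can be extracted for the remediation tasks that follow.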
+ - name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist| Creates home directories" + command: "mkhomedir_helper {{ item }}" + # check_mode: "{{ ubtu18cis_disruptive_check_mode }}" + with_items: "{{ ubtu18cis_6_2_4_audit | map(attribute='id') | list }}" + when: + - ubtu18cis_users_missing_home is changed + - ubtu18cis_disruption_high + + ### NOTE: Now we need to address that SELINUX will not let mkhomedir_helper create home directories for UUID < 500, so the ftp user will still show up in a pwck. Not sure this is needed, I need to confirm if that user is removed in an earlier task. + ### ^ Likely doesn't matter as 6.2.7 defines "local interactive users" as those w/ uid 1000-4999 + - name: replay audit task + block: *u18s_homedir_audit + + # CAUTION: debug loops don't show changed since 2.4: + # Fix: https://github.com/ansible/ansible/pull/59958 + - name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist | Alert about correcting owner and group" + debug: msg="You will need to mkdir -p {{ item }} and chown properly to the correct owner and group." + with_items: "{{ ubtu18cis_6_2_4_audit | map(attribute='dir') | list }}" + changed_when: ubtu18cis_audit_complex + when: + - ubtu18cis_users_missing_home is changed + vars: + ld_regex: >- + ^user '(?P.*)': directory '(?P.*)' does not exist$ + ld_users: "{{ ubtu18cis_users_missing_home.stdout_lines | map('regex_replace', ld_regex, '\\g') | list }}" + ubtu18cis_6_2_4_audit: "{{ ubtu18cis_passwd | selectattr('uid', '>=', 1000) | selectattr('id', 'in', ld_users) | list }}" + when: + - ubtu18cis_rule_6_2_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.4 + - user + +- name: "AUTOMATED | 6.2.5 | PATCH | Ensure users own their home directories" + file: + path: "{{ item.dir }}" + owner: "{{ item.id }}" + state: directory + with_items: "{{ ubtu18cis_passwd }}" + loop_control: + label: "{{ ubtu18cis_passwd_label }}" + when: + - ubtu18cis_rule_6_2_5 + - item.uid >= ubtu18cis_int_gid + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.5 + - user + +- name: "AUTOMATED | 6.2.6 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + block: + - name: "AUTOMATED | 6.2.6 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive | Stat home directories" + stat: + path: "{{ item }}" + with_items: "{{ ubtu18cis_passwd | selectattr('uid', '>=', 1000) | selectattr('uid', '!=', 65534) | map(attribute='dir') | list }}" + register: ubtu18cis_6_2_6_audit + + - name: "AUTOMATED | 6.2.6 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive | Find home directories more 750" + command: find -H {{ item.0 | quote }} -not -type l -perm /027 + register: ubtu18cis_6_2_6_patch_audit + changed_when: ubtu18cis_6_2_6_patch_audit.stdout | length > 0 + when: + - item.1.exists + with_together: + - "{{ ubtu18cis_6_2_6_audit.results | map(attribute='item') | list }}" + - "{{ ubtu18cis_6_2_6_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + - name: "AUTOMATED | 6.2.6 | PATCH | Ensure users' home directories permissions are 750 or more restrictive | Set home perms" + file: + path: "{{ item.0 }}" + recurse: yes + mode: a-st,g-w,o-rwx + register: ubtu18cis_6_2_6_patch + when: + - ubtu18cis_disruption_high + - item.1.exists + with_together: + - "{{ ubtu18cis_6_2_6_audit.results | map(attribute='item') | list }}" + - "{{ ubtu18cis_6_2_6_audit.results | 
map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + # set default ACLs so the homedir has an effective umask of 0027 + - name: "AUTOMATED | 6.2.6 | PATCH | Ensure users' home directories permissions are 750 or more restrictive | Set ACL's" + acl: + path: "{{ item.0 }}" + default: yes + state: present + recursive: yes + etype: "{{ item.1.etype }}" + permissions: "{{ item.1.mode }}" + when: not ubtu18cis_system_is_container + with_nested: + - "{{ (ansible_check_mode | ternary(ubtu18cis_6_2_6_patch_audit, ubtu18cis_6_2_6_patch)).results | + rejectattr('skipped', 'defined') | map(attribute='item') | map('first') | list }}" + - + - etype: group + mode: rx + - etype: other + mode: '0' + when: + - ubtu18cis_rule_6_2_6 + - ubtu18cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.6 + - user + +- name: "AUTOMATED | 6.2.7 | PATCH | Ensure users' dot files are not group or world writable" + block: + - name: "AUTOMATED | 6.2.7 | AUDIT | Ensure users' dot files are not group or world-writable | Check for files" + shell: find /home/ -name "\.*" -perm /g+w,o+w + changed_when: no + failed_when: no + register: ubtu18cis_6_2_7_audit + + - name: "AUTOMATED | 6.2.7 | AUDIT | Ensure users' dot files are not group or world-writable | Alert on no files found" + debug: + msg: "Good news! We have not found any group or world-writable dot files on your sytem" + failed_when: false + changed_when: false + when: + - ubtu18cis_6_2_7_audit.stdout | length == 0 + + - name: "AUTOMATED | 6.2.7 | PATCH | Ensure users' dot files are not group or world-writable | Changes files if configured" + file: + path: '{{ item }}' + mode: go-w + with_items: "{{ ubtu18cis_6_2_7_audit.stdout_lines }}" + when: + - ubtu18cis_6_2_7_audit.stdout | length > 0 + - ubtu18cis_dotperm_ansibleManaged + + - name: "AUTOMATED | 6.2.7 | AUDIT | Ensure users' dot files are not group or world-writable | Alert on files found" + debug: + msg: + - "ALERT! 
You have group or world-writable files on the system and do not have automation changing the permissions" + - "Please review the files below and adjust permissions to remove group and world writable options" + - "{{ ubtu18cis_6_2_7_audit.stdout_lines }}" + when: + - ubtu18cis_6_2_7_audit.stdout | length > 0 + - not ubtu18cis_dotperm_ansibleManaged + when: + - ubtu18cis_rule_6_2_7 + - ubtu18cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.7 + - permissions + +- name: "AUTOMATED | 6.2.8 | PATCH | Ensure no users have .netrc files" + file: + dest: "~{{ item }}/.netrc" + state: absent + with_items: + - "{{ ubtu18cis_users.stdout_lines }}" + when: + - ubtu18cis_rule_6_2_8 + - ubtu18cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.8 + - user + +- name: "AUTOMATED | 6.2.9 | PATCH | Ensure no users have .forward files" + file: + dest: "~{{ item }}/.forward" + state: absent + with_items: + - "{{ ubtu18cis_users.stdout_lines }}" + when: + - ubtu18cis_rule_6_2_9 + - ubtu18cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.9 + - user + +- name: "AUTOMATED | 6.2.10 | PATCH | Ensure no users have .rhosts files" + file: + dest: "~{{ item }}/.rhosts" + state: absent + with_items: + - "{{ ubtu18cis_users.stdout_lines }}" + when: + - ubtu18cis_rule_6_2_10 + - ubtu18cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.10 + - user + +- name: "AUTOMATED | 6.2.11 | PATCH | Ensure root is the only UID 0 account" + block: + - name: "AUTOMATED | 6.2.11 | AUDIT | Ensure root is the only UID 0 account | Get non-root users with UID of 0" + shell: awk -F":" '($3 == 0 && $1 != \"root\") {i++;print $1 }' /etc/passwd + changed_when: false + failed_when: false + register: ubtu18cis_6_2_11_uid_0_notroot + + - name: "AUTOMATED | 6.2.11 | PATCH | Ensure root is the only UID 0 account | Lock UID 0 non-root users" + user: + name: "{{ item }}" + password_lock: yes + with_items: + - "{{ ubtu18cis_6_2_11_uid_0_notroot.stdout_lines }}" + when: + - ubtu18cis_disruption_high + - ubtu18cis_6_2_11_uid_0_notroot.stdout | length > 0 + + - name: "AUTOMATED | 6.2.11 | AUDIT | Ensure root is the only UID 0 account | Alert about non-root accounts with UID 0" + debug: + msg: + - "ALERT!!!! 
You have non-root users with a UID of 0 and ubtu18cis_disruption_high is not enabled" + - "This means the following accounts were not password locked and will need to have their UIDs manually adjusted" + - "{{ ubtu18cis_6_2_11_uid_0_notroot.stdout_lines }}" + when: + - not ubtu18cis_disruption_high + - ubtu18cis_6_2_11_uid_0_notroot.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_11 + tags: + - level1-server + - level1-workstation + - patch + - automated + - rule_6.2.11 + - user + - root + +- name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity" + block: + - name: "AUTOMATED | 6.2.12 | AUDIT | Ensure root PATH Integrity | Determine empty value" + shell: 'echo $PATH | grep ::' + changed_when: False + failed_when: ubtu18cis_6_2_12_path_empty.rc == 0 + register: ubtu18cis_6_2_12_path_empty + + - name: "AUTOMATED | 6.2.12 | AUDIT | Ensure root PATH Integrity | Determine colon end" + shell: 'echo $PATH | grep :$' + changed_when: False + failed_when: ubtu18cis_6_2_12_path_colon_end.rc == 0 + register: ubtu18cis_6_2_12_path_colon_end + + - name: "AUTOMATED | 6.2.12 | AUDIT | Ensure root PATH Integrity | Determine working dir" + shell: echo "$PATH" + changed_when: False + failed_when: '"." in ubtu18cis_6_2_12_working_dir.stdout_lines' + register: ubtu18cis_6_2_12_working_dir + + - name: "AUTOMATED | 6.2.12 | AUDIT | Ensure root PATH Integrity | Check paths" + stat: + path: "{{ item }}" + register: ubtu18cis_6_2_12_path_stat + with_items: + - "{{ ubtu18cis_6_2_12_working_dir.stdout.split(':') }}" + + - name: "AUTOMATED | 6.2.12 | AUDIT | Ensure root PATH Integrity | Alert on empty value, colon end, and no working dir" + debug: + msg: + - "The following paths have no working directory: {{ ubtu18cis_6_2_12_path_stat.results | selectattr('stat.exists', 'equalto', false) | map(attribute='item') | list }}" + + - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Set permissions" + file: + path: "{{ item }}" + owner: root + mode: 'o-w,g-w' + state: directory + with_items: + - "{{ ubtu18cis_6_2_12_working_dir.stdout.split(':') }}" + when: + - ubtu18cis_disruption_high + when: + - ubtu18cis_rule_6_2_12 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.12 + - user + - root + - notimplemented + +- name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist" + block: + - name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist | Check for duplicate UIDs" + shell: "pwck -r | awk -F: '{if ($3 in uid) print $1 ; else uid[$3]}' /etc/passwd" + changed_when: false + failed_when: false + register: ubtu18cis_6_2_13_user_uid_check + + - name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist | Print message that no duplicate UIDs exist" + debug: + msg: "Good News! There are no duplicate UIDs in the system" + when: ubtu18cis_6_2_13_user_uid_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist | Print warning about users with duplicate UIDs" + debug: + msg: "Warning!!!!
The following users have UIDs that are duplicates: {{ ubtu18cis_6_2_13_user_uid_check.stdout_lines }}" + when: ubtu18cis_6_2_13_user_uid_check.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_13 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.13 + - user + +- name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist" + block: + - name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist | Check for duplicate GIDs" + shell: "pwck -r | awk -F: '{if ($3 in users) print $1 ; else users[$3]}' /etc/group" + changed_when: false + failed_when: false + register: ubtu18cis_6_2_14_user_check + + - name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist | Print message that no duplicate GIDs exist" + debug: + msg: "Good News! There are no duplicate GIDs in the system" + when: ubtu18cis_6_2_14_user_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist | Print warning about groups with duplicate GIDs" + debug: + msg: "Warning: The following groups have duplicate GIDs: {{ ubtu18cis_6_2_14_user_check.stdout_lines }}" + when: ubtu18cis_6_2_14_user_check.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_14 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.14 + - groups + +- name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist" + block: + - name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist | Check for duplicate User Names" + shell: "pwck -r | awk -F: '{if ($1 in users) print $1 ; else users[$1]}' /etc/passwd" + changed_when: no + failed_when: no + register: ubtu18cis_6_2_15_user_username_check + + - name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist | Print message that no duplicate user names exist" + debug: + msg: "Good News! There are no duplicate user names in the system" + when: ubtu18cis_6_2_15_user_username_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist | Print warning about users with duplicate User Names" + debug: + msg: "Warning: The following user names are duplicates: {{ ubtu18cis_6_2_15_user_username_check.stdout_lines }}" + when: ubtu18cis_6_2_15_user_username_check.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_15 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.15 + - user + +- name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist" + block: + - name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist | Check for duplicate group names" + shell: 'getent group | cut -d: -f1 | sort -n | uniq -d' + changed_when: false + failed_when: false + register: ubtu18cis_6_2_16_group_group_check + + - name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist | Print message that no duplicate groups exist" + debug: + msg: "Good News!
There are no duplicate group names in the system" + when: ubtu18cis_6_2_16_group_group_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist | Print warning about users with duplicate group names" + debug: + msg: "Warning: The following group names are duplicates: {{ ubtu18cis_6_2_16_group_group_check.stdout_lines }}" + when: ubtu18cis_6_2_16_group_group_check.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_16 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.16 + - groups + +- name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty" + block: + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | Get Shadow GID" + shell: grep ^shadow /etc/group | cut -f3 -d":" + changed_when: false + failed_when: false + register: ubtu18cis_6_2_17_shadow_gid + + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | List of users with Shadow GID" + shell: awk -F":" '($4 == "{{ ubtu18cis_6_2_17_shadow_gid.stdout }}") { print }' /etc/passwd | cut -f1 -d":" + changed_when: false + failed_when: false + register: ubtu18cis_6_2_17_users_shadow_gid + + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | Message on no users" + debug: + msg: "Good News! There are no users with the Shado GID on your system" + when: ubtu18cis_6_2_17_users_shadow_gid.stdout | length == 0 + + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | Message on users with Shadow GID" + debug: + msg: + - "WARNING!!!! There are users that are in the Shadow group" + - "To conform to CIS standards no users should be in this group" + - "Please move the users below into another group" + - "{{ ubtu18cis_6_2_17_users_shadow_gid.stdout_lines }}" + when: ubtu18cis_6_2_17_users_shadow_gid.stdout | length > 0 + when: + - ubtu18cis_rule_6_2_17 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.17 + - groups + - user diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/main.yml new file mode 100644 index 0000000..e3a3fbe --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tasks/section_6/main.yml @@ -0,0 +1,6 @@ +--- +- name: "SECTION | 6.1 | System File Permissions" + include: cis_6.1.x.yml + +- name: "SECTION | 6.2 | User and Group Settings" + include: cis_6.2.x.yml diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/.DS_Store b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..0347e7f205bc594968a0b944d1c2580701ff6e94 GIT binary patch literal 6148 zcmeHK!HN?>5UozKZYLt<;G*8<;%$WlgNuiSF^-5gVMGrqyEB=v8JJ9m%*5cv$oL!j zHy-@~Kg1ufs=FK$*Y&a>xKu&+tFEr9&b)5AQvjfbi=+?G1ptkWu+hfu4?^?Q&PanT z6sK&CGATw`Y)pnjk^g}K+PfWyU;zoFu(f|1=C|QI&rE)T&a|^l7()r8cv;Ni%*1H< z8qcz{D0{tcs?luiT-nvSqdV6+Z^DULhGkSPieWT=MWbg?If+?+jz^`|qxeNSULJLC zJTiG1rTKVngVSV;F0Wsvd197Bv&fUe<_X<|)@|KB>fT(Ccno_n%SY@7p}gY|2jh;mAy9wR`P6H9^2+ z=#RnoCwr$~9{x%-HOXIQXOr`?{zm-9vZ4wD!hkSv0S5RA)K@Q%LqdfCVc>EyK=*@( zjnHST9NMb`J6i%Ew$N+^b*d#O#$)suD~C9PqFgAV3sqcVC>M@)kNtec%ApGf#l?r> zl@(Vg%C3(2J?jq2cPPCuAPj6X&~(Etz5kETum87$WF!m-1DA>c)e6F3h@Zr7>zR+E wx7NdUjE#i+Du;ClD)uQxUwVpfVY7mEj}%0ov2utO6!{}yX^>7B_@@l~04fi6k^lez literal 0 HcmV?d00001 diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ansible_vars_goss.yml.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ansible_vars_goss.yml.j2 new file mode 100644 index 0000000..64f2f0b --- /dev/null +++ 
b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ansible_vars_goss.yml.j2 @@ -0,0 +1,487 @@ +## metadata for Audit benchmark +benchmark_version: '2.1.0' + +# Some audit tests may need to scan every filesystem or have an impact on a system +# these may need be scheduled to minimise impact also ability to set a timeout if taking too long +run_heavy_tests: {{ audit_run_heavy_tests }} +timeout_ms: {{ audit_cmd_timeout }} + + +ubuntu18cis_section1: true +ubuntu18cis_section2: true +ubuntu18cis_section3: true +ubuntu18cis_section4: true +ubuntu18cis_section5: true +ubuntu18cis_section6: true + +ubuntu18cis_level_1: true +ubuntu18cis_level_2: true + +ubuntu18cis_apparmor_disable: true + +# to enable rules that may have IO impact on a system e.g. full filesystem scans or CPU heavy +run_heavy_tests: true + +# True is BIOS based system else set to false +ubuntu18_legacy_boot: true + +ubuntu18_set_boot_pass: true + +# These variables correspond with the CIS rule IDs or paragraph numbers defined in +# the CIS benchmark documents. +# PLEASE NOTE: These work in coordination with the section # group variables and tags. +# You must enable an entire section in order for the variables below to take effect. +# Section 1 rules +ubuntu18cis_rule_1_1_1_1: {{ ubtu18cis_rule_1_1_1_1 }} +ubuntu18cis_rule_1_1_1_2: {{ ubtu18cis_rule_1_1_1_2 }} +ubuntu18cis_rule_1_1_1_3: {{ ubtu18cis_rule_1_1_1_3 }} +ubuntu18cis_rule_1_1_1_4: {{ ubtu18cis_rule_1_1_1_4 }} +ubuntu18cis_rule_1_1_1_5: {{ ubtu18cis_rule_1_1_1_5 }} +ubuntu18cis_rule_1_1_1_6: {{ ubtu18cis_rule_1_1_1_6 }} +ubuntu18cis_rule_1_1_2: {{ ubtu18cis_rule_1_1_2 }} +ubuntu18cis_rule_1_1_3: {{ ubtu18cis_rule_1_1_3 }} +ubuntu18cis_rule_1_1_4: {{ ubtu18cis_rule_1_1_4 }} +ubuntu18cis_rule_1_1_5: {{ ubtu18cis_rule_1_1_5 }} +ubuntu18cis_rule_1_1_6: {{ ubtu18cis_rule_1_1_6 }} +ubuntu18cis_rule_1_1_7: {{ ubtu18cis_rule_1_1_7 }} +ubuntu18cis_rule_1_1_8: {{ ubtu18cis_rule_1_1_8 }} +ubuntu18cis_rule_1_1_9: {{ ubtu18cis_rule_1_1_9 }} +ubuntu18cis_rule_1_1_10: {{ ubtu18cis_rule_1_1_10 }} +ubuntu18cis_rule_1_1_11: {{ ubtu18cis_rule_1_1_11 }} +ubuntu18cis_rule_1_1_12: {{ ubtu18cis_rule_1_1_12 }} +ubuntu18cis_rule_1_1_13: {{ ubtu18cis_rule_1_1_13 }} +ubuntu18cis_rule_1_1_14: {{ ubtu18cis_rule_1_1_14 }} +ubuntu18cis_rule_1_1_15: {{ ubtu18cis_rule_1_1_15 }} +ubuntu18cis_rule_1_1_16: {{ ubtu18cis_rule_1_1_16 }} +ubuntu18cis_rule_1_1_17: {{ ubtu18cis_rule_1_1_17 }} +ubuntu18cis_rule_1_1_18: {{ ubtu18cis_rule_1_1_18 }} +ubuntu18cis_rule_1_1_19: {{ ubtu18cis_rule_1_1_19 }} +ubuntu18cis_rule_1_1_20: {{ ubtu18cis_rule_1_1_20 }} +ubuntu18cis_rule_1_1_21: {{ ubtu18cis_rule_1_1_21 }} +ubuntu18cis_rule_1_1_22: {{ ubtu18cis_rule_1_1_22 }} +ubuntu18cis_rule_1_1_23: {{ ubtu18cis_rule_1_1_23 }} +ubuntu18cis_rule_1_1_24: {{ ubtu18cis_rule_1_1_24 }} +ubuntu18cis_rule_1_2_1: {{ ubtu18cis_rule_1_2_1 }} +ubuntu18cis_rule_1_2_2: {{ ubtu18cis_rule_1_2_2 }} +ubuntu18cis_rule_1_3_1: {{ ubtu18cis_rule_1_3_1 }} +ubuntu18cis_rule_1_3_2: {{ ubtu18cis_rule_1_3_2 }}= +ubuntu18cis_rule_1_4_1: {{ ubtu18cis_rule_1_4_1 }} +ubuntu18cis_rule_1_4_2: {{ ubtu18cis_rule_1_4_2 }} +ubuntu18cis_rule_1_4_3: {{ ubtu18cis_rule_1_4_3 }} +ubuntu18cis_rule_1_4_4: {{ ubtu18cis_rule_1_4_4 }} +ubuntu18cis_rule_1_5_1: {{ ubtu18cis_rule_1_5_1 }} +ubuntu18cis_rule_1_5_2: {{ ubtu18cis_rule_1_5_2 }} +ubuntu18cis_rule_1_5_3: {{ ubtu18cis_rule_1_5_3 }} +ubuntu18cis_rule_1_5_4: {{ ubtu18cis_rule_1_5_4 }} +ubuntu18cis_rule_1_6_1_1: {{ ubtu18cis_rule_1_6_1_1 }} +ubuntu18cis_rule_1_6_1_2: {{ ubtu18cis_rule_1_6_1_2 }} +ubuntu18cis_rule_1_6_1_3: {{ 
ubtu18cis_rule_1_6_1_3 }} +ubuntu18cis_rule_1_6_1_4: {{ ubtu18cis_rule_1_6_1_4 }} +ubuntu18cis_rule_1_7_1: {{ ubtu18cis_rule_1_7_1 }} +ubuntu18cis_rule_1_7_2: {{ ubtu18cis_rule_1_7_2 }} +ubuntu18cis_rule_1_7_3: {{ ubtu18cis_rule_1_7_3 }} +ubuntu18cis_rule_1_7_4: {{ ubtu18cis_rule_1_7_4 }} +ubuntu18cis_rule_1_7_5: {{ ubtu18cis_rule_1_7_5 }} +ubuntu18cis_rule_1_7_6: {{ ubtu18cis_rule_1_7_6 }} +ubuntu18cis_rule_1_8_1: {{ ubtu18cis_rule_1_8_1 }} +ubuntu18cis_rule_1_8_2: {{ ubtu18cis_rule_1_8_2 }} +ubuntu18cis_rule_1_8_3: {{ ubtu18cis_rule_1_8_3 }} +ubuntu18cis_rule_1_8_4: {{ ubtu18cis_rule_1_8_4 }} +ubuntu18cis_rule_1_9: {{ ubtu18cis_rule_1_9 }} + +# section 2 rules + +ubuntu18cis_rule_2_1_1_1: {{ ubtu18cis_rule_2_1_1_1 }} +ubuntu18cis_rule_2_1_1_2: {{ ubtu18cis_rule_2_1_1_2 }} +ubuntu18cis_rule_2_1_1_3: {{ ubtu18cis_rule_2_1_1_3 }} +ubuntu18cis_rule_2_1_1_4: {{ ubtu18cis_rule_2_1_1_4 }} +ubuntu18cis_rule_2_1_2: {{ ubtu18cis_rule_2_1_2 }} +ubuntu18cis_rule_2_1_3: {{ ubtu18cis_rule_2_1_3 }} +ubuntu18cis_rule_2_1_4: {{ ubtu18cis_rule_2_1_4 }} +ubuntu18cis_rule_2_1_5: {{ ubtu18cis_rule_2_1_5 }} +ubuntu18cis_rule_2_1_6: {{ ubtu18cis_rule_2_1_6 }} +ubuntu18cis_rule_2_1_7: {{ ubtu18cis_rule_2_1_7 }} +ubuntu18cis_rule_2_1_8: {{ ubtu18cis_rule_2_1_8 }} +ubuntu18cis_rule_2_1_9: {{ ubtu18cis_rule_2_1_9 }} +ubuntu18cis_rule_2_1_10: {{ ubtu18cis_rule_2_1_10 }} +ubuntu18cis_rule_2_1_11: {{ ubtu18cis_rule_2_1_11 }} +ubuntu18cis_rule_2_1_12: {{ ubtu18cis_rule_2_1_12 }} +ubuntu18cis_rule_2_1_13: {{ ubtu18cis_rule_2_1_13 }} +ubuntu18cis_rule_2_1_14: {{ ubtu18cis_rule_2_1_14 }} +ubuntu18cis_rule_2_1_15: {{ ubtu18cis_rule_2_1_15 }} +ubuntu18cis_rule_2_1_16: {{ ubtu18cis_rule_2_1_16 }} +ubuntu18cis_rule_2_1_17: {{ ubtu18cis_rule_2_1_17 }} +ubuntu18cis_rule_2_2_1: {{ ubtu18cis_rule_2_2_1 }} +ubuntu18cis_rule_2_2_2: {{ ubtu18cis_rule_2_2_2 }} +ubuntu18cis_rule_2_2_3: {{ ubtu18cis_rule_2_2_3 }} +ubuntu18cis_rule_2_2_4: {{ ubtu18cis_rule_2_2_4 }} +ubuntu18cis_rule_2_2_5: {{ ubtu18cis_rule_2_2_5 }} +ubuntu18cis_rule_2_2_6: {{ ubtu18cis_rule_2_2_6 }} +ubuntu18cis_rule_2_3: {{ ubtu18cis_rule_2_3 }} + +# Section 3 rules +ubuntu18cis_rule_3_1_1: {{ ubtu18cis_rule_3_1_1 }} +ubuntu18cis_rule_3_1_2: {{ ubtu18cis_rule_3_1_2 }} +ubuntu18cis_rule_3_2_1: {{ ubtu18cis_rule_3_2_1 }} +ubuntu18cis_rule_3_2_2: {{ ubtu18cis_rule_3_2_2 }} +ubuntu18cis_rule_3_3_1: {{ ubtu18cis_rule_3_3_1 }} +ubuntu18cis_rule_3_3_2: {{ ubtu18cis_rule_3_3_2 }} +ubuntu18cis_rule_3_3_3: {{ ubtu18cis_rule_3_3_3 }} +ubuntu18cis_rule_3_3_4: {{ ubtu18cis_rule_3_3_4 }} +ubuntu18cis_rule_3_3_5: {{ ubtu18cis_rule_3_3_5 }} +ubuntu18cis_rule_3_3_6: {{ ubtu18cis_rule_3_3_6 }} +ubuntu18cis_rule_3_3_7: {{ ubtu18cis_rule_3_3_7 }} +ubuntu18cis_rule_3_3_8: {{ ubtu18cis_rule_3_3_8 }} +ubuntu18cis_rule_3_3_9: {{ ubtu18cis_rule_3_3_9 }} +ubuntu18cis_rule_3_4_1: {{ ubtu18cis_rule_3_4_1 }} +ubuntu18cis_rule_3_4_2: {{ ubtu18cis_rule_3_4_2 }} +ubuntu18cis_rule_3_4_3: {{ ubtu18cis_rule_3_4_3 }} +ubuntu18cis_rule_3_4_4: {{ ubtu18cis_rule_3_4_4 }} +# UFW +ubuntu18cis_rule_3_5_1_1: {{ ubtu18cis_rule_3_5_1_1 }} +ubuntu18cis_rule_3_5_1_2: {{ ubtu18cis_rule_3_5_1_2 }} +ubuntu18cis_rule_3_5_1_3: {{ ubtu18cis_rule_3_5_1_3 }} +ubuntu18cis_rule_3_5_1_4: {{ ubtu18cis_rule_3_5_1_4 }} +ubuntu18cis_rule_3_5_1_5: {{ ubtu18cis_rule_3_5_1_5 }} +ubuntu18cis_rule_3_5_1_6: {{ ubtu18cis_rule_3_5_1_6 }} +ubuntu18cis_rule_3_5_1_7: {{ ubtu18cis_rule_3_5_1_7 }} +# NFTables +ubuntu18cis_rule_3_5_2_1: {{ ubtu18cis_rule_3_5_2_1 }} +ubuntu18cis_rule_3_5_2_2: {{ ubtu18cis_rule_3_5_2_2 }} 
+ubuntu18cis_rule_3_5_2_3: {{ ubtu18cis_rule_3_5_2_3 }} +ubuntu18cis_rule_3_5_2_4: {{ ubtu18cis_rule_3_5_2_4 }} +ubuntu18cis_rule_3_5_2_5: {{ ubtu18cis_rule_3_5_2_5 }} +ubuntu18cis_rule_3_5_2_6: {{ ubtu18cis_rule_3_5_2_6 }} +ubuntu18cis_rule_3_5_2_7: {{ ubtu18cis_rule_3_5_2_7 }} +ubuntu18cis_rule_3_5_2_8: {{ ubtu18cis_rule_3_5_2_8 }} +ubuntu18cis_rule_3_5_2_9: {{ ubtu18cis_rule_3_5_2_9 }} +ubuntu18cis_rule_3_5_2_10: {{ ubtu18cis_rule_3_5_2_10 }} +# IPTables +ubuntu18cis_rule_3_5_3_1_1: {{ ubtu18cis_rule_3_5_3_1_1 }} +ubuntu18cis_rule_3_5_3_1_2: {{ ubtu18cis_rule_3_5_3_1_2 }} +ubuntu18cis_rule_3_5_3_1_3: {{ ubtu18cis_rule_3_5_3_1_3 }} +ubuntu18cis_rule_3_5_3_2_1: {{ ubtu18cis_rule_3_5_3_2_1 }} +ubuntu18cis_rule_3_5_3_2_2: {{ ubtu18cis_rule_3_5_3_2_2 }} +ubuntu18cis_rule_3_5_3_2_3: {{ ubtu18cis_rule_3_5_3_2_3 }} +ubuntu18cis_rule_3_5_3_2_4: {{ ubtu18cis_rule_3_5_3_2_4 }} +ubuntu18cis_rule_3_5_3_3_1: {{ ubtu18cis_rule_3_5_3_3_1 }} +ubuntu18cis_rule_3_5_3_3_2: {{ ubtu18cis_rule_3_5_3_3_2 }} +ubuntu18cis_rule_3_5_3_3_3: {{ ubtu18cis_rule_3_5_3_3_3 }} +ubuntu18cis_rule_3_5_3_3_4: {{ ubtu18cis_rule_3_5_3_3_4 }} + +# Section 4 rules +ubuntu18cis_rule_4_1_1_1: {{ ubtu18cis_rule_4_1_1_1 }} +ubuntu18cis_rule_4_1_1_2: {{ ubtu18cis_rule_4_1_1_2 }} +ubuntu18cis_rule_4_1_1_3: {{ ubtu18cis_rule_4_1_1_3 }} +ubuntu18cis_rule_4_1_1_4: {{ ubtu18cis_rule_4_1_1_4 }} +ubuntu18cis_rule_4_1_2_1: {{ ubtu18cis_rule_4_1_2_1 }} +ubuntu18cis_rule_4_1_2_2: {{ ubtu18cis_rule_4_1_2_2 }} +ubuntu18cis_rule_4_1_2_3: {{ ubtu18cis_rule_4_1_2_3 }} +ubuntu18cis_rule_4_1_3: {{ ubtu18cis_rule_4_1_3 }} +ubuntu18cis_rule_4_1_4: {{ ubtu18cis_rule_4_1_4 }} +ubuntu18cis_rule_4_1_5: {{ ubtu18cis_rule_4_1_5 }} +ubuntu18cis_rule_4_1_6: {{ ubtu18cis_rule_4_1_6 }} +ubuntu18cis_rule_4_1_7: {{ ubtu18cis_rule_4_1_7 }} +ubuntu18cis_rule_4_1_8: {{ ubtu18cis_rule_4_1_8 }} +ubuntu18cis_rule_4_1_9: {{ ubtu18cis_rule_4_1_9 }} +ubuntu18cis_rule_4_1_10: {{ ubtu18cis_rule_4_1_10 }} +ubuntu18cis_rule_4_1_11: {{ ubtu18cis_rule_4_1_11 }} +ubuntu18cis_rule_4_1_12: {{ ubtu18cis_rule_4_1_12 }} +ubuntu18cis_rule_4_1_13: {{ ubtu18cis_rule_4_1_13}} +ubuntu18cis_rule_4_1_14: {{ ubtu18cis_rule_4_1_14 }} +ubuntu18cis_rule_4_1_15: {{ ubtu18cis_rule_4_1_15 }} +ubuntu18cis_rule_4_1_16: {{ ubtu18cis_rule_4_1_16 }} +ubuntu18cis_rule_4_1_17: {{ ubtu18cis_rule_4_1_17 }} +ubuntu18cis_rule_4_2_1_1: {{ ubtu18cis_rule_4_2_1_1 }} +ubuntu18cis_rule_4_2_1_2: {{ ubtu18cis_rule_4_2_1_2 }} +ubuntu18cis_rule_4_2_1_3: {{ ubtu18cis_rule_4_2_1_3 }} +ubuntu18cis_rule_4_2_1_4: {{ ubtu18cis_rule_4_2_1_4 }} +ubuntu18cis_rule_4_2_1_5: {{ ubtu18cis_rule_4_2_1_5 }} +ubuntu18cis_rule_4_2_1_6: {{ ubtu18cis_rule_4_2_1_6 }} +ubuntu18cis_rule_4_2_2_1: {{ ubtu18cis_rule_4_2_2_1 }} +ubuntu18cis_rule_4_2_2_2: {{ ubtu18cis_rule_4_2_2_2 }} +ubuntu18cis_rule_4_2_2_3: {{ ubtu18cis_rule_4_2_2_3 }} +ubuntu18cis_rule_4_2_3: {{ ubtu18cis_rule_4_2_3 }} +ubuntu18cis_rule_4_3: {{ ubtu18cis_rule_4_3 }} +ubuntu18cis_rule_4_4: {{ ubtu18cis_rule_4_4 }} + +# Section 5 +ubuntu18cis_rule_5_1_1: {{ ubtu18cis_rule_5_1_1 }} +ubuntu18cis_rule_5_1_2: {{ ubtu18cis_rule_5_1_2 }} +ubuntu18cis_rule_5_1_3: {{ ubtu18cis_rule_5_1_3 }} +ubuntu18cis_rule_5_1_4: {{ ubtu18cis_rule_5_1_4 }} +ubuntu18cis_rule_5_1_5: {{ ubtu18cis_rule_5_1_5 }} +ubuntu18cis_rule_5_1_6: {{ ubtu18cis_rule_5_1_6 }} +ubuntu18cis_rule_5_1_7: {{ ubtu18cis_rule_5_1_7 }} +ubuntu18cis_rule_5_1_8: {{ ubtu18cis_rule_5_1_8 }} +ubuntu18cis_rule_5_1_9: {{ ubtu18cis_rule_5_1_9 }} +ubuntu18cis_rule_5_2_1: {{ ubtu18cis_rule_5_2_1 }} +ubuntu18cis_rule_5_2_2: {{ 
ubtu18cis_rule_5_2_2 }} +ubuntu18cis_rule_5_2_3: {{ ubtu18cis_rule_5_2_3 }} +ubuntu18cis_rule_5_3_1: {{ ubtu18cis_rule_5_3_1 }} +ubuntu18cis_rule_5_3_2: {{ ubtu18cis_rule_5_3_2 }} +ubuntu18cis_rule_5_3_3: {{ ubtu18cis_rule_5_3_3 }} +ubuntu18cis_rule_5_3_4: {{ ubtu18cis_rule_5_3_4 }} +ubuntu18cis_rule_5_3_5: {{ ubtu18cis_rule_5_3_5 }} +ubuntu18cis_rule_5_3_6: {{ ubtu18cis_rule_5_3_6 }} +ubuntu18cis_rule_5_3_7: {{ ubtu18cis_rule_5_3_7 }} +ubuntu18cis_rule_5_3_8: {{ ubtu18cis_rule_5_3_8 }} +ubuntu18cis_rule_5_3_9: {{ ubtu18cis_rule_5_3_9 }} +ubuntu18cis_rule_5_3_10: {{ ubtu18cis_rule_5_3_10 }} +ubuntu18cis_rule_5_3_11: {{ ubtu18cis_rule_5_3_11 }} +ubuntu18cis_rule_5_3_12: {{ ubtu18cis_rule_5_3_12 }} +ubuntu18cis_rule_5_3_13: {{ ubtu18cis_rule_5_3_13 }} +ubuntu18cis_rule_5_3_14: {{ ubtu18cis_rule_5_3_14 }} +ubuntu18cis_rule_5_3_15: {{ ubtu18cis_rule_5_3_15 }} +ubuntu18cis_rule_5_3_16: {{ ubtu18cis_rule_5_3_16 }} +ubuntu18cis_rule_5_3_17: {{ ubtu18cis_rule_5_3_17 }} +ubuntu18cis_rule_5_3_18: {{ ubtu18cis_rule_5_3_18 }} +ubuntu18cis_rule_5_3_19: {{ ubtu18cis_rule_5_3_19 }} +ubuntu18cis_rule_5_3_20: {{ ubtu18cis_rule_5_3_20 }} +ubuntu18cis_rule_5_3_21: {{ ubtu18cis_rule_5_3_21 }} +ubuntu18cis_rule_5_3_22: {{ ubtu18cis_rule_5_3_22 }} +ubuntu18cis_rule_5_4_1: {{ ubtu18cis_rule_5_4_1 }} +ubuntu18cis_rule_5_4_2: {{ ubtu18cis_rule_5_4_2 }} +ubuntu18cis_rule_5_4_3: {{ ubtu18cis_rule_5_4_3 }} +ubuntu18cis_rule_5_4_4: {{ ubtu18cis_rule_5_4_4 }} +ubuntu18cis_rule_5_5_1_1: {{ ubtu18cis_rule_5_5_1_1 }} +ubuntu18cis_rule_5_5_1_2: {{ ubtu18cis_rule_5_5_1_2 }} +ubuntu18cis_rule_5_5_1_3: {{ ubtu18cis_rule_5_5_1_3 }} +ubuntu18cis_rule_5_5_1_4: {{ ubtu18cis_rule_5_5_1_4 }} +ubuntu18cis_rule_5_5_1_5: {{ ubtu18cis_rule_5_5_1_5 }} +ubuntu18cis_rule_5_5_2: {{ ubtu18cis_rule_5_5_2 }} +ubuntu18cis_rule_5_5_3: {{ ubtu18cis_rule_5_5_3 }} +ubuntu18cis_rule_5_5_4: {{ ubtu18cis_rule_5_5_4 }} +ubuntu18cis_rule_5_5_5: {{ ubtu18cis_rule_5_5_5 }} +ubuntu18cis_rule_5_6: {{ ubtu18cis_rule_5_6 }} +ubuntu18cis_rule_5_7: {{ ubtu18cis_rule_5_7 }} + +# Section 6 +ubuntu18cis_rule_6_1_1: {{ ubtu18cis_rule_6_1_1 }} +ubuntu18cis_rule_6_1_2: {{ ubtu18cis_rule_6_1_2 }} +ubuntu18cis_rule_6_1_3: {{ ubtu18cis_rule_6_1_3 }} +ubuntu18cis_rule_6_1_4: {{ ubtu18cis_rule_6_1_4 }} +ubuntu18cis_rule_6_1_5: {{ ubtu18cis_rule_6_1_5 }} +ubuntu18cis_rule_6_1_6: {{ ubtu18cis_rule_6_1_6 }} +ubuntu18cis_rule_6_1_7: {{ ubtu18cis_rule_6_1_7 }} +ubuntu18cis_rule_6_1_8: {{ ubtu18cis_rule_6_1_8 }} +ubuntu18cis_rule_6_1_9: {{ ubtu18cis_rule_6_1_9 }} +ubuntu18cis_rule_6_1_10: {{ ubtu18cis_rule_6_1_10 }} +ubuntu18cis_rule_6_1_11: {{ ubtu18cis_rule_6_1_11 }} +ubuntu18cis_rule_6_1_12: {{ ubtu18cis_rule_6_1_12 }} +ubuntu18cis_rule_6_1_13: {{ ubtu18cis_rule_6_1_13 }} +ubuntu18cis_rule_6_1_14: {{ ubtu18cis_rule_6_1_14 }} + +ubuntu18cis_rule_6_2_1: {{ ubtu18cis_rule_6_2_1 }} +ubuntu18cis_rule_6_2_2: {{ ubtu18cis_rule_6_2_2 }} +ubuntu18cis_rule_6_2_3: {{ ubtu18cis_rule_6_2_3 }} +ubuntu18cis_rule_6_2_4: {{ ubtu18cis_rule_6_2_4 }} +ubuntu18cis_rule_6_2_5: {{ ubtu18cis_rule_6_2_5 }} +ubuntu18cis_rule_6_2_6: {{ ubtu18cis_rule_6_2_6 }} +ubuntu18cis_rule_6_2_7: {{ ubtu18cis_rule_6_2_7 }} +ubuntu18cis_rule_6_2_8: {{ ubtu18cis_rule_6_2_8 }} +ubuntu18cis_rule_6_2_9: {{ ubtu18cis_rule_6_2_9 }} +ubuntu18cis_rule_6_2_10: {{ ubtu18cis_rule_6_2_10 }} +ubuntu18cis_rule_6_2_11: {{ ubtu18cis_rule_6_2_11 }} +ubuntu18cis_rule_6_2_12: {{ ubtu18cis_rule_6_2_12 }} +ubuntu18cis_rule_6_2_13: {{ ubtu18cis_rule_6_2_13 }} +ubuntu18cis_rule_6_2_14: {{ ubtu18cis_rule_6_2_14 }} 
+ubuntu18cis_rule_6_2_15: {{ ubtu18cis_rule_6_2_15 }} +ubuntu18cis_rule_6_2_16: {{ ubtu18cis_rule_6_2_16 }} +ubuntu18cis_rule_6_2_17: {{ ubtu18cis_rule_6_2_17 }} + +# AIDE +ubuntu18cis_config_aide: true + +# aide setup via - cron, timer +ubuntu18cis_aide_scan: cron + +# AIDE cron settings +ubuntu18_aide_cron: + cron_user: {{ ubtu18cis_aide_cron.cron_user }} + cron_file: {{ ubtu18cis_aide_cron.cron_file }} + aide_job: {{ ubtu18cis_aide_cron.aide_job }} + aide_minute: {{ ubtu18cis_aide_cron.aide_minute }} + aide_hour: {{ ubtu18cis_aide_cron.aide_hour }} + aide_day: '{{ ubtu18cis_aide_cron.aide_day }}' + aide_month: '{{ ubtu18cis_aide_cron.aide_month }}' + aide_weekday: '{{ ubtu18cis_aide_cron.aide_weekday }}' + +# 1.1 +ubuntu18cis_allow_autofs: {{ ubtu18cis_allow_autofs }} + +# 1.4 +ubuntu18cis_grub_conf_file: /boot/grub/grub.cfg +ubuntu18cis_grub_username: root +ubuntu18cis_grub_hash: blah +# 1.5.1 Bootloader password +ubuntu18cis_bootloader_password: {{ ubtu18cis_root_pw }} + +# 1.6 - Only have apparmor enforcing +ubuntu18cis_apparmor_enforce_only: false + +# Warning Banner Content (issue, issue.net, motd) +ubuntu18_warning_banner: {{ ubtu18cis_warning_banner }} +# End Banner + +# Section 2 +# Time sync - can be timesync or chriny or ntp +ubuntu18cis_time_service: {{ ubtu18cis_time_sync_tool }} +ubuntu18cis_ntp_servers: {{ ubtu18cis_time_synchronization_servers }} +ubuntu18cis_ntp_fallback: {{ ubtu18cis_ntp_fallback_server_list }} +ubuntu18cis_ntp_root_distance: + +# Whether or not to run tasks related to auditing/patching the desktop environment +ubuntu18cis_gui: {{ ubtu18cis_desktop_required }} + +# Service configuration booleans set true to keep service +ubuntu18cis_avahi_server: {{ ubtu18cis_avahi_server }} +ubuntu18cis_cups_server: {{ ubtu18cis_cups_server }} +ubuntu18cis_nfs_server: {{ ubtu18cis_nfs_server }} +ubuntu18cis_dhcp_server: {{ ubtu18cis_dhcp_server }} +ubuntu18cis_ldap_server: {{ ubtu18cis_ldap_server }} +ubuntu18cis_dns_server: {{ ubtu18cis_dns_server }} +ubuntu18cis_vsftpd_server: {{ ubtu18cis_vsftpd_server }} +ubuntu18cis_httpd_server: {{ ubtu18cis_httpd_server }} +ubuntu18cis_is_mail_server: false +ubuntu18cis_dovecot_server: {{ ubtu18cis_dovecot_server }} +ubuntu18cis_samba_server: {{ ubtu18cis_smb_server }} +ubuntu18cis_squid_server: {{ ubtu18cis_squid_server }} +ubuntu18cis_snmp_server: {{ ubtu18cis_snmp_server }} + +# Mail Server config +{% if ubtu18_cis_mail_transfer_agent is defined %} +ubuntu18cis_mailserver: {{ ubtu18_cis_mail_transfer_agent }} +{% else %} +ubuntu18cis_mailserver: Not_defined +{% endif %} +ubuntu18_exim_conf: + - dc_eximconfig_configtype='local' + - dc_local_interfaces='127.0.0.1 ; ::1' + - dc_readhost='' + - dc_relay_domains='' + - dc_minimaldns='false' + - dc_relay_nets='' + - dc_smarthost='' + - dc_use_split_config='false' + - dc_hide_mailname='' + - dc_mailname_in_oh='true' + - dc_localdelivery='mail_spool' + + +ubuntu18cis_rsyncd_server: {{ ubtu18cis_rsync_server }} +ubuntu18cis_nis_server: {{ ubtu18cis_nis_server }} + +ubuntu18cis_xwindows_required: false + +# 2.2 client services +ubuntu18cis_rsh_required: {{ ubtu18cis_rsh_required }} +ubuntu18cis_talk_required: {{ ubtu18cis_talk_required }} +ubuntu18cis_telnet_required: {{ ubtu18cis_telnet_required }} +ubuntu18cis_ldap_clients_required: {{ ubtu18cis_ldap_clients_required }} +ubuntu18cis_rpc_required: {{ ubtu18cis_rpc_server }} + + +# Section 3 +# IPv6 required +ubuntu18cis_ipv6_required: {{ ubtu18cis_ipv6_required }} + +# System network parameters (host only OR host and router) 
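+# Note: host-only generally means IP forwarding (net.ipv4.ip_forward) is expected to be disabled;
+# set this to true only for systems that are intended to route traffic between networks.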
+ubuntu18cis_is_router: false + + +ubuntu18cis_firewall: {{ ubtu18cis_firewall_package }} + +ubuntu18_default_firewall_zone: public +ubuntu18_firewall_interface: + - ['ens224'] + - ['ens192'] +ubuntu18_firewall_services: + - ssh + - dhcpv6-client + +### Section 4 +## auditd settings +ubuntu18cis_auditd: + space_left_action: email + action_mail_acct: root + admin_space_left_action: {{ ubtu18cis_auditd.admin_space_left_action }} + max_log_file_action: {{ ubtu18cis_auditd.max_log_file_action }} + auditd_backlog_limit: {{ ubtu18cis_audit_back_log_limit }} + +## syslog +ubuntu18cis_is_syslog_server: {{ ubtu18cis_system_is_log_server }} +### Section 5 +ubuntu18cis_sshd_limited: false +# Note the following to understand precedence and layout +ubuntu18cis_sshd_access: + - AllowUser + - AllowGroup + - DenyUser + - DenyGroup + +ubuntu18cis_ssh_strong_ciphers: Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr +ubuntu18cis_ssh_weak_ciphers: + - 3des-cbc + - aes128-cbc + - aes192-cbc + - aes256-cbc + - arcfour + - arcfour128 + - arcfour256 + - blowfish-cbc + - cast128-cbc + - rijndael-cbc@lysator.liu.se + +ubuntu18cis_ssh_strong_macs: MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256 +ubuntu18cis_ssh_weak_macs: + - hmac-md5 + - hmac-md5-96 + - hmac-ripemd160 + - hmac-sha1 + - hmac-sha1-96 + - umac-64@openssh.com + - umac-128@openssh.com + - hmac-md5-etm@openssh.com + - hmac-md5-96-etm@openssh.com + - hmac-ripemd160-etm@openssh.com + - hmac-sha1-etm@openssh.com + - hmac-sha1-96-etm@openssh.com + - umac-64-etm@openssh.com + - umac-128-etm@openssh.com + +ubuntu18cis_ssh_strong_kex: KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256 +ubuntu18cis_ssh_weak_kex: + - diffie-hellman-group1-sha1 + - diffie-hellman-group14-sha1 + - diffie-hellman-group-exchange-sha1 + + +ubuntu18cis_ssh_aliveinterval: 300 +ubuntu18cis_ssh_countmax: 3 +## PAM +ubuntu18cis_pam_password: + minlen: "14" + minclass: "4" + +ubuntu18cis_pam_passwd_retry: "3" + +# choose one of below +ubuntu18cis_pwhistory_so: "14" +ubuntu18cis_unix_so: false +ubuntu18cis_passwd_remember: {{ ubtu18cis_pamd_pwhistory_remember }} + +# logins.def password settings +ubuntu18cis_pass: + max_days: {{ ubtu18cis_pass.max_days }} + min_days: {{ ubtu18cis_pass.min_days }} + warn_age: {{ ubtu18cis_pass.warn_age }} + +# set sugroup if differs from wheel +ubuntu18cis_sugroup: {{ ubtu18cis_su_group }} + +# sugroup users list +ubuntu18_sugroup_users: "root" + +# var log location variable +ubuntu18_varlog_location: {{ ubtu18cis_sudo_logfile }} diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/chrony.conf.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/chrony.conf.j2 new file mode 100644 index 0000000..51d7254 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/chrony.conf.j2 @@ -0,0 +1,3 @@ +-w /var/run/utmp -p wa -k session +-w /var/log/wtmp -p wa -k logins +-w /var/log/btmp -p wa -k logins diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_10_access.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_10_access.rules.j2 new file mode 100644 index 0000000..880c77f --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_10_access.rules.j2 @@ -0,0 
+1,7 @@ +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_11_privileged.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_11_privileged.rules.j2 new file mode 100644 index 0000000..0dc5f52 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_11_privileged.rules.j2 @@ -0,0 +1,4 @@ +{% for proc in priv_procs.stdout_lines -%} +-a always,exit -F path={{ proc }} -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged +{% endfor %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_12_audit.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_12_audit.rules.j2 new file mode 100644 index 0000000..9db0365 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_12_audit.rules.j2 @@ -0,0 +1,5 @@ +-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_13_delete.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_13_delete.rules.j2 new file mode 100644 index 0000000..065757a --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_13_delete.rules.j2 @@ -0,0 +1,5 @@ +-a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_14_scope.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_14_scope.rules.j2 new file mode 100644 index 0000000..f1784bd --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_14_scope.rules.j2 @@ -0,0 +1,3 @@ +-w /etc/sudoers -p wa -k scope +-w /etc/sudoers.d/ -p wa -k scope + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_15_actions.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_15_actions.rules.j2 new file mode 100644 index 0000000..53824fb --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_15_actions.rules.j2 @@ -0,0 +1,5 @@ +-a always,exit -F arch=b32 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_16_modules.rules.j2 
b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_16_modules.rules.j2 new file mode 100644 index 0000000..58216e1 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_16_modules.rules.j2 @@ -0,0 +1,10 @@ +-w /sbin/insmod -p x -k modules +-w /sbin/rmmod -p x -k modules +-w /sbin/modprobe -p x -k modules +{% if ansible_architecture != 'x86_64' -%} +-a always,exit -F arch=b32 -S init_module -S delete_module -k modules +{% endif %} +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S init_module -S delete_module -k modules +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_17_99finalize.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_17_99finalize.rules.j2 new file mode 100644 index 0000000..a2b3aa0 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_17_99finalize.rules.j2 @@ -0,0 +1,2 @@ +-e 2 + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_3_timechange.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_3_timechange.rules.j2 new file mode 100644 index 0000000..f531292 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_3_timechange.rules.j2 @@ -0,0 +1,8 @@ +-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change +-a always,exit -F arch=b32 -S clock_settime -k time-change +-w /etc/localtime -p wa -k time-change +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change +-a always,exit -F arch=b64 -S clock_settime -k time-change +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_4_identity.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_4_identity.rules.j2 new file mode 100644 index 0000000..c8bded4 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_4_identity.rules.j2 @@ -0,0 +1,6 @@ +-w /etc/group -p wa -k identity +-w /etc/passwd -p wa -k identity +-w /etc/gshadow -p wa -k identity +-w /etc/shadow -p wa -k identity +-w /etc/security/opasswd -p wa -k identity + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_5_systemlocale.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_5_systemlocale.rules.j2 new file mode 100644 index 0000000..74e4065 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_5_systemlocale.rules.j2 @@ -0,0 +1,9 @@ +-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale +{% endif %} +-w /etc/issue -p wa -k system-locale +-w /etc/issue.net -p wa -k system-locale +-w /etc/hosts -p wa -k system-locale +-w /etc/network -p wa -k system-locale + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_6_macpolicy.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_6_macpolicy.rules.j2 new file mode 100644 index 0000000..bfbf2c3 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_6_macpolicy.rules.j2 @@ -0,0 +1,3 @@ +-w /etc/apparmor/ -p wa -k MAC-policy +-w /etc/apparmor.d/ -p wa -k MAC-policy + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_7_logins.rules.j2 
b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_7_logins.rules.j2 new file mode 100644 index 0000000..3ead283 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_7_logins.rules.j2 @@ -0,0 +1,4 @@ +-w /var/log/faillog -p wa -k logins +-w /var/log/lastlog -p wa -k logins +-w /var/log/tallylog -p wa -k logins + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_8_session.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_8_session.rules.j2 new file mode 100644 index 0000000..f9e3dbf --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_8_session.rules.j2 @@ -0,0 +1,4 @@ +-w /var/run/utmp -p wa -k session +-w /var/log/wtmp -p wa -k logins +-w /var/log/btmp -p wa -k logins + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_9_permmod.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_9_permmod.rules.j2 new file mode 100644 index 0000000..09dacb3 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/audit/ubtu18cis_4_1_9_permmod.rules.j2 @@ -0,0 +1,9 @@ +-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod +{% endif %} + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/chrony.conf.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/chrony.conf.j2 new file mode 100644 index 0000000..348ec9d --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/chrony.conf.j2 @@ -0,0 +1,91 @@ +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usuable directives. + +# This will use (up to): +# - 4 sources from ntp.ubuntu.com which some are ipv6 enabled +# - 2 sources from 2.ubuntu.pool.ntp.org which is ipv6 enabled as well +# - 1 source from [01].ubuntu.pool.ntp.org each (ipv4 only atm) +# This means by default, up to 6 dual-stack and up to 2 additional IPv4-only +# sources will be used. +# At the same time it retains some protection against one of the entries being +# down (compare to just using one of the lines). See (LP: #1754358) for the +# discussion. +# +# About using servers from the NTP Pool Project in general see (LP: #104525). +# Approved by Ubuntu Technical Board on 2011-02-08. +# See http://www.pool.ntp.org/join.html for more information. + +{% for server in ubtu18cis_time_synchronization_servers -%} +server {{ server }} {{ ubtu18cis_chrony_server_options }} +{% endfor %} + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# Set runtime command key. 
Note that if you change the key (not the +# password) to anything other than 1 you will need to edit +# /etc/ppp/ip-up.d/chrony, /etc/ppp/ip-down.d/chrony, /etc/init.d/chrony +# and /etc/cron.weekly/chrony as these scripts use it to get the password. + +#commandkey 1 + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. +rtcsync + +# Dump measurements when daemon exits. +dumponexit + +# Specify directory for dumping measurements. + +dumpdir /var/lib/chrony + +# Let computer be a server when it is unsynchronised. + +local stratum 10 + +# Allow computers on the unrouted nets to use the server. + +#allow 10/8 +#allow 192.168/16 +#allow 172.16/12 + +# This directive forces `chronyd' to send a message to syslog if it +# makes a system clock adjustment larger than a threshold value in seconds. + +logchange 0.5 + +# This directive defines an email address to which mail should be sent +# if chronyd applies a correction exceeding a particular threshold to the +# system clock. + +# mailonchange root@localhost 0.5 + +# This directive tells chrony to regulate the real-time clock and tells it +# Where to store related data. It may not work on some newer motherboards +# that use the HPET real-time clock. It requires enhanced real-time +# support in the kernel. I've commented it out because with certain +# combinations of motherboard and kernel it is reported to cause lockups. + +# rtcfile /var/lib/chrony/chrony.rtc + +# If the last line of this file reads 'rtconutc' chrony will assume that +# the CMOS clock is on UTC (GMT). If it reads '# rtconutc' or is absent +# chrony will assume local time. The line (if any) was written by the +# chrony postinst based on what it found in /etc/default/rcS. You may +# change it if necessary. +rtconutc diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.j2 new file mode 100644 index 0000000..2f44141 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.j2 @@ -0,0 +1 @@ +{{ ubtu18cis_warning_banner }} diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.net.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.net.j2 new file mode 100644 index 0000000..2f44141 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/issue.net.j2 @@ -0,0 +1 @@ +{{ ubtu18cis_warning_banner }} diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/motd.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/motd.j2 new file mode 100644 index 0000000..2f44141 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/etc/motd.j2 @@ -0,0 +1 @@ +{{ ubtu18cis_warning_banner }} diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/hosts.allow.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/hosts.allow.j2 new file mode 100644 index 0000000..fdbaec6 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/hosts.allow.j2 @@ -0,0 +1,10 @@ +# /etc/hosts.allow: list of hosts that are allowed to access the system. +# See the manual pages hosts_access(5) and hosts_options(5). 
+# +# Example: ALL: LOCAL @some_netgroup +# ALL: .foobar.edu EXCEPT terminalserver.foobar.edu +# +# If you're going to protect the portmapper use the name "rpcbind" for the +# daemon name. See rpcbind(8) and rpc.mountd(8) for further information. +# +ALL: {% for iprange in ubtu18cis_host_allow -%}{{ iprange }}{% if not loop.last %}, {% endif %}{% endfor %} diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ntp.conf.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ntp.conf.j2 new file mode 100644 index 0000000..fd5eafe --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ntp.conf.j2 @@ -0,0 +1,68 @@ +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile /var/lib/ntp/ntp.drift + +# Leap seconds definition provided by tzdata +leapfile /usr/share/zoneinfo/leap-seconds.list + +# Enable this if you want statistics to be logged. +#statsdir /var/log/ntpstats/ + +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + +# Specify one or more NTP servers. + +# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board +# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for +# more information. +{% for server in ubtu18cis_time_synchronization_servers -%} +server {{ server }} {{ ubtu18cis_ntp_server_options }} +{% endfor %} + +# Use Ubuntu's ntp server as a fallback. +pool ntp.ubuntu.com + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page +# might also be helpful. +# +# Note that "restrict" applies to both servers and clients, so a configuration +# that might be intended to block requests from certain clients could also end +# up blocking replies from your own upstream servers. + +# By default, exchange time with everybody, but don't allow configuration. +restrict -4 default kod notrap nomodify nopeer noquery +restrict -6 default kod notrap nomodify nopeer noquery + +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 + +# Needed for adding pool entries +restrict source notrap nomodify noquery + +# Clients from this (example!) subnet have unlimited access, but only if +# cryptographically authenticated. +#restrict 192.168.123.0 mask 255.255.255.0 notrust + + +# If you want to provide time to your local subnet, change the next line. +# (Again, the address is an example only.) +#broadcast 192.168.123.255 + +# If you want to listen to time broadcasts on your local subnet, de-comment the +# next lines. Please do this only if you trust everybody on the network! 
+#disable auth +#broadcastclient + +#Changes recquired to use pps synchonisation as explained in documentation: +#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918 + +#server 127.127.8.1 mode 135 prefer # Meinberg GPS167 with PPS +#fudge 127.127.8.1 time1 0.0042 # relative to PPS for my hardware + +#server 127.127.22.1 # ATOM(PPS) +#fudge 127.127.22.1 flag3 1 # enable PPS API \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ubtu18cis_4_1_3_timechange64.rules.j2 b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ubtu18cis_4_1_3_timechange64.rules.j2 new file mode 100644 index 0000000..bd8666d --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/templates/ubtu18cis_4_1_3_timechange64.rules.j2 @@ -0,0 +1,5 @@ +-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change +-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change +-a always,exit -F arch=b64 -S clock_settime -k time-change +-a always,exit -F arch=b32 -S clock_settime -k time-change +-w /etc/localtime -p wa -k time-change \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tests/inventory b/Linux/ansible-lockdown/UBUNTU18-CIS/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/tests/test.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/tests/test.yml new file mode 100644 index 0000000..73c38bc --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - UBUNTU18-CIS \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU18-CIS/vars/main.yml b/Linux/ansible-lockdown/UBUNTU18-CIS/vars/main.yml new file mode 100644 index 0000000..0e6073b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU18-CIS/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for UBUNTU18-CIS \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/.DS_Store b/Linux/ansible-lockdown/UBUNTU20-CIS/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..65b247f9f66373f49f0554e2803405f42c5481fd GIT binary patch literal 8196 zcmeHMO>Wab6n@jD#A$_y5K_y|8zdG{m8hi!A*7Hdi%NhHL9hVS+I46w9J`90MkNGg z!yWiJ0Y`xga1agv-6WWN0Y2>q(UBWOF3!s{-loAQU=%P47zK<1MuC4p0lc%>tU2#})oV(lfKlMT zRDhokHV($R#)(4t=s+P;0AK;l(omN_{ed<&0P7kj3ef@+iV9Rzp;HW@GY6sQ=&x)4 zi9$suA!o)odS;xK$~<-J|&s_{Vw`${*oq%#dAOE z(tzBwxA=gY(VFk=4&xL3ZQz@G68Ldat^N>8%Y~IoMax>XRxhuofjc z_=aEw;Pq$xe`frP^nmthk6QGI+SE#qPQ2|Or#7;H6?d=-C*#Mfp7txz6O4GDn)DF3 z36_^;$aPOMEGUAo!|1uXa7d+QSm9NG^CtHSr$;WyQG0X(MI2erXvkNY3fCOQRLFk+ z+aY^>G^$m1wk!7STh%|tEXO2lZp?l`XU?^H@loL72IHX3>CS{TZ$t+hF%y~`$|+e3 z2z)0eBoV?XKlA?QT{C7BSeydOYN`#+|6B9F|1WMqW+tP6QDET;h(fd7Y+%~+-?`Zp xaIS6RIK;umeiMa~fKZ2s(E>9c0;CM4FbXVGf!`qAMQ;EA literal 0 HcmV?d00001 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/.ansible-lint b/Linux/ansible-lockdown/UBUNTU20-CIS/.ansible-lint new file mode 100755 index 0000000..f2a7e7c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/.ansible-lint @@ -0,0 +1,11 @@ +parseable: true +quiet: true +skip_list: + - '204' + - '305' + - '303' + - '403' + - '306' + - '602' +use_default_rules: true +verbosity: 0 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/.travis.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/.travis.yml new file mode 100644 index 0000000..36bbf62 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/.travis.yml @@ -0,0 +1,29 @@ +--- +language: 
python +python: "2.7" + +# Use the new container infrastructure +sudo: false + +# Install ansible +addons: + apt: + packages: + - python-pip + +install: + # Install ansible + - pip install ansible + + # Check ansible version + - ansible --version + + # Create ansible.cfg with correct roles_path + - printf '[defaults]\nroles_path=../' >ansible.cfg + +script: + # Basic role syntax check + - ansible-playbook tests/test.yml -i tests/inventory --syntax-check + +notifications: + webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/.yamllint b/Linux/ansible-lockdown/UBUNTU20-CIS/.yamllint new file mode 100755 index 0000000..93378b9 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/.yamllint @@ -0,0 +1,20 @@ +--- +ignore: | + tests/ + molecule/ + .gitlab-ci.yml + *molecule.yml + +extends: default + +rules: + indentation: + spaces: 4 + truthy: disable + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/CONTRIBUTING.rst b/Linux/ansible-lockdown/UBUNTU20-CIS/CONTRIBUTING.rst new file mode 100644 index 0000000..76c3a8a --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/CONTRIBUTING.rst @@ -0,0 +1,69 @@ +Contributing to MindPoint Group Projects +======================================== + +Rules +----- +1) All commits must be GPG signed (details in Signing section) +2) All commits must have Signed-off-by (Signed-off-by: Joan Doe ) in the commit message (details in Signing section) +3) All work is done in your own branch or own fork +4) Pull requests + a) From within the repo: All pull requests go into the devel branch. There are automated checks for signed commits, signoff in commit message, and functional testing + b) From a forked repo: All pull requests will go into a staging branch within the repo. There are automated checks for signed commits, signoff in commit message, and functional testing when going from staging to devel +4) All pull requests go into the devel branch. There are automated checks for signed commits, signoff in commit message, and functional testing) +5) Be open and nice to each other + +Workflow +-------- +- Your work is done in your own individual branch. Make sure to to Signed-off and GPG sign all commits you intend to merge +- All community Pull Requests are into the devel branch. There are automated checks for GPG signed, Signed-off in commits, and functional tests before being approved. If your pull request comes in from outside of our repo, the pull request will go into a staging branch. There is info needed from our repo for our CI/CD testing. +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release +Signing your contribution +------------------------- + +We've chosen to use the Developer's Certificate of Origin (DCO) method +that is employed by the Linux Kernel Project, which provides a simple +way to contribute to MindPoint Group projects. 
+ +The process is to certify the below DCO 1.1 text +:: + + Developer's Certificate of Origin 1.1 + + By making a contribution to this project, I certify that: + + (a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + + (b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + + (c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + + (d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +:: + +Then, when it comes time to submit a contribution, include the +following text in your contribution commit message: + +:: + + Signed-off-by: Joan Doe + +:: + + +This message can be entered manually, or if you have configured git +with the correct `user.name` and `user.email`, you can use the `-s` +option to `git commit` to automatically include the signoff message. \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/LICENSE b/Linux/ansible-lockdown/UBUNTU20-CIS/LICENSE new file mode 100644 index 0000000..3ae3c23 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Mindpoint Group / Lockdown Enterprise / Lockdown Enterprise Releases + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/README.md b/Linux/ansible-lockdown/UBUNTU20-CIS/README.md new file mode 100644 index 0000000..f8ace54 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/README.md @@ -0,0 +1,85 @@ +Ubuntu 20 CIS +========= + +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/UBUNTU20-CIS/CommunityToDevel?label=Devel%20Build%20Status&style=plastic) +![Build Status](https://img.shields.io/github/workflow/status/ansible-lockdown/UBUNTU20-CIS/DevelToMaster?label=Main%20Build%20Status&style=plastic) +![Release](https://img.shields.io/github/v/release/ansible-lockdown/UBUNTU20-CIS?style=plastic) + +Configure an Ubuntu 20 machine to be [CIS](https://www.cisecurity.org/cis-benchmarks/) v2.1.0 compliant. There are some intrusive tasks that have a toggle in defaults/main.yml to disable the automated fix. + +Caution(s) +--------- + +This role **will make changes to the system** that could break things. This is not an auditing tool but rather a remediation tool to be used after an audit has been conducted. + +This role was developed against a clean install of the Operating System. If you are implementing it on an existing system, please review this role for any site specific changes that are needed. + +To use the release version, please point to the main branch. +Based on +[CIS_Ubuntu_Linux_20.04_LTS_Benchmark](https://community.cisecurity.org/collab/public/index.php). + +Documentation +------------- + +- [Getting Started](https://www.lockdownenterprise.com/docs/getting-started-with-lockdown) +- [Customizing Roles](https://www.lockdownenterprise.com/docs/customizing-lockdown-enterprise) +- [Per-Host Configuration](https://www.lockdownenterprise.com/docs/per-host-lockdown-enterprise-configuration) +- [Getting the Most Out of the Role](https://www.lockdownenterprise.com/docs/get-the-most-out-of-lockdown-enterprise) +- [Wiki](https://github.com/ansible-lockdown/UBUNTU20-CIS/wiki) +- [Repo GitHub Page](https://ansible-lockdown.github.io/UBUNTU20-CIS/) + +Requirements +------------ + +**General:** + +- Basic knowledge of Ansible; below are some links to the Ansible documentation to help get started if you are unfamiliar with Ansible + - [Main Ansible documentation page](https://docs.ansible.com) + - [Ansible Getting Started](https://docs.ansible.com/ansible/latest/user_guide/intro_getting_started.html) + - [Tower User Guide](https://docs.ansible.com/ansible-tower/latest/html/userguide/index.html) + - [Ansible Community Info](https://docs.ansible.com/ansible/latest/community/index.html) +- Functioning Ansible and/or Tower installed, configured, and running. This includes all of the base Ansible/Tower configurations, needed packages installed, and infrastructure setup. +- Please read through the tasks in this role to gain an understanding of what each control is doing. Some of the tasks are disruptive and can have unintended consequences in a live production system. Also familiarize yourself with the variables in the defaults/main.yml file or the [Main Variables Wiki Page](https://github.com/ansible-lockdown/UBUNTU20-CIS/wiki/Main-Variables). + +**Technical Dependencies:** + +- Running Ansible/Tower setup (this role is tested against Ansible version 2.9.1 and newer) +- Python3 Ansible run environment + +Auditing (new) +-------------- + +This can be turned on or off within the defaults/main.yml file with the variable run_audit. The value is false by default; please refer to the wiki for more details.
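+As a rough illustration (an editor's sketch, not taken from the upstream docs), the audit could be enabled for a single run with a small playbook such as the one below; the inventory group name `ubuntu20` is an assumption:
+
+```yaml
+# minimal sketch - enable the goss-based pre/post remediation audit for this run
+- hosts: ubuntu20
+  become: true
+  vars:
+    setup_audit: true   # fetch the goss binary onto the target (see defaults/main.yml)
+    run_audit: true     # run the audit before and after remediation
+  roles:
+    - UBUNTU20-CIS
+```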
+ +This is a much quicker, very lightweight check of (where possible) config compliance and live/running settings. + +A new form of auditing has been developed, using a small (12MB) go binary called [goss](https://github.com/aelsabbahy/goss) along with the relevant configurations to check, without the need for additional infrastructure or other tooling. +This audit will not only check that the config has the correct setting but also aims to capture whether the system is actually running with that configuration, helping to remove [false positives](https://www.mindpointgroup.com/blog/is-compliance-scanning-still-relevant/) in the process. + +Refer to [UBUNTU20-CIS-Audit](https://github.com/ansible-lockdown/UBUNTU20-CIS-Audit). + +Further audit documentation can be found at [Audit-Docs](https://github.com/ansible-lockdown/UBUNTU20-CIS-Audit/docs/Security_remediation_and_auditing.md). + +Role Variables +-------------- + +This role is designed so that the end user should not have to edit the tasks themselves. All customizing should be done via the defaults/main.yml file or with extra vars within the project, job, workflow, etc. These variables can be found [here](https://github.com/ansible-lockdown/UBUNTU20-CIS/wiki/Main-Variables) in the Main Variables Wiki page. All variables are listed there along with descriptions. + +Branches +-------- + +- **devel** - This is the default branch and the working development branch. Community pull requests will pull into this branch +- **main** - This is the release branch +- **reports** - This is a protected branch for our scoring reports, no code should ever go here +- **gh-pages** - This is the GitHub Pages branch +- **all other branches** - Individual community member branches + +Community Contribution +---------------------- + +We encourage you (the community) to contribute to this role. Please read the rules below. + +- Your work is done in your own individual branch. Make sure to sign off (Signed-off-by) and GPG sign all commits you intend to merge. +- All community Pull Requests are pulled into the devel branch +- Pull Requests into devel will confirm your commits have a GPG signature, Signed-off, and a functional test before being approved +- Once your changes are merged and a more detailed review is complete, an authorized member will merge your changes into the main branch for a new release diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/defaults/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/defaults/main.yml new file mode 100644 index 0000000..0d62798 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/defaults/main.yml @@ -0,0 +1,698 @@ +--- +# If you would like a report at the end according to OpenSCAP as to the report results +# then you should set ubtu20cis_oscap_scan to true/yes. +# NOTE: This requires the python_xmltojson package on the control host.
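+# As an editor's illustration only (not part of the upstream defaults), enabling the
+# OpenSCAP report and writing it to /var/tmp could look like the following overrides:
+#   ubtu20cis_oscap_scan: yes
+#   ubtu20cis_report_dir: /var/tmp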
+ubtu20cis_oscap_scan: no +ubtu20cis_report_dir: /tmp + +ubtu20cis_section1_patch: true +ubtu20cis_section2_patch: true +ubtu20cis_section3_patch: true +ubtu20cis_section4_patch: true +ubtu20cis_section5_patch: true +ubtu20cis_section6_patch: true + +# System will reboot if set to false, which can give better audit results +ubtu20_skip_reboot: True + +## Benchmark name used by auditing control role +# The audit variable found at the base +benchmark: UBUNTU20-CIS + +### The audit binary is required on the remote host +setup_audit: false +# How to retrieve the audit binary +# Options are copy or download - detailed settings at the bottom of this file +# you will need access to either GitHub or the file already downloaded +get_audit_binary_method: download + +# How to get the audit files onto the host +# options are git/copy/get_url/other e.g. if you wish to run from an already downloaded conf +audit_content: git + +# enable audits to run - this runs the audit and gets the latest content +run_audit: false + +# Run heavy tests - some tests have more impact on a system; enabling these can have a greater impact on a running system +audit_run_heavy_tests: true +# Timeout for those cmds that take longer to run, where a timeout is set +audit_cmd_timeout: 60000 + +### End Audit enablements #### + +# We've defined complexity-high to mean that we cannot automatically remediate +# the rule in question. In the future this might mean that the remediation +# may fail in some cases. +ubtu20cis_complexity_high: false + +# Show "changed" for complex items not remediated per complexity-high setting +# to make them stand out. "changed" items on a second run of the role would +# indicate items requiring manual review. +ubtu20cis_audit_complex: true + +# We've defined disruption-high to indicate items that are likely to cause +# disruption in a normal workflow. These items can be remediated automatically +# but are disabled by default to avoid disruption. +# Value of true runs disruptive tasks, value of false will skip disruptive tasks +ubtu20cis_disruption_high: true + +# Show "changed" for disruptive items not remediated per disruption-high +# setting to make them stand out.
+ubtu20cis_audit_disruptive: yes + +ubtu20cis_skip_for_travis: false + +ubtu20cis_workaround_for_disa_benchmark: true +ubtu20cis_workaround_for_ssg_benchmark: true + +# tweak role to run in a chroot, such as in kickstart %post script +ubtu20cis_system_is_chroot: "{{ ansible_is_chroot | default(False) }}" + +# tweak role to run in a non-privileged container +ubtu20cis_system_is_container: false + +# skip events for ec2 instance testing pipeline +system_is_ec2: false + +# Section 1 Fixes +# Section 1 is Initial setup (FileSystem Configuration, Configure Software Updates, Filesystem Integrity Checking, Secure Boot Settings, +# Additional Process Hardening, Mandatory Access Control, Command Line Warning Banners, and GNOME Display Manager) +ubtu20cis_rule_1_1_1_1: true +ubtu20cis_rule_1_1_1_2: true +ubtu20cis_rule_1_1_1_3: true +ubtu20cis_rule_1_1_1_4: true +ubtu20cis_rule_1_1_1_5: true +ubtu20cis_rule_1_1_1_6: true +ubtu20cis_rule_1_1_1_7: true +ubtu20cis_rule_1_1_2: true +ubtu20cis_rule_1_1_3: true +ubtu20cis_rule_1_1_4: true +ubtu20cis_rule_1_1_5: true +ubtu20cis_rule_1_1_6: true +ubtu20cis_rule_1_1_7: true +ubtu20cis_rule_1_1_8: true +ubtu20cis_rule_1_1_9: true +ubtu20cis_rule_1_1_10: true +ubtu20cis_rule_1_1_11: true +ubtu20cis_rule_1_1_12: true +ubtu20cis_rule_1_1_13: true +ubtu20cis_rule_1_1_14: true +ubtu20cis_rule_1_1_15: true +ubtu20cis_rule_1_1_16: true +ubtu20cis_rule_1_1_17: true +ubtu20cis_rule_1_1_18: true +ubtu20cis_rule_1_1_19: true +ubtu20cis_rule_1_1_20: true +ubtu20cis_rule_1_1_21: true +ubtu20cis_rule_1_1_22: true +ubtu20cis_rule_1_1_23: true +ubtu20cis_rule_1_1_24: true +ubtu20cis_rule_1_2_1: true +ubtu20cis_rule_1_2_2: true +ubtu20cis_rule_1_3_1: true +ubtu20cis_rule_1_3_2: true +ubtu20cis_rule_1_4_1: true +ubtu20cis_rule_1_4_2: true +ubtu20cis_rule_1_4_3: true +ubtu20cis_rule_1_4_4: true +ubtu20cis_rule_1_5_1: true +ubtu20cis_rule_1_5_2: true +ubtu20cis_rule_1_5_3: true +ubtu20cis_rule_1_5_4: true +ubtu20cis_rule_1_6_1_1: true +ubtu20cis_rule_1_6_1_2: true +ubtu20cis_rule_1_6_1_3: true +ubtu20cis_rule_1_6_1_4: true +ubtu20cis_rule_1_7_1: true +ubtu20cis_rule_1_7_2: true +ubtu20cis_rule_1_7_3: true +ubtu20cis_rule_1_7_4: true +ubtu20cis_rule_1_7_5: true +ubtu20cis_rule_1_7_6: true +ubtu20cis_rule_1_8_1: true +ubtu20cis_rule_1_8_2: true +ubtu20cis_rule_1_8_3: true +ubtu20cis_rule_1_8_4: true +ubtu20cis_rule_1_9: true + +# Section 2 Fixes +# Section 2 is Services (Special Purpose Services, and service clients) +ubtu20cis_rule_2_1_1_1: true +ubtu20cis_rule_2_1_1_2: true +ubtu20cis_rule_2_1_1_3: true +ubtu20cis_rule_2_1_1_4: true +ubtu20cis_rule_2_1_2: true +ubtu20cis_rule_2_1_3: true +ubtu20cis_rule_2_1_4: true +ubtu20cis_rule_2_1_5: true +ubtu20cis_rule_2_1_6: true +ubtu20cis_rule_2_1_7: true +ubtu20cis_rule_2_1_8: true +ubtu20cis_rule_2_1_9: true +ubtu20cis_rule_2_1_10: true +ubtu20cis_rule_2_1_11: true +ubtu20cis_rule_2_1_12: true +ubtu20cis_rule_2_1_13: true +ubtu20cis_rule_2_1_14: true +ubtu20cis_rule_2_1_15: true +ubtu20cis_rule_2_1_16: true +ubtu20cis_rule_2_1_17: true +ubtu20cis_rule_2_2_1: true +ubtu20cis_rule_2_2_2: true +ubtu20cis_rule_2_2_3: true +ubtu20cis_rule_2_2_4: true +ubtu20cis_rule_2_2_5: true +ubtu20cis_rule_2_2_6: true +ubtu20cis_rule_2_3: true + +# Section 3 Fixes +# Section 3 is Network Configuration (Disable Unused Networks, Network Parameters (Host Only), Network Parameters (Host and Router), Uncommon Network Protocols, and Firewall Configuration) +ubtu20cis_rule_3_1_1: true +ubtu20cis_rule_3_1_2: true +ubtu20cis_rule_3_2_1: true 
+ubtu20cis_rule_3_2_2: true +ubtu20cis_rule_3_3_1: true +ubtu20cis_rule_3_3_2: true +ubtu20cis_rule_3_3_3: true +ubtu20cis_rule_3_3_4: true +ubtu20cis_rule_3_3_5: true +ubtu20cis_rule_3_3_6: true +ubtu20cis_rule_3_3_7: true +ubtu20cis_rule_3_3_8: true +ubtu20cis_rule_3_3_9: true +ubtu20cis_rule_3_4_1: true +ubtu20cis_rule_3_4_2: true +ubtu20cis_rule_3_4_3: true +ubtu20cis_rule_3_4_4: true +ubtu20cis_rule_3_5_1_1: true +ubtu20cis_rule_3_5_1_2: true +ubtu20cis_rule_3_5_1_3: true +ubtu20cis_rule_3_5_1_4: true +ubtu20cis_rule_3_5_1_5: true +ubtu20cis_rule_3_5_1_6: true +ubtu20cis_rule_3_5_1_7: true +ubtu20cis_rule_3_5_2_1: true +ubtu20cis_rule_3_5_2_2: true +ubtu20cis_rule_3_5_2_3: true +ubtu20cis_rule_3_5_2_4: true +ubtu20cis_rule_3_5_2_5: true +ubtu20cis_rule_3_5_2_6: true +ubtu20cis_rule_3_5_2_7: true +ubtu20cis_rule_3_5_2_8: true +ubtu20cis_rule_3_5_2_9: true +ubtu20cis_rule_3_5_2_10: true +ubtu20cis_rule_3_5_3_1_1: true +ubtu20cis_rule_3_5_3_1_2: true +ubtu20cis_rule_3_5_3_1_3: true +ubtu20cis_rule_3_5_3_2_1: true +ubtu20cis_rule_3_5_3_2_2: true +ubtu20cis_rule_3_5_3_2_3: true +ubtu20cis_rule_3_5_3_2_4: true +ubtu20cis_rule_3_5_3_3_1: true +ubtu20cis_rule_3_5_3_3_2: true +ubtu20cis_rule_3_5_3_3_3: true +ubtu20cis_rule_3_5_3_3_4: true + +# Section 4 Fixes +# Section 4 is Logging and Auditing (Configure System Accounting (auditd), Configure Data Retention, and Configure Logging) +ubtu20cis_rule_4_1_1_1: true +ubtu20cis_rule_4_1_1_2: true +ubtu20cis_rule_4_1_1_3: true +ubtu20cis_rule_4_1_1_4: true +ubtu20cis_rule_4_1_2_1: true +ubtu20cis_rule_4_1_2_2: true +ubtu20cis_rule_4_1_2_3: true +ubtu20cis_rule_4_1_3: true +ubtu20cis_rule_4_1_4: true +ubtu20cis_rule_4_1_5: true +ubtu20cis_rule_4_1_6: true +ubtu20cis_rule_4_1_7: true +ubtu20cis_rule_4_1_8: true +ubtu20cis_rule_4_1_9: true +ubtu20cis_rule_4_1_10: true +ubtu20cis_rule_4_1_11: true +ubtu20cis_rule_4_1_12: true +ubtu20cis_rule_4_1_13: true +ubtu20cis_rule_4_1_14: true +ubtu20cis_rule_4_1_15: true +ubtu20cis_rule_4_1_16: true +ubtu20cis_rule_4_1_17: true +ubtu20cis_rule_4_2_1_1: true +ubtu20cis_rule_4_2_1_2: true +ubtu20cis_rule_4_2_1_3: true +ubtu20cis_rule_4_2_1_4: true +ubtu20cis_rule_4_2_1_5: true +ubtu20cis_rule_4_2_1_6: true +ubtu20cis_rule_4_2_2_1: true +ubtu20cis_rule_4_2_2_2: true +ubtu20cis_rule_4_2_2_3: true +ubtu20cis_rule_4_2_3: true +ubtu20cis_rule_4_3: true +ubtu20cis_rule_4_4: true + +# Section 5 Fixes +# Section 5 is Access, Authentication, and Authorization (Configure time-based job schedulers, Configure sudo, Configure SSH Server, Configure PAM +# and User Accounts and Environment) +ubtu20cis_rule_5_1_1: true +ubtu20cis_rule_5_1_2: true +ubtu20cis_rule_5_1_3: true +ubtu20cis_rule_5_1_4: true +ubtu20cis_rule_5_1_5: true +ubtu20cis_rule_5_1_6: true +ubtu20cis_rule_5_1_7: true +ubtu20cis_rule_5_1_8: true +ubtu20cis_rule_5_1_9: true +ubtu20cis_rule_5_2_1: true +ubtu20cis_rule_5_2_2: true +ubtu20cis_rule_5_2_3: true +ubtu20cis_rule_5_3_1: true +ubtu20cis_rule_5_3_2: true +ubtu20cis_rule_5_3_3: true +ubtu20cis_rule_5_3_4: true +ubtu20cis_rule_5_3_5: true +ubtu20cis_rule_5_3_6: true +ubtu20cis_rule_5_3_7: true +ubtu20cis_rule_5_3_8: true +ubtu20cis_rule_5_3_9: true +ubtu20cis_rule_5_3_10: true +ubtu20cis_rule_5_3_11: true +ubtu20cis_rule_5_3_12: true +ubtu20cis_rule_5_3_13: true +ubtu20cis_rule_5_3_14: true +ubtu20cis_rule_5_3_15: true +ubtu20cis_rule_5_3_16: true +ubtu20cis_rule_5_3_17: true +ubtu20cis_rule_5_3_18: true +ubtu20cis_rule_5_3_19: true +ubtu20cis_rule_5_3_20: true +ubtu20cis_rule_5_3_21: true 
+ubtu20cis_rule_5_3_22: true +ubtu20cis_rule_5_4_1: true +ubtu20cis_rule_5_4_2: true +ubtu20cis_rule_5_4_3: true +ubtu20cis_rule_5_4_4: true +ubtu20cis_rule_5_5_1_1: true +ubtu20cis_rule_5_5_1_2: true +ubtu20cis_rule_5_5_1_3: true +ubtu20cis_rule_5_5_1_4: true +ubtu20cis_rule_5_5_1_5: true +ubtu20cis_rule_5_5_2: true +ubtu20cis_rule_5_5_3: true +ubtu20cis_rule_5_5_4: true +ubtu20cis_rule_5_5_5: true +ubtu20cis_rule_5_6: true +ubtu20cis_rule_5_7: true + +# Section 6 Fixes +# Section is Systme Maintenance (System File Permissions and User and Group Settings) +ubtu20cis_rule_6_1_1: true +ubtu20cis_rule_6_1_2: true +ubtu20cis_rule_6_1_3: true +ubtu20cis_rule_6_1_4: true +ubtu20cis_rule_6_1_5: true +ubtu20cis_rule_6_1_6: true +ubtu20cis_rule_6_1_7: true +ubtu20cis_rule_6_1_8: true +ubtu20cis_rule_6_1_9: true +ubtu20cis_rule_6_1_10: true +ubtu20cis_rule_6_1_11: true +ubtu20cis_rule_6_1_12: true +ubtu20cis_rule_6_1_13: true +ubtu20cis_rule_6_1_14: true +ubtu20cis_rule_6_2_1: true +ubtu20cis_rule_6_2_2: true +ubtu20cis_rule_6_2_3: true +ubtu20cis_rule_6_2_4: true +ubtu20cis_rule_6_2_5: true +ubtu20cis_rule_6_2_6: true +ubtu20cis_rule_6_2_7: true +ubtu20cis_rule_6_2_8: true +ubtu20cis_rule_6_2_9: true +ubtu20cis_rule_6_2_10: true +ubtu20cis_rule_6_2_11: true +ubtu20cis_rule_6_2_12: true +ubtu20cis_rule_6_2_13: true +ubtu20cis_rule_6_2_14: true +ubtu20cis_rule_6_2_15: true +ubtu20cis_rule_6_2_16: true +ubtu20cis_rule_6_2_17: true + +# Service configuration variables, set to true to keep service +ubtu20cis_allow_autofs: false +ubtu20cis_allow_usb_storage: false +ubtu20cis_avahi_server: false +ubtu20cis_cups_server: false +ubtu20cis_dhcp_server: false +ubtu20cis_ldap_server: false +ubtu20cis_nfs_server: false +ubtu20cis_dns_server: false +ubtu20cis_vsftpd_server: false +ubtu20cis_httpd_server: false +ubtu20cis_dovecot_server: false +ubtu20cis_smb_server: false +ubtu20cis_squid_server: false +ubtu20cis_snmp_server: false +ubtu20cis_rsync_server: false +ubtu20cis_nis_server: false +ubtu20cis_nfs_client: false +# rpcbind is required by nfs-common which is required on client and server +ubtu20cis_rpc_required: "{{ ubtu20cis_nfs_server or ubtu20cis_nfs_client }}" + +# Clients in use variables +ubtu20cis_nis_required: false +ubtu20cis_rsh_required: false +ubtu20cis_talk_required: false +ubtu20cis_telnet_required: false +ubtu20cis_ldap_clients_required: false +ubtu20cis_is_router: false + +# IPv4 requirement toggle +ubtu20cis_ipv4_required: true + +# IPv6 requirement toggle +ubtu20cis_ipv6_required: false + +# Other system wide variables +# ubtu20cis_desktop_required is the toggle for requiring desktop environments. 
True means you use a desktop and will not disable/remove needed items to run a desktop (not recommended for servers) +# false means you do not require a desktop +ubtu20cis_desktop_required: false + +# Section 1 Control Variables +# Control 1.1.2/1.1.3/1.1.4/1.1.5 +# ubtu20cis_tmp_fstab_options are the file system options for the fstab configuration +# To conform to CIS control 1.1.2 you could use any settings +# To conform to CIS control 1.1.3 nodev needs to be present +# To conform to CIS control 1.1.4 nosuid needs to be present +# To conform to CIS control 1.1.5 noexec needs to be present +ubtu20cis_tmp_fstab_options: "defaults,rw,nosuid,nodev,noexec,relatime" + +# Control 1.1.6/1.1.7/1.1.8/1.1.9 +# ubtu20cis_dev_shm_fstab_options are the fstab file system options for /dev/shm +# To conform to CIS control 1.1.6 you could use any settings +# To conform to CIS control 1.1.7 nodev needs to be present +# To conform to CIS control 1.1.8 nosuid needs to be present +# To conform to CIS control 1.1.9 noexec needs to be present +ubtu20cis_dev_shm_fstab_options: "defaults,noexec,nodev,nosuid" + +# Control 1.1.12/1.1.13/1.1.14 +# These are the settings for the /var/tmp mount +# To conform to CIS control 1.1.12 nodev needs to be present in opts +# To conform to CIS control 1.1.13 nosuid needs to be present in opts +# To conform to CIS control 1.1.14 noexec needs to be present in opts +ubtu20cis_vartmp: + source: /tmp + fstype: none + opts: "defaults,nodev,nosuid,noexec,bind" + enabled: false + +# Control 1.3.2 +# These are the crontab settings for file system integrity enforcement +ubtu20cis_aide_cron: + cron_user: root + cron_file: /etc/crontab + aide_job: '/usr/bin/aide.wrapper --config /etc/aide/aide.conf --check' + aide_minute: 0 + aide_hour: 5 + aide_day: '*' + aide_month: '*' + aide_weekday: '*' + +# Control 1.4.4 +# THIS VARIABLE SHOULD BE CHANGED AND INCORPORATED INTO VAULT +# THIS VALUE IS WHAT THE ROOT PW WILL BECOME!!!!!!!! +# HAVING THAT PW EXPOSED IN RAW TEXT IS NOT SECURE!!!! +ubtu20cis_root_pw: "Password1" + +# Control 1.8.2 +# This will be the motd banner; it must not contain the below items in order to be compliant with Ubuntu 20 CIS +# \m, \r, \s, \v or references to the OS platform +ubtu20cis_warning_banner: | + Authorized uses only. All activity may be monitored and reported.
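+#
+# Editor's note (illustrative sketch only, not part of the upstream defaults): as recommended
+# for Control 1.4.4 above, the root password is better supplied from an ansible-vault encrypted
+# vars file rather than left in plain text here, for example in group_vars/all/vault.yml:
+#   vault_ubtu20cis_root_pw: "ChangeMe_S3cure!"          # file encrypted with ansible-vault
+# and then referenced as an override:
+#   ubtu20cis_root_pw: "{{ vault_ubtu20cis_root_pw }}"
+# The variable name vault_ubtu20cis_root_pw is an assumption used only for this example.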
+ +# Section 2 Control Variables +# Control 2.1.1.1 +# ubtu20cis_time_sync_tool is the tool in which to synchronize time +# The options are chrony, ntp, or systemd-timesyncd +ubtu20cis_time_sync_tool: "ntp" + +# Control 2.1.1.2 +# ubtu20cis_ntp_server_list is the list of NTP servers +# ubtu20cis_ntp_fallback_server_list is the list of fallback NTP servers +ubtu20cis_ntp_server_list: "0.debian.pool.ntp.org 1.debian.pool.ntp.org" +ubtu20cis_ntp_fallback_server_list: "2.debian.pool.ntp.org 3.debian.pool.ntp.org" + +# Control 2.1.1.3/2.1.1.4 +# ubtu20cis_chrony_server_options are the server options for chrony +ubtu20cis_chrony_server_options: "minpoll 8" +# ubtu20cis_time_synchronization_servers are the synchronization servers +ubtu20cis_time_synchronization_servers: + - 0.pool.ntp.org + - 1.pool.ntp.org + - 2.pool.ntp.org + - 3.pool.ntp.org +# ubtu20cis_chrony_user is the user that chrony will use, default is _chrony +ubtu20cis_chrony_user: "_chrony" +# ubtu20cis_ntp_server_options is the server options for ntp +ubtu20cis_ntp_server_options: "iburst" + +# Control 2.1.15 +# ubtu20_cis_mail_transfer_agent is the mail transfer agent in use +# The options are exim4, postfix, or other +# ubtu20_cis_mail_transfer_agent: "other" + +# Section 3 Control Variables +# Control 3.1.2 +# ubtu20cis_install_network_manager determines if this role can install network manager +ubtu20cis_install_network_manager: true + +# ubtu20cis_firewall_package is the toggle for which firewall system is in use +# The valid options to use are ufw, nftables, or iptables +# Warning!! nftables is not supported in this role and will only message out if nftables is selected +# If using nftables please manually adjust firewall settings +ubtu20cis_firewall_package: "iptables" + +# Control 3.5.1.5 +# ubtu20cis_ufw_allow_out_ports are the ports for the firewall to allow out +# if you want to allow out on all ports set the variable to "all", example ubtu20cis_ufw_allow_out_ports: "all" +ubtu20cis_ufw_allow_out_ports: + - 53 + - 80 + - 443 + +# Control 3.5.2.4 +# nftables is not supported in this role. Some tasks are commented out; this is one of them +# ubtu20cis_nftables_table_name is the name of the table in nftables you want to create +# the default nftables table name is inet filter. This variable name will be the one all +# nftables configs are applied to +# ubtu20cis_nftables_table_name: "inet filter" + + +# Controls 3.5.3.2.1 through 3.5.3.3.4 +# The iptables module only writes to memory, which means a reboot could revert settings +# The below toggle will install iptables-persistent and save the rules to disk (/etc/iptables/rules.v4 or rules.v6) +# This makes the CIS role changes permanent +ubtu20cis_save_iptables_cis_rules: true + +# Section 4 Control Variables +# Control 4.1.1.4 +# ubtu20cis_audit_back_log_limit is the audit_back_log limit and should be set to a sufficient value +# The example from CIS uses 8192 +ubtu20cis_audit_back_log_limit: 8192 + +# Control 4.1.2.1 +# ubtu20cis_max_log_file_size is the largest the log file will become in MB +# This should be set based on your site's policy +ubtu20cis_max_log_file_size: 10 + +# Control 4.1.2.2 +ubtu20cis_auditd: + admin_space_left_action: halt + max_log_file_action: keep_logs + +# Control 4.2.1.3 +# ubtu20cis_rsyslog_ansible_managed will toggle ansible automated configurations of rsyslog +# You should set the rsyslog config to your site specific needs. This toggle will use the example from +# page 347 to set rsyslog logging based on those configuration suggestions.
Settings can be seen +# in control 4.2.1.3 +ubtu20cis_rsyslog_ansible_managed: true + +# Control 4.2.1.5 +# ubtu20cis_remote_log_server is the remote logging server +ubtu20cis_remote_log_server: 192.168.2.100 + +# Control 4.2.1.6 +ubtu20cis_system_is_log_server: true + +# Control 4.3 +# ubtu20cis_logrotate is the log rotate frequencey. Options are daily, weekly, monthly, and yearly +ubtu20cis_logrotate: "daily" + +# Control 4.3 +# ubtu20cis_logrotate_create_settings are the settings for the create parameter in /etc/logrotate.conf +# The permissions need to be 640 or more restrictive. +# If you would like to include user/group settings to this parameter format the var as below +# ubtu20cis_logrotate_create_settings: "0640 root utmp" +ubtu20cis_logrotate_create_settings: "0640" + +# Section 5 Control Variables +# Control 5.2.1 +# ubtu20cis_sudo_package is the name of the sudo package to install +# The possible values are "sudo" or "sudo-ldap" +ubtu20cis_sudo_package: "sudo" + +# Control 5.2.3 +# ubtu20cis_sudo_logfile is the path and file name of the sudo log file +ubtu20cis_sudo_logfile: "/var/log/sudo.log" + +# ubtu20cis_sshd will contain all sshd variables. The task association and variable descriptions for each section are listed below +# Control 5.3.4 +# allow_users, allow_groups, deny_users, and deny_groups. These are lists of users and groups to allow or deny ssh access to +# These are lists that are just space delimited, for example allow_users: "vagrant ubuntu" for the vagrant and ubuntu users +# Control 5.3.5 +# log_level is the log level variable. This needs to be set to VERBOSE or INFO to conform to CIS standards +# Control 5.3.7 +# max_auth_tries is the max number of authentication attampts per connection. +# This value should be 4 or less to conform to CIS standards +# Control 5.3.13 +# ciphers is a comma seperated list of site approved ciphers +# ONLY USE STRONG CIPHERS. Weak ciphers are listed below +# DO NOT USE: 3des-cbc, aes128-cbc, aes192-cbc, and aes256-cbc +# Control 5.3.14 +# MACs is the comma seperated list of site approved MAC algorithms that SSH can use during communication +# ONLY USE STRONG ALGORITHMS. Weak algorithms are listed below +# DO NOT USE: hmac-md5, hmac-md5-96, hmac-ripemd160, hmac-sha1, hmac-sha1-96, umac-64@openssh.com, umac-128@openssh.com, hmac-md5-etm@openssh.com, +# hmac-md5-96-etm@openssh.com, hmac-ripemd160-etm@openssh.com, hmac-sha1-etm@openssh.com, hmac-sha1-96-etm@openssh.com, umac-64-etm@openssh.com, umac-128-etm@openssh.com +# Control 5.3.15 +# kex_algorithms is comma seperated list of the algorithms for key exchange methods +# ONLY USE STRONG ALGORITHMS. Weak algorithms are listed below +# DO NOT USE: diffie-hellman-group1-sha1, diffie-hellman-group14-sha1, diffie-hellman-group-exchange-sha1 +# Control 5.3.16 +# client_alive_interval is the amount of time idle before ssh session terminated. Set to 300 or less to conform to CIS standards +# client_alive_count_max will send client alive messages at the configured interval. Set to 3 or less to conform to CIS standards +# Control 5.3.17 +# login_grace_time is the time allowed for successful authentication to the SSH server. This needs to be set to 60 seconds or less to conform to CIS standards +# Control 5.3.22 +# max_sessions is the max number of open sessions permitted. 
Set the value to 4 or less to conform to CIS standards +ubtu20cis_sshd: + log_level: "INFO" + max_auth_tries: 4 + ciphers: "chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr" + macs: "hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256" + kex_algorithms: "curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256" + client_alive_interval: 300 + client_alive_count_max: 0 + login_grace_time: 60 + max_sessions: 4 + # WARNING: make sure you understand the precedence when working with these values!! + allow_users: "vagrant ubuntu" + allow_groups: "vagrant ubuntu" + # deny_users: + # deny_groups: + +# Control 5.4.3 +# ubtu20cis_pamd_pwhistory_remember is number of password chnage cycles a user can re-use a password +# This needs to be 5 or more to conform to CIS standards +ubtu20cis_pamd_pwhistory_remember: 5 + +# ubtu20cis_pass will be password based variables +# Control 5.5.1.1 +# pass_min_days is the min number of days allowed between changing passwords. Set to 1 or more to conform to CIS standards +# Control 5.5.1.2 +# max_days forces passwords to expire in configured number of days. Set to 365 or less to conform to CIS standards +# Control 5.5.1.3 +# warn_age is how many days before pw expiry the user will be warned. Set to 7 or more to conform to CIS standards +# Control 5.5.1.4 +# inactive the number of days of inactivity before the account will lock. Set to 30 day sor less to conform to CIS standards +ubtu20cis_pass: + max_days: 365 + min_days: 1 + warn_age: 7 + inactive: 30 + +# Control 5.5.5 +# Session timeout setting file (TMOUT setting can be set in multiple files) +# Timeout value is in seconds. Set value to 900 seconds or less +ubtu20cis_shell_session_timeout: + file: /etc/profile.d/tmout.sh + timeout: 900 + +# Control 5.7 +# ubtu20cis_su_group is the su group to use with pam_wheel +ubtu20cis_su_group: "wheel" + +# Section 6 Control Variables +# Control 6.1.10 +# ubtu20cis_no_world_write_adjust will toggle the automated fix to remove world-writable perms from all files +# Setting to true will remove all world-writable permissions, and false will leave as-is +ubtu20cis_no_world_write_adjust: true + +# Control 6.1.11 +# ubtu20cis_un_owned_owner is the owner to set files to that have no owner +ubtu20cis_unowned_owner: root +# ubtu20cis_no_owner_adjust will toggle the automated fix to give a user to unowned files/directories +# true will give the owner from ubtu20cis_un_owned_owner to all unowned files/directories and false will skip +ubtu20cis_no_owner_adjust: true +# Control 6.1.12 +# ubtu20cis_ungrouped_group is the group to set files to that have no group +ubtu20cis_ungrouped_group: root +# ubtu20cis_no_group_adjust will toggle the automated fix to give a group to ungrouped files/directories +# true will give the group from ubtu20cis_un_owned_group to all ungrouped files/directories and false will skip +ubtu20cis_no_group_adjust: true + +# Cotnrol 6.1.13 +# ubtu20cis_suid_adjust is the toggle to remove the SUID bit from all files on all mounts +# Set to true this role will remove that bit, set to false we will just warn about the files +ubtu20cis_suid_adjust: false + +# Control 6.2.5 Allow ansible to adjust world-writable files. 
False will just display world-writable files, True will remove world-writable +# ubtu20cis_passwd_label: "{{ (this_item | default(item)).id }}: {{ (this_item | default(item)).dir }}" +ubtu20cis_passwd_label: "{{ (this_item | default(item)).id }}: {{ (this_item | default(item)).dir }}" + + +#### Audit Configuration Settings #### + +### Audit binary settings ### +audit_bin_version: + release: v0.3.16 + checksum: 'sha256:827e354b48f93bce933f5efcd1f00dc82569c42a179cf2d384b040d8a80bfbfb' +audit_bin_path: /usr/local/bin/ +audit_bin: "{{ audit_bin_path }}goss" +audit_format: json + +# if get_audit_binary_method == download change accordingly +audit_bin_url: "https://github.com/aelsabbahy/goss/releases/download/{{ audit_bin_version.release }}/goss-linux-amd64" + +## if get_audit_binary_method - copy the following needs to be updated for your environment +## it is expected that it will be copied from somewhere accessible to the control node +## e.g copy from ansible control node to remote host +audit_bin_copy_location: /some/accessible/path + +### Goss Audit Benchmark file ### +## managed by the control audit_content +# git +audit_file_git: "https://github.com/ansible-lockdown/{{ benchmark }}-Audit.git" +audit_git_version: main + +# archive or copy: +audit_conf_copy: "some path to copy from" + +# get_url: +audit_files_url: "some url maybe s3?" + +## Goss configuration information +# Where the goss configs and outputs are stored +audit_out_dir: '/var/tmp' +# Where the goss audit configuration will be stored +audit_conf_dir: "{{ audit_out_dir }}/{{ benchmark }}-Audit/" + +# If changed these can affect other products +pre_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_pre_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}" +post_audit_outfile: "{{ audit_out_dir }}/{{ ansible_hostname }}_post_scan_{{ ansible_date_time.epoch }}.{{ audit_format }}" + +## The following should not need changing +audit_control_file: "{{ audit_conf_dir }}goss.yml" +audit_vars_path: "{{ audit_conf_dir }}/vars/{{ ansible_hostname }}.yml" +audit_results: | + The pre remediation results are: {{ pre_audit_summary }}. + The post remediation results are: {{ post_audit_summary }}. 
+ Full breakdown can be found in {{ audit_out_dir }} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/handlers/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/handlers/main.yml new file mode 100644 index 0000000..be5883b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/handlers/main.yml @@ -0,0 +1,58 @@ +--- +- name: grub update + command: update-grub + failed_when: false + +- name: reload gdm3 + command: dpkg-reconfigure gdm3 + failed_when: false + +- name: restart postfix + service: + name: postfix + state: restarted + +- name: restart exim4 + service: + name: exim4 + state: restarted + +- name: sysctl flush ipv4 route table + sysctl: + name: net.ipv4.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: reload ufw + ufw: + state: reloaded + +- name: sysctl flush ipv6 route table + sysctl: + name: net.ipv6.route.flush + value: '1' + sysctl_set: yes + when: ansible_virtualization_type != "docker" + +- name: restart auditd + service: + name: auditd + state: restarted + when: + - not ubtu20cis_skip_for_travis + tags: + - skip_ansible_lint + +- name: restart rsyslog + service: + name: rsyslog + state: restarted + +- name: restart sshd + service: + name: sshd + state: restarted + +- name: reload gdm + command: dpkg-reconfigure gdm3 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/meta/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/meta/main.yml new file mode 100644 index 0000000..7e528e0 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/meta/main.yml @@ -0,0 +1,20 @@ +galaxy_info: + author: "George Nalen, Mark Bolwell, and DFed" + description: "Apply the Ubuntu 20 CIS benmarks" + company: "MindPoint Group" + license: MIT + min_ansible_version: 2.9.0 + + platforms: + - name: Ubuntu + versions: + - focal + + galaxy_tags: + - system + - security + - ubuntu2004 + - cis + - hardening + +dependencies: [] diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/site.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/site.yml new file mode 100644 index 0000000..472d768 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/site.yml @@ -0,0 +1,12 @@ +--- +- hosts: all + become: true + vars: + is_container: false + + roles: + + - role: "{{ playbook_dir }}" + ubtu20cis_system_is_container: "{{ is_container | default(false) }}" + ubtu20cis_skip_for_travis: false + ubtu20cis_oscap_scan: yes diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/LE_audit_setup.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/LE_audit_setup.yml new file mode 100644 index 0000000..0f66332 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/LE_audit_setup.yml @@ -0,0 +1,22 @@ +--- + +- name: Pre Audit Setup | Download audit binary + get_url: + url: "{{ audit_bin_url }}" + dest: "{{ audit_bin }}" + owner: root + group: root + checksum: "{{ audit_bin_version.checksum }}" + mode: 0555 + when: + - get_audit_binary_method == 'download' + +- name: Pre Audit Setup | copy audit binary + copy: + src: "{{ audit_bin_copy_location }}" + dest: "{{ audit_bin }}" + mode: 0555 + owner: root + group: root + when: + - get_audit_binary_method == 'copy' diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/main.yml new file mode 100644 index 0000000..eb4b437 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/main.yml @@ -0,0 +1,115 @@ +--- +# - debug: var=ansible_facts +- name: Gather distribution info + # we need: + # - hardware for ansible_mounts + # - platform for ansible_architecture (ansible internal) + # - virtual for 
ansible_virtualization_type + setup: + gather_subset: distribution,hardware,platform,virtual,!all,!min + when: + - ansible_distribution is not defined + tags: + - always + +- name: Check OS version and family + fail: + msg: "This role can only be run against Ubuntu 20. {{ ansible_distribution }} {{ ansible_distribution_major_version }} is not supported." + when: + - ansible_distribution == 'Ubuntu' + - ansible_distribution_major_version is version_compare('20', '!=') + tags: + - always + +- name: Check ansible version + fail: + msg: You must use ansible 2.1 or greater + when: not ansible_version.full is version_compare('2.1', '>=') + tags: + - always + +- import_tasks: prelim.yml + tags: + - prelim_tasks + - run_audit + +- import_tasks: pre_remediation_audit.yml + when: + - run_audit + tags: run_audit + +- include: parse_etc_password.yml + when: + - ubtu20cis_section5_patch or + ubtu20cis_section6_patch + +- name: Gather the package facts + package_facts: + manager: auto + tags: + - always + +- name: Include section 1 patches + import_tasks: section_1/main.yml + when: ubtu20cis_section1_patch + tags: + - section1 + +- name: Include section 2 patches + import_tasks: section_2/main.yml + when: ubtu20cis_section2_patch + tags: + - section2 + +- name: Include section 3 patches + import_tasks: section_3/main.yml + when: ubtu20cis_section3_patch + tags: + - section3 + +- name: Include section 4 patches + import_tasks: section_4/main.yml + when: ubtu20cis_section4_patch + tags: + - section4 + +- name: Include section 5 patches + import_tasks: section_5/main.yml + when: ubtu20cis_section5_patch + tags: + - section5 + +- name: Include section 6 patches + import_tasks: section_6/main.yml + when: ubtu20cis_section6_patch + tags: + - section6 + +- name: flush handlers + meta: flush_handlers + + +- name: reboot system + block: + - name: reboot system if not skipped + reboot: + when: + - not ubtu20_skip_reboot + + - name: Warning a reboot required but skip option set + debug: + msg: "Warning!! 
changes have been made that require a reboot to be implemented but skip reboot was set - Can affect compliance check results" + changed_when: true + when: + - ubtu20_skip_reboot + + +- import_tasks: post_remediation_audit.yml + when: + - run_audit + +- name: Show Audit Summary + debug: + msg: "{{ audit_results.split('\n') }}" + when: + - run_audit diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/parse_etc_password.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/parse_etc_password.yml new file mode 100644 index 0000000..769918e --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/parse_etc_password.yml @@ -0,0 +1,32 @@ +--- +- name: "PRELIM | {{ ubtu20cis_passwd_tasks }} | Parse /etc/passwd" + block: + - name: "PRELIM | {{ ubtu20cis_passwd_tasks }} | Parse /etc/passwd" + command: cat /etc/passwd + changed_when: false + check_mode: false + register: ubtu20cis_passwd_file_audit + + - name: "PRELIM | {{ ubtu20cis_passwd_tasks }} | Split passwd entries" + set_fact: + ubtu20cis_passwd: "{{ ubtu20cis_passwd_file_audit.stdout_lines | map('regex_replace', ld_passwd_regex, ld_passwd_yaml) | map('from_yaml') | list }}" + + with_items: "{{ ubtu20cis_passwd_file_audit.stdout_lines }}" + vars: + ld_passwd_regex: >- + ^(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*):(?P[^:]*) + ld_passwd_yaml: | + id: >-4 + \g + password: >-4 + \g + uid: \g + gid: \g + gecos: >-4 + \g + dir: >-4 + \g + shell: >-4 + \g + tags: + - always diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/post_remediation_audit.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/post_remediation_audit.yml new file mode 100644 index 0000000..17ef3f8 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/post_remediation_audit.yml @@ -0,0 +1,43 @@ +--- + +- name: "Post Audit | Run post_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ post_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Post Audit | ensure audit files readable by users + file: + path: "{{ item }}" + mode: 0644 + state: file + loop: + - "{{ post_audit_outfile }}" + - "{{ pre_audit_outfile }}" + +- name: Post Audit | Capture audit data if json format + block: + - name: "capture data {{ post_audit_outfile }}" + command: "cat {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Post Audit | Capture audit data if documentation format + block: + - name: "Post Audit | capture data {{ post_audit_outfile }}" + command: "tail -2 {{ post_audit_outfile }}" + register: post_audit + changed_when: false + + - name: Post Audit | Capture post-audit result + set_fact: + post_audit_summary: "{{ post_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/pre_remediation_audit.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/pre_remediation_audit.yml new file mode 100644 index 0000000..ad1756b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/pre_remediation_audit.yml @@ -0,0 +1,118 @@ +--- + +- name: Audit Binary Setup | Setup the LE audit + include_tasks: LE_audit_setup.yml + when: + - setup_audit + tags: + - setup_audit + +- name: "Pre Audit Setup | Ensure {{ audit_conf_dir }} exists" + file: + path: "{{ audit_conf_dir }}" + state: directory + mode: '0755' + +- 
name: Pre Audit Setup | If using git for content set up + block: + - name: Pre Audit Setup | Install git (rh8 python3) + package: + name: git + state: present + when: ansible_distribution_major_version == '8' + + - name: Pre Audit Setup | Install git (rh7 python2) + package: + name: git + state: present + vars: + ansible_python_interpreter: "{{ python2_bin }}" + when: ansible_distribution_major_version == '7' + + - name: Pre Audit Setup | retrieve audit content files from git + git: + repo: "{{ audit_file_git }}" + dest: "{{ audit_conf_dir }}" + version: "{{ audit_git_version }}" + when: + - audit_content == 'git' + +- name: Pre Audit Setup | copy to audit content files to server + copy: + src: "{{ audit_local_copy }}" + dest: "{{ audit_conf_dest }}" + mode: 0644 + when: + - audit_content == 'copy' + +- name: Pre Audit Setup | unarchive audit content files on server + unarchive: + src: "{{ audit_conf_copy }}" + dest: "{{ audit_conf_dest }}" + when: + - audit_content == 'archived' + +- name: Pre Audit Setup | get audit content from url + get_url: + url: "{{ audit_files_url }}" + dest: "{{ audit_conf_dir }}" + when: + - audit_content == 'get_url' + +- name: Pre Audit Setup | Check Goss is available + block: + - name: Pre Audit Setup | Check for goss file + stat: + path: "{{ audit_bin }}" + register: goss_available + + - name: Pre Audit Setup | If audit ensure goss is available + assert: + msg: "Audit has been selected: unable to find goss binary at {{ audit_bin }}" + when: + - not goss_available.stat.exists + when: + - run_audit + +- name: Pre Audit Setup | Copy ansible default vars values to test audit + template: + src: ansible_vars_goss.yml.j2 + dest: "{{ audit_vars_path }}" + mode: 0600 + when: + - run_audit + tags: + - goss_template + +- name: "Pre Audit | Run pre_remediation {{ benchmark }} audit" + shell: "{{ audit_conf_dir }}/run_audit.sh -v {{ audit_vars_path }} -o {{ pre_audit_outfile }} -g {{ group_names }}" + vars: + warn: false + +- name: Pre Audit | Capture audit data if json format + block: + - name: "capture data {{ pre_audit_outfile }}" + command: "cat {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout | from_json |json_query(summary) }}" + vars: + summary: 'summary."summary-line"' + when: + - audit_format == "json" + +- name: Pre Audit | Capture audit data if documentation format + block: + - name: "capture data {{ pre_audit_outfile }}" + command: "tail -2 {{ pre_audit_outfile }}" + register: pre_audit + changed_when: false + + - name: Pre Audit | Capture pre-audit result + set_fact: + pre_audit_summary: "{{ pre_audit.stdout_lines }}" + when: + - audit_format == "documentation" diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/prelim.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/prelim.yml new file mode 100644 index 0000000..92bd496 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/prelim.yml @@ -0,0 +1,53 @@ +--- +- name: "PRELIM | Run apt update" + apt: + update_cache: yes + when: + - ubtu20cis_rule_1_3_1 or + ubtu20cis_rule_1_9 + +- name: "PRELIM | Check for autofs service" + shell: "systemctl show autofs | grep LoadState | cut -d = -f 2" + register: ubtu20cis_autofs_service_status + changed_when: false + check_mode: false + when: + - ubtu20cis_rule_1_1_23 + tags: + - skip_ansible_lint + +- name: "PRELIM | Check for avahi-daemon service" + shell: "systemctl show avahi-daemon | grep LoadState | cut -d = -f 2" + register: 
avahi_service_status + changed_when: false + check_mode: false + when: + - ubtu20cis_rule_2_1_3 + tags: + - skip_ansible_lint + +- name: "PRELIM | Install Network-Manager" + apt: + name: network-manager + state: present + when: + - ubtu20cis_rule_3_1_2 + - not ubtu20cis_system_is_container + +- name: "PRELIM | Install ACL" + apt: + name: acl + state: present + when: + - ubtu20cis_rule_6_2_6 + - ubtu20cis_install_network_manager + +- name: "PRELIM | List users accounts" + command: "awk -F: '{print $1}' /etc/passwd" + changed_when: false + check_mode: false + register: ubtu20cis_users + when: + - ubtu20cis_rule_6_2_8 or + ubtu20cis_rule_6_2_9 or + ubtu20cis_rule_6_2_10 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.1.x.yml new file mode 100644 index 0000000..8d73c0d --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.1.x.yml @@ -0,0 +1,524 @@ +--- +- name: "AUTOMATED | 1.1.1.1 | PATCH | Ensure mounting of cramfs filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/cramfs.conf + regexp: "^(#)?install cramfs(\\s|$)" + line: install cramfs /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.1 | PATCH | Ensure mounting of cramfs filesystems is disabled | Disable cramfs" + modprobe: + name: cramfs + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.1 + - cramfs + +- name: "AUTOMATED | 1.1.1.2 | PATCH | Ensure mounting of freevxfs filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.2 | PATCH | Ensure mounting of freevxfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/freevxfs.conf + regexp: "^(#)?install freevxfs(\\s|$)" + line: install freevxfs /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.2 | PATCH | Ensure mounting of freevxfs filesystems is disabled | Disable freevxfs" + modprobe: + name: freevxfs + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.2 + - freevxfs + +- name: "AUTOMATED | 1.1.1.3 | PATCH | Ensure mounting of jffs2 filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.3 | PATCH | Ensure mounting of jffs2 filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/jffs2.conf + regexp: "^(#)?install jffs2(\\s|$)" + line: install jffs2 /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.3 | PATCH | Ensure mounting of jffs2 filesystems is disabled | Disable jffs2" + modprobe: + name: jffs2 + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.3 + - jffs2 + +- name: "AUTOMATED | 1.1.1.4 | PATCH | Ensure mounting of hfs filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.4 | PATCH | Ensure mounting of hfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/hfs.conf + regexp: "^(#)?install hfs(\\s|$)" + line: install hfs /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.4 | PATCH | Ensure mounting of hfs filesystems is disabled | Disable hfs" + modprobe: + name: hfs + state: absent + when: ansible_connection != 'docker' + 
when: + - ubtu20cis_rule_1_1_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.4 + - hfs + +- name: "AUTOMATED | 1.1.1.5 | PATCH | Ensure mounting of hfsplus filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.5 | PATCH | Ensure mounting of hfsplus filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/hfsplus.conf + regexp: "^(#)?install hfsplus(\\s|$)" + line: install hfsplus /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.5 | PATCH | Ensure mounting of hfsplus filesystems is disabled | Disable hfsplus" + modprobe: + name: hfsplus + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.5 + - hfsplus + +- name: "MANUAL | 1.1.1.6 | PATCH | Ensure mounting of squashfs filesystems is disabled" + block: + - name: "MANUAL | 1.1.1.6 | PATCH | Ensure mounting of squashfs filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/squashfs.conf + regexp: "^(#)?install squashfs(\\s|$)" + line: install squashfs /bin/true + create: yes + + - name: "MANUAL | 1.1.1.6 | PATCH | Ensure mounting of squashfs filesystems is disabled | Disable squashfs" + modprobe: + name: squashfs + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_1_6 + tags: + - level2-server + - level2-workstation + - manual + - patch + - rule_1.1.1.6 + - squashfs + +- name: "AUTOMATED | 1.1.1.7 | PATCH | Ensure mounting of udf filesystems is disabled" + block: + - name: "AUTOMATED | 1.1.1.7 | PATCH | Ensure mounting of udf filesystems is disabled | Edit modprobe config" + lineinfile: + dest: /etc/modprobe.d/udf.conf + regexp: "^(#)?install udf(\\s|$)" + line: install udf /bin/true + create: yes + + - name: "AUTOMATED | 1.1.1.7 | PATCH | Ensure mounting of udf filesystems is disabled | Disable udf" + modprobe: + name: udf + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_1_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.1.7 + - udf + +- name: "AUTOMATED | 1.1.2 | PATCH | Ensure /tmp is configured" + mount: + path: /tmp + src: /tmp + state: mounted + fstype: tmpfs + opts: "{{ ubtu20cis_tmp_fstab_options }}" + when: + - ubtu20cis_rule_1_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.2 + - tmp + +- name: | + "AUTOMATED | 1.1.3 | PATCH | Ensure nodev option set on /tmp partition" + "AUTOMATED | 1.1.4 | PATCH | Ensure nosuid option set on /tmp partition" + "AUTOMATED | 1.1.5 | PATCH | Ensure noexec option set on /tmp partition" + mount: + name: /tmp + src: /tmp + state: remounted + fstype: tmpfs + opts: "{{ ubtu20cis_tmp_fstab_options }}" + when: + - ubtu20cis_rule_1_1_3 or + ubtu20cis_rule_1_1_4 or + ubtu20cis_rule_1_1_5 + # - ubtu20cis_vartmp['enabled'] + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.3 + - rule_1.1.4 + - rule_1.1.5 + - tmp + +- name: "AUTOMATED | 1.1.6 | PATCH | Ensure /dev/shm is configured" + mount: + name: /dev/shm + src: /dev/shm + state: mounted + fstype: tmpfs + opts: "{{ ubtu20cis_dev_shm_fstab_options }}" + when: + - ubtu20cis_rule_1_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.6 + - dev_shm + +- name: | + "AUTOMATED | 1.1.7 | PATCH | Ensure nodev option set on /dev/shm partition" + "AUTOMATED | 1.1.8 | PATCH | Ensure nosuid option set on /dev/shm 
partition" + "AUTOMATED | 1.1.9 | PATCH | Ensure noexec option set on /dev/shm partition" + mount: + name: /dev/shm + src: /dev/shm + state: remounted + fstype: tmpfs + opts: "{{ ubtu20cis_dev_shm_fstab_options }}" + when: + - ubtu20cis_rule_1_1_7 or + ubtu20cis_rule_1_1_8 or + ubtu20cis_rule_1_1_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.7 + - rule_1.1.8 + - rule_1.1.9 + - dev_shm + +- name: "AUTOMATED | 1.1.10 | AUDIT | Ensure separate partition exists for /var" + block: + - name: "AUTOMATED | 1.1.10 | AUDIT | Ensure separate partition exists for /var | Gather /var partition" + shell: mount | grep "on /var " + changed_when: false + failed_when: false + check_mode: false + args: + warn: false + register: ubtu20cis_1_1_10_var_mounted + + - name: "AUTOMATED | 1.1.10 | AUDIT | Ensure separate partition exists for /var | Alert if /var partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /var" + - "Please create a separate partition for /var" + when: ubtu20cis_1_1_10_var_mounted.stdout | length == 0 + when: + - ubtu20cis_rule_1_1_10 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.10 + - var + +- name: "AUTOMATED | 1.1.11 | AUDIT | Ensure separate partition exists for /var/tmp" + block: + - name: "AUTOMATED | 1.1.11 | AUDIT | Ensure separate partition exists for /var/tmp | Gather /var/tmp partition" + shell: mount | grep "on /var/tmp " + changed_when: false + failed_when: false + check_mode: false + args: + warn: false + register: ubtu20cis_1_1_11_var_tmp_mounted + + - name: "AUTOMATED | 1.1.11 | AUDIT | Ensure separate partition exists for /var/tmp | Alert if /var/tmp partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /var/tmp" + - "Please create a separate partition for /var/tmp" + when: ubtu20cis_1_1_11_var_tmp_mounted.stdout | length == 0 + when: + - ubtu20cis_rule_1_1_11 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.11 + - var/tmp + +- name: | + "AUTOMATED | 1.1.12 | PATCH | Ensure /var/tmp partition includes the nodev option" + "AUTOMATED | 1.1.13 | PATCH | Ensure /var/tmp partition includes the nosuid option" + "AUTOMATED | 1.1.14 | PATCH | Ensure /var/tmp partition includes the noexec option" + mount: + name: /var/tmp + src: "{{ ubtu20cis_vartmp['source'] }}" + state: present + fstype: "{{ ubtu20cis_vartmp['fstype'] }}" + opts: "{{ ubtu20cis_vartmp['opts'] }}" + when: + - ubtu20cis_rule_1_1_12 or + ubtu20cis_rule_1_1_13 or + ubtu20cis_rule_1_1_14 + - ubtu20cis_vartmp['enabled'] + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.12 + - rule_1.1.13 + - rule_1.1.14 + - var/tmp + +- name: "AUTOMATED | 1.1.15 | AUDIT | Ensure separate partition exists for /var/log" + block: + - name: "AUTOMATED | 1.1.15 | AUDIT | Ensure separate partition exists for /var/log | Gather /var/log partition" + shell: mount | grep "on /var/log " + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_1_15_var_log_mounted + args: + warn: false + + - name: "AUTOMATED | 1.1.15 | AUDIT | Ensure separate partition exists for /var/log | Alert if /var/log partition does not exist" + debug: + msg: + - "ALERT!!!! 
There is no separate partition for /var/log" + - "Please create a separate partition for /var/log" + when: ubtu20cis_1_1_15_var_log_mounted.stdout | length == 0 + when: + - ubtu20cis_rule_1_1_15 + tags: + - level2-server + - level2-workstation + - automated + - audit + - rule_1.1.15 + - var/log + +- name: "AUTOMATED | 1.1.16 | AUDIT | Ensure separate partition exists for /var/log/audit" + block: + - name: "AUTOMATED | 1.1.16 | AUDIT | Ensure separate partition exists for /var/log/audit | Gather /var/log/audit" + shell: mount | grep "on /var/log/audit " + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_1_16_var_log_audit_mounted + args: + warn: false + + - name: "AUTOMATED | 1.1.16 | AUDIT | Ensure separate partition exists for /var/log/audit | Alert if /var/log/audit partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /var/log/audit" + - "Please create a separate partition for /var/log/audit" + when: ubtu20cis_1_1_16_var_log_audit_mounted.stdout | length == 0 + when: + - ubtu20cis_rule_1_1_16 + tags: + - level2-server + - level2-workstation + - automated + - audit + - var/log/audit + +- name: "AUTOMATED | 1.1.17 | AUDIT | Ensure separate partition exists for /home" + block: + - name: "AUTOMATED | 1.1.17 | AUDIT | Ensure separate partition exists for /home | Gather /home" + shell: mount | grep "on /home" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_1_17_home_mounted + args: + warn: false + + - name: "AUTOMATED | 1.1.17 | AUDIT | Ensure separate partition exists for /home | Alert if /home partition does not exist" + debug: + msg: + - "ALERT!!!! There is no separate partition for /home" + - "Please create a separate partition for /home" + when: ubtu20cis_1_1_17_home_mounted.stdout | length == 0 + when: + - ubtu20cis_rule_1_1_17 + tags: + - level2-server + - level2-workstation + - automated + - audit + - /home + +- name: "AUTOMATED | 1.1.18 | PATCH | Ensure /home partition includes the nodev option" + mount: + name: "/home" + src: "{{ item.device }}" + state: mounted + fstype: "{{ item.fstype }}" + opts: "nodev" + with_items: "{{ ansible_mounts }}" + when: + - ubtu20cis_rule_1_1_18 + - item.mount == "/home" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.18 + - /home + +- name: "MANUAL | 1.1.19 | AUDIT | Ensure nodev option set on removable media partitions" + debug: + msg: "Warning!!!! Not relevant control" + when: + - ubtu20cis_rule_1_1_19 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.1.19 + - removable_media + +- name: "MANUAL | 1.1.20 | AUDIT | Ensure nosuid option set on removable media partitions" + debug: + msg: "Warning!!!! Not relevant control" + when: + - ubtu20cis_rule_1_1_20 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.1.20 + - removable_media + +- name: "MANUAL | 1.1.21 | AUDIT | Ensure noexec option set on removable media partitions" + debug: + msg: "Warning!!!! Not relevant control" + when: + - ubtu20cis_rule_1_1_21 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.1.21 + - removable_media + +- name: "AUTOMATED | 1.1.22 | PATCH | Ensure sticky bit is set on all world-writable directories" + shell: df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type d \( -perm -0002 -a ! 
-perm -1000 \) 2>/dev/null | xargs -I '{}' chmod a+t '{}' + failed_when: ubtu20cis_1_1_22_status.rc>0 + check_mode: false + register: ubtu20cis_1_1_22_status + when: + - ubtu20cis_rule_1_1_22 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.1.22 + - sticky_bit + +- name: "AUTOMATED | 1.1.23 | PATCH | Disable Automounting" + service: + name: autofs + state: stopped + enabled: no + when: + - ubtu20cis_rule_1_1_23 + - ubtu20cis_autofs_service_status.stdout == "loaded" + - not ubtu20cis_allow_autofs + tags: + - level1-server + - level2-workstation + - automated + - patch + - rule_1.1.23 + - automounting + +- name: "AUTOMATED | 1.1.24 | PATCH | Disable USB Storage" + block: + - name: "AUTOMATED | 1.1.24 | PATCH | Disable USB Storage | Set modprobe config" + lineinfile: + path: /etc/modprobe.d/usb_storage.conf + regexp: '^install usb-storage' + line: 'install usb-storage /bin/true' + create: yes + + - name: "AUTOMATED | 1.1.24 | PATCH | Disable USB Storage | Remove usb-storage module" + modprobe: + name: usb-storage + state: absent + when: ansible_connection != 'docker' + when: + - ubtu20cis_rule_1_1_24 + - not ubtu20cis_allow_usb_storage + tags: + - level1-server + - level2-workstation + - automated + - patch + - rule_1.1.24 + - usb_storage diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.2.x.yml new file mode 100644 index 0000000..6849ca9 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.2.x.yml @@ -0,0 +1,52 @@ +--- +- name: "MANUAL | 1.2.1 | AUDIT | Ensure package manager repositories are configured" + block: + - name: "MANUAL 1.2.1 | AUDIT | Ensure package manager repositories are configured | Get repositories" + command: apt-cache policy + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_2_1_apt_policy + + - name: "MANUAL 1.2.1 | AUDIT | Ensure package manager repositories are configured | Message out repository configs" + debug: + msg: + - "Alert!!!! Below are the apt package repositories" + - "Please review to make sure they conform to your sites policies" + - "{{ ubtu20cis_1_2_1_apt_policy.stdout_lines }}" + when: + - ubtu20cis_rule_1_2_1 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.2.1 + - apt + +- name: "MANUAL | 1.2.2 | AUDIT | Ensure GPG keys are configured" + block: + - name: "MANUAL | 1.2.2 | AUDIT | Ensure GPG keys are configured | Get apt gpg keys" + command: apt-key list + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_2_2_apt_gpgkeys + + - name: "MANUAL | 1.2.2 | AUDIT | Ensure GPG keys are configured | Message out apt gpg keys" + debug: + msg: + - "Alert!!!! 
Below are the apt gpg kyes configured" + - "Please review to make sure they are configured" + - "in accordance with site policy" + - "{{ ubtu20cis_1_2_2_apt_gpgkeys.stdout_lines }}" + when: + - ubtu20cis_rule_1_2_2 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.2.2 + - gpg + - keys diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.3.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.3.x.yml new file mode 100644 index 0000000..dafbaed --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.3.x.yml @@ -0,0 +1,35 @@ +--- +- name: "AUTOMATED | 1.3.1 | PATCH | Ensure AIDE is installed" + apt: + name: ['aide', 'aide-common'] + state: present + when: + - ubtu20cis_rule_1_3_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.3.1 + - aide + +- name: "AUTOMATED | 1.3.2 | PATCH | Ensure filesystem integrity is regularly checked" + cron: + name: Run AIDE integrity check + cron_file: "{{ ubtu20cis_aide_cron['cron_file'] }}" + user: "{{ ubtu20cis_aide_cron['cron_user'] }}" + minute: "{{ ubtu20cis_aide_cron['aide_minute'] | default('0') }}" + hour: "{{ ubtu20cis_aide_cron['aide_hour'] | default('5') }}" + day: "{{ ubtu20cis_aide_cron['aide_day'] | default('*') }}" + month: "{{ ubtu20cis_aide_cron['aide_month'] | default('*') }}" + weekday: "{{ ubtu20cis_aide_cron['aide_weekday'] | default('*') }}" + job: "{{ ubtu20cis_aide_cron['aide_job'] }}" + when: + - ubtu20cis_rule_1_3_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.3.2 + - cron diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.4.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.4.x.yml new file mode 100644 index 0000000..4c261f8 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.4.x.yml @@ -0,0 +1,86 @@ +--- +- name: "AUTOMATED | 1.4.1 | PATCH | Ensure permissions on bootloader config are not overridden" + block: + - name: "AUTOMATED | 1.4.1 | PATCH | Ensure permissions on bootloader config are not overridden | Change chmod setting" + replace: + path: /usr/sbin/grub-mkconfig + regexp: 'chmod\s\d\d\d\s\${grub_cfg}.new' + replace: 'chmod 400 ${grub_cfg}.new' + + - name: "AUTOMATED | 1.4.1 | PATCH | Ensure permissions on bootloader config are not overridden | Remove check on password" + lineinfile: + path: /usr/sbin/grub-mkconfig + regexp: 'if \[ \"x\$\{grub_cfg\}\" != "x" \] && ! grep "\^password" \${grub_cfg}.new' + line: if [ "x${grub_cfg}" != "x" ]; then + when: + - ubtu20cis_rule_1_4_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.1 + - grub + +# --------------- +# --------------- +# The RHEL7 based control uses a custom module, grub_crypt +# I need to research best way to set grub pw for Ubuntu using the +# grub-mkpasswd-pbkdf2 command and passing the data at the same time. 
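+# One possible (untested) approach, sketched here for reference: generate the
+# hash off-box with `grub-mkpasswd-pbkdf2` (it prints a value beginning with
+# grub.pbkdf2.sha512.10000....), store that value in a vaulted variable such as
+# ubtu20cis_grub_pw_hash (placeholder name, not defined in this role), then
+# drop a superusers stanza into /etc/grub.d/ and rebuild grub.cfg via the
+# existing "grub update" handler. Roughly:
+#
+# - name: "AUTOMATED | 1.4.2 | PATCH | Ensure bootloader password is set | Deploy password stanza"
+#   copy:
+#     dest: /etc/grub.d/42_custom_password
+#     owner: root
+#     group: root
+#     mode: 0755
+#     content: |
+#       #!/bin/sh
+#       cat << 'EOF'
+#       set superusers="root"
+#       password_pbkdf2 root {{ ubtu20cis_grub_pw_hash }}
+#       EOF
+#   notify: grub update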
+# --------------- +# --------------- +- name: "AUTOMATED | 1.4.2 | PATCH | Ensure bootloader password is set" + command: /bin/true + changed_when: false + failed_when: false + check_mode: false + when: + - ubtu20cis_rule_1_4_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.2 + - grub + - notimplemented + +- name: "AUTOMATED | 1.4.3 | PATCH | Ensure permissions on bootloader config are configured" + block: + - name: "AUTOMATED | 1.4.3 | AUDIT | Ensure permissions on bootloader config are configured | Check for Grub file" + stat: + path: /boot/grub/grub.cfg + check_mode: false + register: ubtu20cis_1_4_3_grub_cfg_status + + - name: "AUTOMATED | 1.4.3 | PATCH | Ensure permissions on bootloader config are configured | Set permissions" + file: + path: /boot/grub/grub.cfg + owner: root + group: root + mode: 0400 + when: + - ubtu20cis_1_4_3_grub_cfg_status.stat.exists + when: + - ubtu20cis_rule_1_4_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.3 + - grub + +- name: "AUTOMATED | 1.4.4 | PATCH | Ensure authentication required for single user mode" + user: + name: root + password: "{{ ubtu20cis_root_pw }}" + when: + - ubtu20cis_rule_1_4_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.4.4 + - passwd diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.5.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.5.x.yml new file mode 100644 index 0000000..027d8b6 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.5.x.yml @@ -0,0 +1,122 @@ +--- +- name: "MANUAL | 1.5.1 | AUDIT | Ensure XD/NX support is enabled" + block: + - name: "MANUAL | 1.5.1 | AUDIT | Ensure XD/NX support is enabled | Find status of XD/NX" + shell: "journalctl | grep 'protection: active'" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_5_1_xdnx_status + + - name: "MANUAL | 1.5.1 | AUDIT | Ensure XD/NX support is enabled | Alert if XD/NX is not enabled" + debug: + msg: + - "ALERT!!!!You do not have XD/NX (Execute Disable/No Execute) enabled" + - "To conform to CIS standards this needs to be enabled" + when: "'active'not in ubtu20cis_1_5_1_xdnx_status.stdout" + when: + - ubtu20cis_rule_1_5_1 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_1.5.1 + - xd/nx + +- name: "AUTOMATED | 1.5.2 | PATCH | Ensure address space layout randomization (ASLR) is enabled" + block: + - name: "AUTOMATED | 1.5.2 | PATCH | Ensure address space layout randomization (ASLR) is enabled | Set ASLR settings" + lineinfile: + path: /etc/sysctl.conf + regexp: '^kernel.randomize_va_space' + line: 'kernel.randomize_va_space = 2' + + - name: "AUTOMATED | 1.5.2 | PATCH | Ensure address space layout randomization (ASLR) is enabled | Set active kernel parameter" + sysctl: + name: kernel.randomize_va_space + value: '2' + when: + - ubtu20cis_rule_1_5_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.5.2 + - aslr + +- name: "AUTOMATED | 1.5.3 | PATCH | Ensure prelink is not installed" + block: + - name: "AUTOMATED | 1.5.3 | PATCH | Ensure prelink is not installed | Restore binaries to normal" + command: prelink -ua + changed_when: false + failed_when: false + + - name: "AUTOMATED | 1.5.3 | PATCH | Ensure prelink is not installed| Remove prelink package" + apt: + name: prelink + state: absent + when: + - ubtu20cis_rule_1_5_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - 
rule_1.5.3 + - prelink + + +- name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted" + block: + - name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted | kernel sysctl" + sysctl: + name: fs.suid_dumpable + value: '0' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + + - name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted | security limits" + lineinfile: + path: /etc/security/limits.d/99_zero_core.conf + regexp: '^\* hard core' + line: '* hard core 0' + create: true + owner: root + group: root + mode: '0644' + + - name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted | sysctl.conf" + lineinfile: + path: /etc/sysctl.conf + regexp: '^fs.suid_dumpable' + line: fs.suid_dumpable=0 + owner: root + group: root + mode: '0644' + + - name: "AUTOMATED | 1.5.4 | PATCH | Ensure core dumps are restricted | coredump.conf" + lineinfile: + path: /etc/systemd/coredump.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + create: true + owner: root + group: root + mode: '0644' + loop: + - { regexp: '^Storage', line: 'Storage=none' } + - { regexp: '^ProcessSizeMax', line: 'ProcessSizeMax=0' } + when: "'systemd-coredump' in ansible_facts.packages" + when: + - ubtu20cis_rule_1_5_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.5.4 + - coredump diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.6.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.6.x.yml new file mode 100644 index 0000000..376047f --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.6.x.yml @@ -0,0 +1,83 @@ +--- +- name: "AUTOMATED | 1.6.1.1 | PATCH | Ensure AppArmor is installed" + apt: + name: ['apparmor', 'apparmor-utils'] + state: present + when: + - ubtu20cis_rule_1_6_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.6.1.1 + - apparmor + +- name: "AUTOMATED | 1.6.1.2 | PATCH | Ensure AppArmor is enabled in the bootloader configuration" + block: + - name: "AUTOMATED | 1.6.1.2 | AUDIT | Ensure AppArmor is enabled in the bootloader configuration | Get current settings" + shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | cut -f2 -d'"' + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_1_6_1_2_cmdline_settings + + - name: "AUTOMATED | 1.6.1.2 | PATCH | Ensure AppArmor is enabled in the bootloader configuration | Set apparmor settings if none exist" + lineinfile: + path: /etc/default/grub + regexp: '^GRUB_CMDLINE_LINUX' + line: 'GRUB_CMDLINE_LINUX="apparmor=1 security=apparmor {{ ubtu20cis_1_6_1_2_cmdline_settings.stdout }}"' + insertafter: '^GRUB_' + when: + - "'apparmor' not in ubtu20cis_1_6_1_2_cmdline_settings.stdout" + - "'security' not in ubtu20cis_1_6_1_2_cmdline_settings.stdout" + notify: grub update + + - name: "AUTOMATED | 1.6.1.2 | PATCH | Ensure AppArmor is enabled in the bootloader configuration | Set apparmor settings if none exist | Replace apparmor settings when exists" + replace: + path: /etc/default/grub + regexp: "{{ item.regexp }}" + replace: "{{ item.replace }}" + with_items: + - { regexp: 'apparmor=\S+', replace: 'apparmor=1' } + - { regexp: 'security=\S+', replace: 'security=apparmor' } + when: + - "'apparmor' in ubtu20cis_1_6_1_2_cmdline_settings.stdout" + - "'security' in ubtu20cis_1_6_1_2_cmdline_settings.stdout" + notify: grub update + when: + - ubtu20cis_rule_1_6_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.6.1.2 + - 
apparmor + +- name: "AUTOMATED | 1.6.1.3 | PATCH | Ensure all AppArmor Profiles are in enforce or complain mode" + command: aa-enforce /etc/apparmor.d/* + failed_when: false + when: + - ubtu20cis_rule_1_6_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.6.1.3 + - apparmor + +- name: "AUTOMATED | 1.6.1.4 | PATCH | Ensure all AppArmor Profiles are enforcing" + command: aa-enforce /etc/apparmor.d/* + failed_when: false + when: + - ubtu20cis_rule_1_6_1_4 + tags: + - level2-server + - level2-workstation + - automated + - scored + - patch + - rule_1.6.1.4 + - apparmor diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.7.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.7.x.yml new file mode 100644 index 0000000..ddde1a4 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.7.x.yml @@ -0,0 +1,93 @@ +--- +- name: "AUTOMATED | 1.7.1 | PATCH | Ensure message of the day is configured properly" + template: + src: etc/motd.j2 + dest: /etc/motd + when: + - ubtu20cis_rule_1_7_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.1 + - motd + +- name: "AUTOMATED | 1.7.2 | PATCH | Ensure local login warning banner is configured properly" + template: + src: etc/issue.j2 + dest: /etc/issue + when: + - ubtu20cis_rule_1_7_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.2 + - banner + +- name: "AUTOMATED | 1.7.3 | PATCH | Ensure remote login warning banner is configured properly" + template: + src: etc/issue.net.j2 + dest: /etc/issue.net + when: + - ubtu20cis_rule_1_7_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.3 + - banner + +- name: "AUTOMATED | 1.7.4 | PATCH | Ensure permissions on /etc/motd are configured" + file: + path: /etc/motd + owner: root + group: root + mode: 0644 + when: + - ubtu20cis_rule_1_7_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.4 + - permissions + - motd + +- name: "AUTOMATED | 1.7.5 | PATCH | Ensure permissions on /etc/issue are configured" + file: + path: /etc/issue + owner: root + group: root + mode: 0644 + when: + - ubtu20cis_rule_1_7_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.5 + - permissions + - banner + +- name: "AUTOMATED | 1.7.6 | PATCH | Ensure permissions on /etc/issue.net are configured" + file: + path: /etc/issue.net + owner: root + group: root + mode: 0644 + when: + - ubtu20cis_rule_1_7_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_1.7.6 + - permissions + - banner diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.8.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.8.x.yml new file mode 100644 index 0000000..1c2781f --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.8.x.yml @@ -0,0 +1,78 @@ +--- +- name: "MANUAL | 1.8.1 | PATCH | Ensure GNOME Display Manager is removed" + apt: + name: gdm3 + state: absent + when: + - ubtu20cis_rule_1_8_1 + - not ubtu20cis_desktop_required + - ubtu20cis_disruption_high + tags: + - level2-server + - manual + - patch + - rule_1.8.1 + - gnome + +- name: "AUTOMATED | 1.8.2 | PATCH | Ensure GDM login banner is configured" + lineinfile: + path: /etc/gdm3/greeter.dconf-defaults + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + create: yes + owner: root + group: root + mode: 0644 + notify: reload gdm + 
with_items:
+      - { regexp: '\[org\/gnome\/login-screen\]', line: '[org/gnome/login-screen]', insertafter: EOF }
+      - { regexp: 'banner-message-enable', line: 'banner-message-enable=true', insertafter: '\[org\/gnome\/login-screen\]'}
+      - { regexp: 'banner-message-text', line: "banner-message-text='{{ ubtu20cis_warning_banner | regex_replace('\n', ' ') | trim }}'", insertafter: 'banner-message-enable' }
+  when:
+      - ubtu20cis_rule_1_8_2
+      - ubtu20cis_desktop_required
+  tags:
+      - level1-server
+      - level1-workstation
+      - automated
+      - patch
+      - rule_1.8.2
+      - gnome
+
+- name: "AUTOMATED | 1.8.3 | PATCH | Ensure disable-user-list is enabled"
+  lineinfile:
+      path: /etc/gdm3/greeter.dconf-defaults
+      regexp: '^disable-user-list='
+      line: 'disable-user-list=true'
+      insertafter: 'banner-message-text='
+      create: yes
+      owner: root
+      group: root
+      mode: 0644
+  notify: reload gdm
+  when:
+      - ubtu20cis_rule_1_8_3
+      - ubtu20cis_desktop_required
+  tags:
+      - level1-server
+      - level1-workstation
+      - automated
+      - patch
+      - rule_1.8.3
+      - gdm3
+
+- name: "AUTOMATED | 1.8.4 | PATCH | Ensure XDCMP is not enabled"
+  lineinfile:
+      path: /etc/gdm3/custom.conf
+      regexp: '^Enable.*=.*true'
+      state: absent
+  when:
+      - ubtu20cis_rule_1_8_4
+  tags:
+      - level1-server
+      - level1-workstation
+      - automated
+      - patch
+      - rule_1.8.4
+      - xdcmp
diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.9.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.9.yml
new file mode 100644
index 0000000..5460d84
--- /dev/null
+++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/cis_1.9.yml
@@ -0,0 +1,14 @@
+---
+- name: "MANUAL | 1.9 | PATCH | Ensure updates, patches, and additional security software are installed"
+  apt:
+      name: "*"
+      state: latest
+  when:
+      - ubtu20cis_rule_1_9
+  tags:
+      - level1-server
+      - level1-workstation
+      - manual
+      - patch
+      - rule_1.9
+      - patch
diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/main.yml
new file mode 100644
index 0000000..ac4a125
--- /dev/null
+++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_1/main.yml
@@ -0,0 +1,27 @@
+---
+- name: "SECTION | 1.1 | Disable Unused Filesystems"
+  include: cis_1.1.x.yml
+
+- name: "SECTION | 1.2 | Configure Software Updates"
+  include: cis_1.2.x.yml
+
+- name: "SECTION | 1.3 
| Filesystem Integrity Checking" + include: cis_1.3.x.yml + +- name: "SECTION | 1.4 | Secure Boot Settings" + include: cis_1.4.x.yml + +- name: "SECTION | 1.5 | Additional Process Hardening" + include: cis_1.5.x.yml + +- name: "SECTION | 1.6 | Mandatory Access Control" + include: cis_1.6.x.yml + +- name: "SECTION | 1.7 | Command Line Warning Banners" + include: cis_1.7.x.yml + +- name: "SECTION | 1.8 | GNOME Display Manager" + include: cis_1.8.x.yml + +- name: "SECTION | 1.9 | Ensure updates, patches, and additional security software are installed" + include: cis_1.9.yml diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.1.x.yml new file mode 100644 index 0000000..861c1d0 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.1.x.yml @@ -0,0 +1,457 @@ +--- +- name: "AUTOMATED | 2.1.1.1 | PATCH | Ensure time synchronization is in use" + apt: + name: "{{ ubtu20cis_time_sync_tool }}" + state: present + when: + - ubtu20cis_rule_2_1_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.1.1 + - chrony + +- name: "AUTOMATED | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured" + block: + - name: "AUTOMATED | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Remove ntp and chrony" + apt: + name: ['ntp', 'chrony'] + state: absent + + - name: "AUTOMATED | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Set configuration for systemd-timesyncd" + lineinfile: + path: /etc/systemd/timesyncd.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^\[Time\]', line: '[Time]', insertafter: EOF } + - { regexp: '^#NTP|^NTP', line: 'NTP={{ ubtu20cis_ntp_server_list }}', insertafter: '\[Time\]' } + - { regexp: '^#FallbackNTP|^FallbackNTP', line: 'FallbackNTP={{ ubtu20cis_ntp_fallback_server_list }}', insertafter: '\[Time\]' } + - { regexp: '^#RootDistanceMaxSec|^RootDistanceMaxSec', line: 'RootDistanceMaxSec=1', insertafter: '\[Time\]'} + + - name: "AUTOMATED | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Start and enable the systemd-timesyncd service" + systemd: + name: systemd-timesyncd.service + state: started + enabled: yes + masked: no + + - name: "AUTOMATED | 2.1.1.2 | PATCH | Ensure systemd-timesyncd is configured | Set timedatectl to ntp" + command: timedatectl set-ntp true + when: + - ubtu20cis_rule_2_1_1_2 + - ubtu20cis_time_sync_tool == "systemd-timesyncd" + tags: + - level1-server + - level1-workstation + - automated + - manual + - patch + - rule_2.1.1.2 + - systemd-timesyncd + +- name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured" + block: + - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Remove ntp" + apt: + name: ntp + state: absent + + - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Disable/Mask systemd-timesyncd" + systemd: + name: systemd-timesyncd + state: stopped + enabled: no + masked: yes + + - name: "AUTOMATED | 2.1.1.3 | AUDIT | Ensure chrony is configured | Check for chrony user" + shell: grep {{ ubtu20cis_chrony_user }} /etc/passwd + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_2_1_1_3_chrony_user_status + + - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Set chrony.conf file" + template: + src: chrony.conf.j2 + dest: /etc/chrony/chrony.conf + owner: root + group: root + mode: 0644 + + - name: "AUTOMATED | 2.1.1.3 | 
PATCH | Ensure chrony is configured | Create chrony user"
+      user:
+        name: "{{ ubtu20cis_chrony_user }}"
+        shell: /usr/sbin/nologin
+        system: true
+      when: ubtu20cis_2_1_1_3_chrony_user_status.stdout | length > 0
+
+    - name: "AUTOMATED | 2.1.1.3 | PATCH | Ensure chrony is configured | Set option to use chrony user"
+      lineinfile:
+        path: /etc/default/chrony
+        regexp: '^DAEMON_OPTS'
+        line: 'DAEMON_OPTS="-u _chrony"'
+  when:
+    - ubtu20cis_rule_2_1_1_3
+    - ubtu20cis_time_sync_tool == "chrony"
+  tags:
+    - level1-server
+    - level1-workstation
+    - automated
+    - patch
+    - rule_2.1.1.3
+    - chrony
+
+- name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured"
+  block:
+    - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Remove chrony"
+      apt:
+        name: chrony
+        state: absent
+
+    - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Disable/Mask systemd-timesyncd"
+      systemd:
+        name: systemd-timesyncd
+        state: stopped
+        enabled: no
+        masked: yes
+
+    - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Set ntp.conf settings"
+      template:
+        src: ntp.conf.j2
+        dest: /etc/ntp.conf
+        owner: root
+        group: root
+        mode: 0644
+
+    - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Modify sysconfig/ntpd"
+      lineinfile:
+        path: /etc/sysconfig/ntpd
+        regexp: "{{ item.regexp }}"
+        line: "{{ item.line }}"
+        create: yes
+      with_items:
+        - { regexp: '^OPTIONS', line: 'OPTIONS="-u ntp:ntp"'}
+        - { regexp: '^NTPD_OPTIONS', line: 'NTPD_OPTIONS="-u ntp:ntp"' }
+
+    - name: "AUTOMATED | 2.1.1.4 | PATCH | Ensure ntp is configured | Modify /etc/init.d/ntp"
+      lineinfile:
+        path: /etc/init.d/ntp
+        regexp: '^RUNASUSER'
+        line: 'RUNASUSER=ntp'
+  when:
+    - ubtu20cis_rule_2_1_1_4
+    - ubtu20cis_time_sync_tool == "ntp"
+  tags:
+    - level1-server
+    - level1-workstation
+    - automated
+    - patch
+    - rule_2.1.1.4
+    - ntp
+
+- name: "AUTOMATED | 2.1.2 | PATCH | Ensure X Window System is not installed"
+  apt:
+    name: xserver-xorg*
+    state: absent
+  when:
+    - ubtu20cis_rule_2_1_2
+    - not ubtu20cis_desktop_required
+  tags:
+    - level1-server
+    - automated
+    - patch
+    - rule_2.1.2
+    - xwindows
+
+- name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed"
+  block:
+    - name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed | Stop/Disable avahi-daemon.service"
+      service:
+        name: avahi-daemon.service
+        state: stopped
+        enabled: no
+      when: avahi_service_status.stdout == "loaded"
+
+    - name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed | Stop/Disable avahi-daemon.socket"
+      service:
+        name: avahi-daemon.socket
+        state: stopped
+        enabled: no
+      when: avahi_service_status.stdout == "loaded"
+
+    - name: "AUTOMATED | 2.1.3 | PATCH | Ensure Avahi Server is not installed | Remove avahi-daemon"
+      apt:
+        name: avahi-daemon
+        state: absent
+  when:
+    - ubtu20cis_rule_2_1_3
+    - not ubtu20cis_avahi_server
+    - ubtu20cis_disruption_high
+  tags:
+    - level1-server
+    - level1-workstation
+    - automated
+    - patch
+    - rule_2.1.3
+    - avahi
+    - services
+
+- name: "AUTOMATED | 2.1.4 | PATCH | Ensure CUPS is not installed"
+  apt:
+    name: cups
+    state: absent
+  when:
+    - ubtu20cis_rule_2_1_4
+    - not ubtu20cis_cups_server
+  tags:
+    - level1-server
+    - level2-workstation
+    - automated
+    - patch
+    - rule_2.1.4
+    - cups
+    - services
+
+- name: "AUTOMATED | 2.1.5 | PATCH | Ensure DHCP Server is not installed"
+  apt:
+    name: isc-dhcp-server
+    state: absent
+  when:
+    - ubtu20cis_rule_2_1_5
+    - not ubtu20cis_dhcp_server
+  tags:
+    - level1-server
+    - level1-workstation
+    - 
automated + - patch + - rule_2.1.5 + - dhcp + - services + +- name: "AUTOMATED | 2.1.6 | PATCH | Ensure LDAP server is not installed" + apt: + name: slapd + state: absent + when: + - ubtu20cis_rule_2_1_6 + - not ubtu20cis_ldap_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.6 + - ldap + - services + +- name: "AUTOMATED | 2.1.7 | PATCH | Ensure NFS is not installed" + apt: + name: nfs-kernel-server + state: absent + when: + - ubtu20cis_rule_2_1_7 + - not ubtu20cis_nfs_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.7 + - nfs + - rpc + - services + +- name: "AUTOMATED | 2.1.8 | PATCH | Ensure DNS Server is not installed" + apt: + name: bind9 + state: absent + when: + - ubtu20cis_rule_2_1_8 + - not ubtu20cis_dns_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.8 + - dns + - service + +- name: "AUTOMATED | 2.1.9 | PATCH | Ensure FTP Server is not installed" + apt: + name: vsftpd + state: absent + when: + - ubtu20cis_rule_2_1_9 + - not ubtu20cis_vsftpd_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.9 + - ftp + - service + +- name: "AUTOMATED | 2.1.10 | PATCH | Ensure HTTP server is not installed" + apt: + name: apache2 + state: absent + when: + - ubtu20cis_rule_2_1_10 + - not ubtu20cis_httpd_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.10 + - httpd + - service + +- name: "AUTOMATED | 2.1.11 | PATCH | Ensure IMAP and POP3 server are not installed" + apt: + name: ['dovecot-imapd', 'dovecot-pop3d'] + state: absent + when: + - ubtu20cis_rule_2_1_11 + - not ubtu20cis_dovecot_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.11 + - dovecot + - service + +- name: "AUTOMATED | 2.1.12 | PATCH | Ensure Samba is not installed" + apt: + name: samba + state: absent + when: + - ubtu20cis_rule_2_1_12 + - not ubtu20cis_smb_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.12 + - samba + - service + +- name: "AUTOMATED | 2.1.13 | PATCH | Ensure HTTP Proxy Server is not installed" + apt: + name: squid + state: absent + when: + - ubtu20cis_rule_2_1_13 + - not ubtu20cis_squid_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.13 + - http_proxy + - service + +- name: "AUTOMATED | 2.1.14 | PATCH | Ensure SNMP Server is not installed" + apt: + name: snmpd + state: absent + when: + - ubtu20cis_rule_2_1_14 + - not ubtu20cis_snmp_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.14 + - snmp + - service + +- name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode" + block: + - name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode | Make changes if exim4 installed" + lineinfile: + path: /etc/exim4/update-exim4.conf.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^dc_eximconfig_configtype', line: "dc_eximconfig_configtype='local'" } + - { regexp: '^dc_local_interfaces', line: "dc_local_interfaces='127.0.0.1 ; ::1'" } + - { regexp: '^dc_readhost', line: "dc_readhost=''" } + - { regexp: '^dc_relay_domains', line: "dc_relay_domains=''" } + - { regexp: '^dc_minimaldns', line: "dc_minimaldns='false'" } + - { regexp: '^dc_relay_nets', line: "dc_relay_nets=''" } + - { regexp: '^dc_smarthost', line: 
"dc_smarthost=''" } + - { regexp: '^dc_use_split_config', line: "dc_use_split_config='false'" } + - { regexp: '^dc_hide_mailname', line: "dc_hide_mailname=''" } + - { regexp: '^dc_mailname_in_oh', line: "dc_mailname_in_oh='true'" } + - { regexp: '^dc_localdelivery', line: "dc_localdelivery='mail_spool'" } + notify: restart exim4 + when: "'exim4' in ansible_facts.packages" + + - name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode | Make changes if postfix is installed" + lineinfile: + path: /etc/postfix/main.cf + regexp: '^(#)?inet_interfaces' + line: 'inet_interfaces = loopback-only' + notify: restart postfix + when: "'postfix' in ansible_facts.packages" + + - name: "AUTOMATED | 2.1.15 | PATCH | Ensure mail transfer agent is configured for local-only mode | Message out other main agents" + debug: + msg: + - "Warning!! You are not using either exim4 or postfix" + - "Please review your vendors documentation to configure local-only mode" + when: + - "'exim4' not in ansible_facts.packages" + - "'postfix' not in ansible_facts.packages" + when: + - ubtu20cis_rule_2_1_15 + tags: + - level1-server + - level1-workstation + - automated + - scored + - patch + - rule_2.1.15 + - postfix + +- name: "AUTOMATED | 2.1.16 | PATCH | Ensure rsync service is not installed" + apt: + name: rsync + state: absent + when: + - ubtu20cis_rule_2_1_16 + - not ubtu20cis_rsync_server + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.1.16 + - rsync + +- name: "AUTOMATED | 2.1.17 | PATCH | Ensure NIS Server is not installed" + apt: + name: nis + state: absent + when: + - ubtu20cis_rule_2_1_17 + - not ubtu20cis_nis_server + tags: + - level1-server + - level1-workstation + - automated + - rule_2.1.17 + - nis + - service diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.2.x.yml new file mode 100644 index 0000000..7285141 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.2.x.yml @@ -0,0 +1,88 @@ +--- +- name: "2.2.1 | PATCH | Ensure NIS Client is not installed" + apt: + name: nis + state: absent + when: + - ubtu20cis_rule_2_2_1 + - not ubtu20cis_nis_required + tags: + - level1-server + - level1-workstation + - rule_2.2.1 + - nis + +- name: "AUTOMATED | 2.2.2 | PATCH | Ensure rsh client is not installed" + apt: + name: rsh-client + state: absent + when: + - ubtu20cis_rule_2_2_2 + - not ubtu20cis_rsh_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.2 + - rsh + +- name: "AUTOMATED | 2.2.3 | PATCH | Ensure talk client is not installed" + apt: + name: talk + state: absent + when: + - ubtu20cis_rule_2_2_3 + - not ubtu20cis_talk_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.3 + - talk + +- name: "AUTOMATED | 2.2.4 | PATCH | Ensure telnet client is not installed" + apt: + name: telnet + state: absent + when: + - ubtu20cis_rule_2_2_4 + - not ubtu20cis_telnet_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.4 + - telnet + +- name: "AUTOMATED | 2.2.5 | PATCH | Ensure LDAP client is not installed" + apt: + name: ldap-utils + state: absent + when: + - ubtu20cis_rule_2_2_5 + - not ubtu20cis_ldap_clients_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.5 + - ldap + +- name: "AUTOMATED | 2.2.6 | PATCH | Ensure RPC is not installed" + apt: + name: rpcbind + state: 
absent + when: + - ubtu20cis_rule_2_2_6 + - not ubtu20cis_rpc_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_2.2.6 + - rpbc diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.3.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.3.yml new file mode 100644 index 0000000..7a8d21d --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/cis_2.3.yml @@ -0,0 +1,24 @@ +--- +- name: "MANUAL | 2.3 | AUDIT | Ensure nonessential services are removed or masked" + block: + - name: "MANUAL | 2.3 | AUDIT | Ensure nonessential services are removed or masked | Check for services" + shell: lsof -i -P -n | grep -v "(ESTABLISHED)" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_2_3_services + + - name: "MANUAL | 2.3 | AUDIT | Ensure nonessential services are removed or masked | Message out running services" + debug: + msg: + - "Warning!! Below are the running services. Please review and remove as well as mask un-needed services" + - "{{ ubtu20cis_2_3_services.stdout_lines }}" + when: + - ubtu20cis_rule_2_3 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_2.3 + - services diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/main.yml new file mode 100644 index 0000000..92bdbeb --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_2/main.yml @@ -0,0 +1,9 @@ +--- +- name: "SECTION | 2.1 | Special Purpose Services" + include: cis_2.1.x.yml + +- name: "SECTION | 2.2 | Service Clients" + include: cis_2.2.x.yml + +- name: "SECTION | 2.3 | Ensure nonessential services are removed or masked" + include: cis_2.3.yml diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.1.x.yml new file mode 100644 index 0000000..50697bd --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.1.x.yml @@ -0,0 +1,70 @@ +--- +- name: "MANUAL | 3.1.1 | PATCH | Disable IPv6" + block: + - name: "MANUAL | 3.1.1 | AUDIT | Disable IPv6 | Get current GRUB_CMDLINE_LINUX settings" + shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | cut -f2 -d'"' + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_1_1_grub_cmdline_linux_settings + + - name: "MANUAL | 3.1.1 | PATCH | Disable IPv6 | Add ipv6.disable if does not exist" + lineinfile: + path: /etc/default/grub + regexp: '^GRUB_CMDLINE_LINUX' + line: 'GRUB_CMDLINE_LINUX="{{ ubtu20cis_3_1_1_grub_cmdline_linux_settings.stdout }} ipv6.disable=1"' + when: "'ipv6.disable' not in ubtu20cis_3_1_1_grub_cmdline_linux_settings.stdout" + notify: grub update + + - name: "MANUAL | 3.1.1 | PATCH | Disable IPv6 | Set ipv6.disable to 1 if exists" + replace: + path: /etc/default/grub + regexp: 'ipv6\.disable=.' 
+ replace: 'ipv6.disable=1' + when: "'ipv6.disable' in ubtu20cis_3_1_1_grub_cmdline_linux_settings.stdout" + notify: grub update + + - name: "MANUAL | 3.1.1 | PATCH | Disable IPv6 | Remove net.ipv6.conf.all.disable_ipv6" + lineinfile: + path: /etc/sysctl.conf + regexp: '^net.ipv6.conf.all.disable_ipv6.*' + state: absent + when: + - ubtu20cis_rule_3_1_1 + - not ubtu20cis_ipv6_required + tags: + - level2-server + - level2-workstation + - manual + - patch + - rule_3.1.1 + - ipv6 + +- name: "AUTOMATED | 3.1.2 | PATCH | Ensure wireless interfaces are disabled" + block: + - name: "AUTOMATED | 3.1.2 | PATCH | Ensure wireless interfaces are disabled | Check for network-manager tool" + shell: dpkg -l | grep network-manager + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_1_2_network_manager_status + + - name: "AUTOMATED | 3.1.2 | PATCH | Ensure wireless interfaces are disabled | Disable wireless if network-manager installed" + command: nmcli radio all off + changed_when: ubtu20cis_3_1_2_nmcli_radio_off.rc == 0 + register: ubtu20cis_3_1_2_nmcli_radio_off + when: ubtu20cis_3_1_2_network_manager_status.stdout | length > 0 + + - name: "AUTOMATED | 3.1.2 | PATCH | Ensure wireless interfaces are disabled | Warn about wireless if network-manager not installed" + debug: + msg: "ALERT!!!! You need to disable wireless interfaces manually since network-manager is not installed" + when: ubtu20cis_3_1_2_network_manager_status.stdout | length == 0 + when: + - ubtu20cis_rule_3_1_2 + tags: + - level1-server + - level2-workstation + - automated + - patch + - rule_3.1.2 + - wireless diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.2.x.yml new file mode 100644 index 0000000..90c2a20 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.2.x.yml @@ -0,0 +1,60 @@ +--- +- name: "AUTOMATED | 3.2.1 | PATCH | Ensure packet redirect sending is disabled" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.send_redirects + - net.ipv4.conf.default.send_redirects + notify: sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_2_1 + - not ubtu20cis_is_router + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.2.1 + - packet_redirect + - sysctl + +- name: "AUTOMATED | 3.2.2 | PATCH | Ensure IP forwarding is disabled" + block: + - name: "AUTOMATED | 3.2.2 | PATCH | Ensure IP forwarding is disabled | IPv4 settings" + sysctl: + name: net.ipv4.ip_forward + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: + - sysctl flush ipv4 route table + + - name: "AUTOMATED | 3.2.2 | PATCH | Ensure IP forwarding is disabled | IPv6 settings" + sysctl: + name: net.ipv6.conf.all.forwarding + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: + - sysctl flush ipv6 route table + when: ubtu20cis_ipv6_required + when: + - ubtu20cis_rule_3_2_2 + - not ubtu20cis_is_router + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.2.2 + - ip_forwarding + - sysctl diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.3.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.3.x.yml new file mode 100644 index 0000000..b4060c4 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.3.x.yml @@ -0,0 +1,233 @@ +--- +- name: 
"AUTOMATED | 3.3.1 | PATCH | Ensure source routed packets are not accepted" + block: + - name: "AUTOMATED | 3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv4 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.accept_source_route + - net.ipv4.conf.default.accept_source_route + notify: sysctl flush ipv4 route table + + - name: "AUTOMATED | 3.3.1 | PATCH | Ensure source routed packets are not accepted | IPv6 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv6.conf.all.accept_source_route + - net.ipv6.conf.default.accept_source_route + notify: sysctl flush ipv6 route table + when: ubtu20cis_ipv6_required + when: + - ubtu20cis_rule_3_3_1 + - not ubtu20cis_is_router + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.1 + - routed_packets + - sysctl + +- name: "AUTOMATED | 3.3.2 | PATCH | Ensure ICMP redirects are not accepted" + block: + - name: "AUTOMATED | 3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv4 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.accept_redirects + - net.ipv4.conf.default.accept_redirects + notify: sysctl flush ipv4 route table + + - name: "AUTOMATED | 3.3.2 | PATCH | Ensure ICMP redirects are not accepted | IPv6 settings" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv6.conf.all.accept_redirects + - net.ipv6.conf.default.accept_redirects + notify: sysctl flush ipv6 route table + when: ubtu20cis_ipv6_required + when: + - ubtu20cis_rule_3_3_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.2 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.3 | PATCH | Ensure secure ICMP redirects are not accepted" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.secure_redirects + - net.ipv4.conf.default.secure_redirects + notify: sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_3_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.3 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.4 | PATCH | Ensure suspicious packets are logged" + sysctl: + name: "{{ item }}" + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.log_martians + - net.ipv4.conf.default.log_martians + notify: sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_3_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.4 + - suspicious_packets + - sysctl + +- name: "AUTOMATED | 3.3.5 | PATCH | Ensure broadcast ICMP requests are ignored" + sysctl: + name: net.ipv4.icmp_echo_ignore_broadcasts + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_3_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.5 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.6 | PATCH | Ensure bogus ICMP responses are ignored" + sysctl: + name: net.ipv4.icmp_ignore_bogus_error_responses + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: 
sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_3_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.6 + - icmp + - sysctl + +- name: "AUTOMATED | 3.3.7 | PATCH | Ensure Reverse Path Filtering is enabled" + sysctl: + name: "{{ item }}" + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv4.conf.all.rp_filter + - net.ipv4.conf.default.rp_filter + notify: sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_3_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.7 + - reverse_path_filtering + - sysctl + +- name: "AUTOMATED | 3.3.8 | PATCH | Ensure TCP SYN Cookies is enabled" + sysctl: + name: net.ipv4.tcp_syncookies + value: '1' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + notify: sysctl flush ipv4 route table + when: + - ubtu20cis_rule_3_3_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.8 + - tcp_syn_cookies + - sysctl + +- name: "AUTOMATED | 3.3.9 | PATCH | Ensure IPv6 router advertisements are not accepted" + sysctl: + name: "{{ item }}" + value: '0' + sysctl_set: yes + state: present + reload: yes + ignoreerrors: yes + with_items: + - net.ipv6.conf.all.accept_ra + - net.ipv6.conf.default.accept_ra + notify: sysctl flush ipv6 route table + when: + - ubtu20cis_rule_3_3_9 + - ubtu20cis_ipv6_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.3.9 + - ipv6 + - router_advertisements + - sysctl diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.4.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.4.x.yml new file mode 100644 index 0000000..042ee6e --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.4.x.yml @@ -0,0 +1,64 @@ +--- +- name: "AUTOMATED | 3.4.1 | PATCH | Ensure DCCP is disabled" + lineinfile: + path: /etc/modprobe.d/dccp.conf + regexp: '^(#)?install dccp(\\s|$)' + line: 'install dccp /bin/true' + create: yes + when: + - ubtu20cis_rule_3_4_1 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_3.4.1 + - dccp + +- name: "AUTOMATED | 3.4.2 | PATCH | Ensure SCTP is disabled" + lineinfile: + path: /etc/modprobe.d/sctp.conf + regexp: "^(#)?install sctp(\\s|$)" + line: 'install sctp /bin/true' + create: yes + when: + - ubtu20cis_rule_3_4_2 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_3.4.2 + - sctp + +- name: "AUTOMATED | 3.4.3 | PATCH | Ensure RDS is disabled" + lineinfile: + path: /etc/modprobe.d/rds.conf + regexp: '^(#)?install rds(\\s|$)' + line: 'install rds /bin/true' + create: yes + when: + - ubtu20cis_rule_3_4_3 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_3.4.3 + - rds + +- name: "AUTOMATED | 3.4.4 | PATCH | Ensure TIPC is disabled" + lineinfile: + path: /etc/modprobe.d/tipc.conf + regexp: '^(#)?install tipc(\\s|$)' + line: install tipc /bin/true + create: yes + when: + - ubtu20cis_rule_3_4_4 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_3.4.4 + - tipc diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.5.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.5.x.yml new file mode 100644 index 0000000..5525c4a --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/cis_3.5.x.yml @@ -0,0 +1,790 @@ +--- +- name: "AUTOMATED | 3.5.1.1 | PATCH | Ensure ufw is installed" + apt: + name: ufw + state: 
present + when: + - ubtu20cis_rule_3_5_1_1 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.1 + - apt + - ufw + +- name: "AUTOMATED | 3.5.1.2 | PATCH | Ensure iptables-persistent is not installed with ufw" + apt: + name: iptables-persistent + state: absent + when: + - ubtu20cis_rule_3_5_1_2 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.2 + - ufw + +# Adding the allow OpenSSH rule while enabling ufw to allow ansible to run after enabling +- name: "AUTOMATED | 3.5.1.3 | PATCH | Ensure ufw service is enabled" + ufw: + rule: allow + name: OpenSSH + state: enabled + when: + - ubtu20cis_rule_3_5_2_1 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.3 + - ufw + +- name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured" + block: + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set allow in ufw rules" + ufw: + rule: allow + direction: in + interface: lo + notify: reload ufw + + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set allow out ufw rules" + ufw: + rule: allow + direction: out + interface: lo + notify: reload ufw + + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set deny ufw rules IPv4" + ufw: + rule: deny + direction: in + from_ip: 127.0.0.0/8 + notify: reload ufw + + - name: "AUTOMATED | 3.5.1.4 | PATCH | Ensure loopback traffic is configured | Set deny ufw rules IPv6" + ufw: + rule: deny + direction: in + from_ip: "::1" + notify: reload ufw + when: ubtu20cis_ipv6_required + when: + - ubtu20cis_rule_3_5_1_4 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.4 + - ufw + +- name: "MANUAL | 3.5.1.5 | PATCH | Ensure ufw outbound connections are configured" + block: + - name: "MANUAL | 3.5.1.5 | PATCH | Ensure ufw outbound connections are configured | Custom ports" + ufw: + rule: allow + direction: out + to_port: '{{ item }}' + with_items: + - "{{ ubtu20cis_ufw_allow_out_ports }}" + notify: reload ufw + when: ubtu20cis_ufw_allow_out_ports != "all" + + - name: "MANUAL | 3.5.1.5 | PATCH | Ensure ufw outbound connections are configured | Allow all" + ufw: + rule: allow + direction: out + notify: reload ufw + when: "'all' in ubtu20cis_ufw_allow_out_ports" + when: + - ubtu20cis_rule_3_5_1_5 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_3.5.1.5 + - ufw + +- name: "MANUAL | 3.5.1.6 | AUDIT | Ensure ufw firewall rules exist for all open ports" + block: + - name: "MANUAL | 3.5.1.6 | AUDIT | Ensure ufw firewall rules exist for all open ports | Get list of open ports" + command: ss -4tuln + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_5_1_6_open_listen_ports + + - name: "MANUAL | 3.5.1.6 | AUDIT | Ensure ufw firewall rules exist for all open ports | Get list of firewall rules" + command: ufw status + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_5_1_6_firewall_rules + + - name: "MANUAL | 3.5.1.6 | AUDIT | Ensure ufw firewall rules exist for all open ports | Message out settings" + debug: + msg: + - "ALERT!!!!Below are the listening ports and firewall rules" + - "Please create firewall rule for any open ports if not already done" 
+ - "*****---Open Listen Ports---*****" + - "{{ ubtu20cis_3_5_1_6_open_listen_ports.stdout_lines }}" + - "*****---Firewall Rules---*****" + - "{{ ubtu20cis_3_5_1_6_firewall_rules.stdout_lines }}" + when: + - ubtu20cis_rule_3_5_1_6 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_3.5.1.6 + - ufw + +- name: "AUTOMATED | 3.5.1.7 | PATCH | Ensure ufw default deny firewall policy" + ufw: + default: deny + direction: "{{ item }}" + notify: reload ufw + with_items: + - incoming + - outgoing + - routed + when: + - ubtu20cis_rule_3_5_1_7 + - ubtu20cis_firewall_package == "ufw" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.1.7 + - ufw + +# --------------- +# --------------- +# NFTables is unsupported with this role. However I have the actions commented out as a guide +# --------------- +# --------------- +- name: "AUTOMATED | 3.5.2.1 | AUDIT | Ensure nftables is installed" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + # apt: + # name: nftables + # state: present + when: + - ubtu20cis_rule_3_5_2_1 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.1 + - nftables + +- name: "AUTOMATED | 3.5.2.2 | AUDIT | Ensure ufw is uninstalled or disabled with nftables" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + # apt: + # name: ufw + # state: absent + when: + - ubtu20cis_rule_3_5_2_2 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.2 + - nftables + +- name: "MANUAL | 3.5.2.3 | AUDIT | Ensure iptables are flushed with nftables" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + # iptables: + # flush: yes + when: + - ubtu20cis_rule_3_5_2_3 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_3.5.2.3 + - nftables + +- name: "AUTOMATED | 3.5.2.4 | AUDIT | Ensure a nftables table exists" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + # command: "nft create table {{ ubtu20cis_nftables_table_name }}" + # changed_when: ubtu20cis_3_5_2_4_new_table.rc == 0 + # failed_when: false + # check_mode: false + # register: ubtu20cis_3_5_2_4_new_table + when: + - ubtu20cis_rule_3_5_2_4 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.2.4 + - nftables + +- name: "AUTOMATED | 3.5.2.5 | AUDIT | Ensure nftables base chains exist" + debug: + msg: "Warning!! NFTables is not supported in this role. 
Please use UFW, iptables, or manually manage nftables" + # block: + # - name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure nftables base chains exist | Input entry" + # shell: 'nft create chain {{ ubtu20cis_nftables_table_name }} input { type filter hook input priority 0 \; }' + # changed_when: ubtu20cis_3_5_2_5_base_chains_input.rc == 0 + # failed_when: false + # register: ubtu20cis_3_5_2_5_base_chains_input + + # - name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure nftables base chains exist | Forward entry" + # shell: 'nft create chain {{ ubtu20cis_nftables_table_name }} forward { type filter hook forward priority 0 \; }' + # changed_when: ubtu20cis_3_5_2_5_base_chains_forward.rc == 0 + # failed_when: false + # register: ubtu20cis_3_5_2_5_base_chains_forward + + # - name: "AUTOMATED | 3.5.2.5 | PATCH | Ensure nftables base chains exist | Output entry" + # shell: 'nft create chain {{ ubtu20cis_nftables_table_name }} output { type filter hook output priority 0 \; }' + # changed_when: ubtu20cis_3_5_2_5_base_chains_output.rc == 0 + # failed_when: false + # register: ubtu20cis_3_5_2_5_base_chains_output + when: + - ubtu20cis_rule_3_5_2_5 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.5 + - nftables + +- name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + # block: + # - name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured | Get input iif lo accept status" + # shell: nft list ruleset | awk '/hook input/,/}/' | grep 'iif "lo" accept' + # changed_when: false + # failed_when: false + # check_mode: false + # register: ubtu20cis_3_5_2_6_loopback_iif_status + + # - name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured | Get input iif lo accept status" + # shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip saddr' + # changed_when: false + # failed_when: false + # check_mode: false + # register: ubtu20cis_3_5_2_6_loopback_input_drop_status + + # - name: "AUTOMATED | 3.5.2.6 | AUDIT | Ensure nftables loopback traffic is configured | Get input iif lo accept status" + # shell: nft list ruleset | awk '/hook input/,/}/' | grep 'ip6 saddr' + # changed_when: false + # failed_when: false + # check_mode: false + # register: ubtu20cis_3_5_2_6_loopback_ipv6_drop_status + + # - name: "AUTOMATED | 3.5.2.6 | PATCH | Ensure nftables loopback traffic is configured | Loopback iif lo accept" + # command: 'nft add rule inet {{ ubtu20cis_nftables_table_name }} input iif lo accept' + # changed_when: ubtu20cis_3_5_2_6_loopback_iif.rc == 0 + # failed_when: false + # register: ubtu20cis_3_5_2_6_loopback_iif + # when: "'iif \"lo\" accept' not in ubtu20cis_3_5_2_6_loopback_iif_status.stdout" + + # - name: "AUTOMATED | 3.5.2.6 | PATCH | Ensure nftables loopback traffic is configured | Loopback input drop" + # command: 'nft add rule inet {{ ubtu20cis_nftables_table_name }} input ip saddr 127\.0\.0\.0\/8 counter drop' + # changed_when: ubtu20cis_3_5_2_6_loopback_input_drop.rc == 0 + # failed_when: false + # register: ubtu20cis_3_5_2_6_loopback_input_drop + # when: + # - "'ip saddr 127.0.0.0/8' not in ubtu18cis_3_5_3_4_loopback_input_drop_status.stdout" + # - "'drop' not in ubtu20cis_3_5_2_6_loopback_input_drop_status.stdout" + + # - name: "3AUTOMATED | .5.2.6 | PATCH | Ensure nftables loopback traffic is configured | Loopback ipv6 drop" 
+ # command: 'nft add rule inet {{ ubtu20cis_nftables_table_name }} input ip6 saddr ::1 counter drop' + # changed_when: ubtu20cis_3_5_2_6_loopback_ipv6_drop.rc == 0 + # failed_when: false + # register: ubtu20cis_3_5_2_6_loopback_ipv6_drop + # when: + # - "'ip6 saddr' not in ubtu20cis_3_5_2_6_loopback_ipv6_drop_status.stdout" + # - "'drop' not in ubtu20cis_3_5_2_6_loopback_ipv6_drop_status.stdout" + when: + - ubtu20cis_rule_3_5_2_6 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.6 + - nftables + +- name: "MANUAL | 3.5.2.7 | AUDIT | Ensure nftables outbound and established connections are configured" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + when: + - ubtu20cis_rule_3_5_2_7 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_3.5.2.7 + - nftables + +- name: "AUTOMATED | 3.5.2.8 | AUDIT | Ensure nftables default deny firewall policy" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + when: + - ubtu20cis_rule_3_5_2_8 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.8 + - nftables + +- name: "AUTOMATED | 3.5.2.9 | AUDIT | Ensure nftables service is enabled" + debug: + msg: "Warning!! NFTables is not supported in this role. Please use UFW, iptables, or manually manage nftables" + # service: + # name: nftables + # state: started + # enabled: yes + when: + - ubtu20cis_rule_3_5_2_9 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.9 + - nftables + +- name: "AUTOMATED | 3.5.2.10 | AUDIT | Ensure nftables rules are permanent" + debug: + msg: "Warning!! NFTables is not supported in this role. 
Please use UFW, iptables, or manually manage nftables" + when: + - ubtu20cis_rule_3_5_2_10 + - ubtu20cis_firewall_package == "nftables" + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.2.10 + - nftables + +- name: "AUTOMATED | 3.5.3.1.1 | PATCH | Ensure iptables packages are installed" + apt: + name: ['iptables', 'iptables-persistent'] + state: present + when: + - ubtu20cis_rule_3_5_3_1_1 + - ubtu20cis_firewall_package == "iptables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.1.1 + - iptables + +- name: "AUTOMATED | 3.5.3.1.2 | PATCH | Ensure nftables is not installed with iptables" + apt: + name: nftables + state: absent + when: + - ubtu20cis_rule_3_5_3_1_2 + - ubtu20cis_firewall_package == "iptables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.1.2 + - iptables + +- name: "AUTOMATED | 3.5.3.1.3 | PATCH | Ensure ufw is uninstalled or disabled with iptables" + apt: + name: ufw + state: absent + when: + - ubtu20cis_rule_3_5_3_1_3 + - ubtu20cis_firewall_package == "iptables" + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.1.3 + - iptables + +- name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables loopback traffic is configured" + block: + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables loopback traffic is configured | INPUT loopback ACCEPT" + iptables: + action: append + chain: INPUT + in_interface: lo + jump: ACCEPT + + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables loopback traffic is configured | OUTPUT loopback ACCEPT" + iptables: + action: append + chain: OUTPUT + out_interface: lo + jump: ACCEPT + + - name: "AUTOMATED | 3.5.3.2.1 | PATCH | Ensure iptables loopback traffic is configured | OUTPUT loopback ACCEPT" + iptables: + action: append + chain: INPUT + source: 127.0.0.0/8 + jump: DROP + when: + - ubtu20cis_rule_3_5_3_2_1 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv4_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.2.1 + - iptables + +- name: "MANUAL | 3.5.3.2.2 | PATCH | Ensure iptables outbound and established connections are configured" + iptables: + action: append + chain: '{{ item.chain }}' + protocol: '{{ item.protocol }}' + match: state + ctstate: '{{ item.ctstate }}' + jump: ACCEPT + with_items: + - { chain: OUTPUT, protocol: tcp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: udp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: icmp, ctstate: 'NEW,ESTABLISHED' } + - { chain: INPUT, protocol: tcp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: udp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: icmp, ctstate: 'ESTABLISHED' } + when: + - ubtu20cis_rule_3_5_3_2_2 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv4_required + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_3.5.3.2.2 + - iptables + +- name: "AUTOMATED | 3.5.3.2.3 | PATCH | Ensure iptables default deny firewall policy" + block: + - name: "AUTOMATED | 3.5.3.2.3 | PATCH | Ensure iptables default deny firewall policy | Configure SSH to be allowed in" + iptables: + chain: INPUT + protocol: tcp + destination_port: 22 + jump: ACCEPT + ctstate: 'NEW,ESTABLISHED' + + - name: "AUTOMATED | 3.5.3.2.3 | PATCH | Ensure iptables default deny firewall policy | Configure SSH to be allowed out" + iptables: + chain: OUTPUT + protocol: tcp + source_port: 22 + jump: ACCEPT + ctstate: 'NEW,ESTABLISHED' + + - 
name: "AUTOMATED | 3.5.3.2.3 | PATCH | Ensure iptables default deny firewall policy | Enable apt traffic" + iptables: + chain: INPUT + ctstate: 'ESTABLISHED' + jump: ACCEPT + + - name: "AUTOMATED | 3.5.3.2.3 | PATCH | Ensure iptables default deny firewall policy | Set drop items" + iptables: + policy: DROP + chain: "{{ item }}" + with_items: + - INPUT + - FORWARD + - OUTPUT + when: + - ubtu20cis_rule_3_5_3_2_3 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv4_required + - not system_is_ec2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.2.3 + - iptables + + +- name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure iptables firewall rules exist for all open ports" + block: + - name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure iptables firewall rules exist for all open ports | Get list of open ports" + command: ss -4tuln + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_5_3_2_4_open_ports + + - name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure iptables firewall rules exist for all open ports | Get list of rules" + command: iptables -L INPUT -v -n + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_5_3_2_4_current_rules + + - name: "AUTOMATED | 3.5.3.2.4 | AUDIT | Ensure iptables firewall rules exist for all open ports | Alert about settings" + debug: + msg: + - "ALERT!!!!Below is the list the open ports and current rules" + - "Please create a rule for any open port that does not have a current rule" + - "Open Ports:" + - "{{ ubtu20cis_3_5_3_2_4_open_ports.stdout_lines }}" + - "Current Rules:" + - "{{ ubtu20cis_3_5_3_2_4_current_rules.stdout_lines }}" + when: + - ubtu20cis_rule_3_5_3_2_4 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv4_required + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.3.2.4 + - iptables + +# --------------- +# --------------- +# This is not a control however using the iptables module only writes to memery +# if a reboot occurs that means changes can revert. 
This task will make the +# above iptables settings permanent +# --------------- +# --------------- +- name: "Make IPTables persistent | Not a control" + block: + - name: "Make IPTables persistent | Install iptables-persistent" + apt: + name: iptables-persistent + state: present + + - name: "Make IPTables persistent | Save to persistent files" + shell: bash -c "iptables-save > /etc/iptables/rules.v4" + changed_when: ubtu20cis_iptables_save.rc == 0 + failed_when: ubtu20cis_iptables_save.rc > 0 + register: ubtu20cis_iptables_save + when: + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_save_iptables_cis_rules + - ubtu20cis_rule_3_5_3_2_1 or + ubtu20cis_rule_3_5_3_2_2 or + ubtu20cis_rule_3_5_3_2_3 or + ubtu20cis_rule_3_5_3_2_4 + +- name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables loopback traffic is configured" + block: + - name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables loopback traffic is configured | INPUT loopback ACCEPT" + iptables: + action: append + chain: INPUT + in_interface: lo + jump: ACCEPT + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables loopback traffic is configured | OUTPUT loopback ACCEPT" + iptables: + action: append + chain: OUTPUT + out_interface: lo + jump: ACCEPT + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.1 | PATCH | Ensure ip6tables loopback traffic is configured | INPUT loopback drop" + iptables: + action: append + chain: INPUT + source: ::1 + jump: DROP + ip_version: ipv6 + when: + - ubtu20cis_rule_3_5_3_3_1 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv6_required + - not ubtu20cis_ipv4_required + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.3.1 + - ip6tables + +- name: "MANUAL | 3.5.3.3.2 | PATCH | Ensure ip6tables outbound and established connections are configured" + iptables: + action: append + chain: '{{ item.chain }}' + protocol: '{{ item.protocol }}' + match: state + ctstate: '{{ item.ctstate }}' + jump: ACCEPT + ip_version: ipv6 + with_items: + - { chain: OUTPUT, protocol: tcp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: udp, ctstate: 'NEW,ESTABLISHED' } + - { chain: OUTPUT, protocol: icmp, ctstate: 'NEW,ESTABLISHED' } + - { chain: INPUT, protocol: tcp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: udp, ctstate: 'ESTABLISHED' } + - { chain: INPUT, protocol: icmp, ctstate: 'ESTABLISHED' } + when: + - ubtu20cis_rule_3_5_3_3_2 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv6_required + - not ubtu20cis_ipv4_required + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_3.5.3.3.2 + - ip6tables + +- name: "AUTOMATED | 3.5.3.3.3 | PATCH | Ensure ip6tables default deny firewall policy" + block: + - name: "3.5.3.3.3 | PATCH | Ensure ip6tables default deny firewall policy | Configure SSH to be allowed out" + iptables: + chain: OUTPUT + protocol: tcp + source_port: 22 + jump: ACCEPT + ctstate: 'NEW,ESTABLISHED' + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.3 | PATCH | Ensure ip6tables default deny firewall policy | Enable apt traffic" + iptables: + chain: INPUT + ctstate: 'ESTABLISHED' + jump: ACCEPT + ip_version: ipv6 + + - name: "AUTOMATED | 3.5.3.3.3 | PATCH | Ensure ip6tables default deny firewall policy | Set drop items" + iptables: + policy: DROP + chain: "{{ item }}" + ip_version: ipv6 + with_items: + - INPUT + - FORWARD + - OUTPUT + when: + - ubtu20cis_rule_3_5_3_3_3 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv6_required + - not ubtu20cis_ipv4_required + 
tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_3.5.3.3.3 + - ip6tables + +- name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports" + block: + - name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Get list of open ports" + command: ss -6tuln + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_5_3_3_4_open_ports + + - name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Get list of rules" + command: ip6tables -L INPUT -v -n + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_3_5_3_3_4_current_rules + + - name: "AUTOMATED | 3.5.3.3.4 | AUDIT | Ensure ip6tables firewall rules exist for all open ports | Alert about settings" + debug: + msg: + - "ALERT!!!!Below is the list the open ports and current rules" + - "Please create a rule for any open port that does not have a current rule" + - "Open Ports:" + - "{{ ubtu20cis_3_5_3_3_4_open_ports.stdout_lines }}" + - "Current Rules:" + - "{{ ubtu20cis_3_5_3_3_4_current_rules.stdout_lines }}" + when: + - ubtu20cis_rule_3_5_3_3_4 + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv6_required + - not ubtu20cis_ipv4_required + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_3.5.3.3.4 + - ip6tables + +# --------------- +# --------------- +# This is not a control however using the ip6tables module only writes to memery +# if a reboot occurs that means changes can revert. This task will make the +# above ip6tables settings permanent +# --------------- +# --------------- +- name: "Make IP6Tables persistent | Not a control" + block: + - name: "Make IP6Tables persistent | Install iptables-persistent" + apt: + name: iptables-persistent + state: present + + - name: "Make IP6Tables persistent | Save to persistent files" + shell: bash -c "ip6tables-save > /etc/iptables/rules.v6" + changed_when: ubtu20cis_ip6tables_save.rc == 0 + failed_when: ubtu20cis_ip6tables_save.rc > 0 + register: ubtu20cis_ip6tables_save + when: + - ubtu20cis_firewall_package == "iptables" + - ubtu20cis_ipv6_required + - not ubtu20cis_ipv4_required + - ubtu20cis_save_iptables_cis_rules + - ubtu20cis_rule_3_5_3_3_1 or + ubtu20cis_rule_3_5_3_3_2 or + ubtu20cis_rule_3_5_3_3_3 or + ubtu20cis_rule_3_5_3_3_4 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/main.yml new file mode 100644 index 0000000..ed622e8 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_3/main.yml @@ -0,0 +1,15 @@ +--- +- name: "SECTION | 3.1 | Disable unused network protocols and devices" + include: cis_3.1.x.yml + +- name: "SECTION | 3.2 | Network Parameters Host Only" + include: cis_3.2.x.yml + +- name: "SECTION | 3.3 | Network Parameters Host and Router" + include: cis_3.3.x.yml + +- name: "SECTION | 3.4 | Uncommon Network Protocols" + include: cis_3.4.x.yml + +- name: "SECTION | 3.5 | Firewall Configuration" + include: cis_3.5.x.yml diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.1.x.yml new file mode 100644 index 0000000..3e743eb --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.1.x.yml @@ -0,0 +1,100 @@ +--- +- name: "AUTOMATED | 4.1.1.1 | PATCH | Ensure auditd is installed" + apt: + name: ['auditd', 'audispd-plugins'] + state: 
present + when: + - ubtu20cis_rule_4_1_1_1 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.1.1 + - auditd + +- name: "AUTOMATED | 4.1.1.2 | PATCH | Ensure auditd service is enabled" + service: + name: auditd + state: started + enabled: yes + when: + - ubtu20cis_rule_4_1_1_2 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.1.2 + - auditd + +- name: "AUTOMATED | 4.1.1.3 | PATCH | Ensure auditing for processes that start prior to auditd is enabled" + block: + - name: "AUTOMATED | 4.1.1.3 | AUDIT | Ensure auditing for processes that start prior to auditd is enabled | Get GRUB_CMDLINE_LINUX" + shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | cut -f2 -d'"' + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_4_1_1_3_cmdline_settings + + - name: "AUTOMATED | 4.1.1.3 | PATCH | Ensure auditing for processes that start prior to auditd is enabled | Add setting if doesn't exist" + lineinfile: + path: /etc/default/grub + regexp: '^GRUB_CMDLINE_LINUX=' + line: 'GRUB_CMDLINE_LINUX="{{ ubtu20cis_4_1_1_3_cmdline_settings.stdout }} audit=1"' + when: "'audit=' not in ubtu20cis_4_1_1_3_cmdline_settings.stdout" + notify: grub update + + - name: "AUTOMATED | 4.1.1.3 | PATCH | Ensure auditing for processes that start prior to auditd is enabled | Update setting if exists" + replace: + dest: /etc/default/grub + regexp: 'audit=([0-9]+)' + replace: 'audit=1' + after: '^GRUB_CMDLINE_LINUX="' + before: '"' + notify: grub update + when: "'audit=' in ubtu20cis_4_1_1_3_cmdline_settings.stdout" + when: + - ubtu20cis_rule_4_1_1_3 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.1.3 + - auditd + +- name: "AUTOMATED | 4.1.1.4 | PATCH | Ensure audit_backlog_limit is sufficient" + block: + - name: "AUTOMATED | 4.1.1.4 | PATCH | Ensure audit_backlog_limit is sufficient | Get current GRUB_CMDLINE_LINUX" + shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | cut -f2 -d'"' + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_4_1_1_4_cmdline_settings + + - name: "AUTOMATED | 4.1.1.4 | PATCH | Ensure audit_backlog_limit is sufficient | Add setting if doesn't exist" + lineinfile: + path: /etc/default/grub + regexp: '^GRUB_CMDLINE_LINUX=' + line: 'GRUB_CMDLINE_LINUX="{{ ubtu20cis_4_1_1_4_cmdline_settings.stdout }} audit_backlog_limit={{ ubtu20cis_audit_back_log_limit }}"' + notify: grub update + when: "'audit_backlog_limit=' not in ubtu20cis_4_1_1_4_cmdline_settings.stdout" + + - name: "AUTOMATED | 4.1.1.4 | PATCH | Ensure audit_backlog_limit is sufficient | Update setting if exists" + replace: + dest: /etc/default/grub + regexp: 'audit_backlog_limit=([0-9]+)' + replace: 'audit_backlog_limit={{ ubtu20cis_audit_back_log_limit }}' + after: '^GRUB_CMDLINE_LINUX="' + before: '"' + notify: grub update + when: + - ubtu20cis_rule_4_1_1_4 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.1.4 + - auditd diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.2.x.yml new file mode 100644 index 0000000..0fb91b2 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.2.x.yml @@ -0,0 +1,53 @@ +--- +- name: "AUTOMATED | 4.1.2.1 | PATCH | Ensure audit log storage size is configured" + lineinfile: + dest: /etc/audit/auditd.conf + regexp: "^max_log_file( |=)" + line: "max_log_file = {{ ubtu20cis_max_log_file_size }}" + state: 
present + notify: restart auditd + when: + - ubtu20cis_rule_4_1_2_1 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.2.1 + - auditd + +- name: "AUTOMATED | 4.1.2.2 | PATCH | Ensure audit logs are not automatically deleted" + lineinfile: + path: /etc/audit/auditd.conf + regexp: '^max_log_file_action' + line: "max_log_file_action = {{ ubtu20cis_auditd['max_log_file_action'] }}" + notify: restart auditd + when: + - ubtu20cis_rule_4_1_2_2 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.2.2 + - auditd + +- name: "AUTOMATED | 4.1.2.3 | PATCH | Ensure system is disabled when audit logs are full" + lineinfile: + path: /etc/audit/auditd.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^space_left_action', line: 'space_left_action = email' } + - { regexp: '^action_mail_acct', line: 'action_mail_acct = root' } + - { regexp: '^admin_space_left_action', line: 'admin_space_left_action = halt' } + notify: restart auditd + when: + - ubtu20cis_rule_4_1_2_3 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.2.3 + - auditd diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.x.yml new file mode 100644 index 0000000..f6eb574 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.1.x.yml @@ -0,0 +1,279 @@ +--- +- name: "AUTOMATED | 4.1.3 | PATCH | Ensure events that modify date and time information are collected" + template: + src: audit/ubtu20cis_4_1_3_timechange.rules.j2 + dest: /etc/audit/rules.d/time-change.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_3 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.3 + - auditd + +- name: "AUTOMATED | 4.1.4 | PATCH | Ensure events that modify user/group information are collected" + template: + src: audit/ubtu20cis_4_1_4_identity.rules.j2 + dest: /etc/audit/rules.d/identity.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_4 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.4 + - auditd + +- name: "AUTOMATED | 4.1.5 | PATCH | Ensure events that modify the system's network environment are collected" + template: + src: audit/ubtu20cis_4_1_5_systemlocale.rules.j2 + dest: /etc/audit/rules.d/system-locale.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_5 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.5 + - auditd + +- name: "AUTOMATED | 4.1.6 | PATCH | Ensure events that modify the system's Mandatory Access Controls are collected" + template: + src: audit/ubtu20cis_4_1_6_macpolicy.rules.j2 + dest: /etc/audit/rules.d/MAC-policy.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_6 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.6 + - auditd + +- name: "AUTOMATED | 4.1.7 | PATCH | Ensure login and logout events are collected" + template: + src: audit/ubtu20cis_4_1_7_logins.rules.j2 + dest: /etc/audit/rules.d/logins.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_7 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.7 + - auditd + +- name: "AUTOMATED | 4.1.8 | PATCH | Ensure 
session initiation information is collected" + template: + src: audit/ubtu20cis_4_1_8_session.rules.j2 + dest: /etc/audit/rules.d/session.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_8 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.8 + - auditd + +- name: "AUTOMATED | 4.1.9 | PATCH | Ensure discretionary access control permission modification events are collected" + template: + src: audit/ubtu20cis_4_1_9_permmod.rules.j2 + dest: /etc/audit/rules.d/perm_mod.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_9 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.9 + - auditd + +- name: "AUTOMATED | 4.1.10 | PATCH | Ensure unsuccessful unauthorized file access attempts are collected" + template: + src: audit/ubtu20cis_4_1_10_access.rules.j2 + dest: /etc/audit/rules.d/access.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_10 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.10 + - auditd + +- name: "AUTOMATED | 4.1.11 | PATCH | Ensure use of privileged commands is collected" + block: + - name: "AUTOMATED | 4.1.11 | AUDIT | Ensure use of privileged commands is collected | Get list of privileged programs" + shell: for i in $(df | grep '^/dev' | awk '{ print $NF }'); do find $i -xdev -type f -perm -4000 -o -type f -perm -2000 2>/dev/null; done + register: priv_procs + changed_when: no + check_mode: false + + - name: "AUTOMATED | 4.1.11 | PATCH | Ensure use of privileged commands is collected | Set privileged rules" + template: + src: audit/ubtu20cis_4_1_11_privileged.rules.j2 + dest: /etc/audit/rules.d/privileged.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_11 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.11 + - auditd + +- name: "AUTOMATED | 4.1.12 | PATCH | Ensure successful file system mounts are collected" + template: + src: audit/ubtu20cis_4_1_12_audit.rules.j2 + dest: /etc/audit/rules.d/audit.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + ubtu20cis_rule_4_1_12 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.12 + - auditd + +- name: "AUTOMATED | 4.1.13 | PATCH | Ensure file deletion events by users are collected" + template: + src: audit/ubtu20cis_4_1_13_delete.rules.j2 + dest: /etc/audit/rules.d/delete.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_13 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.13 + - auditd + +- name: "AUTOMATED | 4.1.14 | PATCH | Ensure changes to system administration scope (sudoers) is collected" + template: + src: audit/ubtu20cis_4_1_14_scope.rules.j2 + dest: /etc/audit/rules.d/scope.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_14 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.14 + - auditd + +- name: "AUTOMATED | 4.1.15 | PATCH | Ensure system administrator command executions (sudo) are collected" + template: + src: audit/ubtu20cis_4_1_15_actions.rules.j2 + dest: /etc/audit/rules.d/actions.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_15 + tags: + - level2-server + - level2-workstation 
+ - automated + - patch + - rule_4.1.15 + - auditd + +- name: "AUTOMATED | 4.1.16 | PATCH | Ensure kernel module loading and unloading is collected" + template: + src: audit/ubtu20cis_4_1_16_modules.rules.j2 + dest: /etc/audit/rules.d/modules.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_16 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_4.1.16 + - auditd + +- name: "AUTOMATED | 4.1.17 | PATCH | Ensure the audit configuration is immutable" + template: + src: audit/ubtu20cis_4_1_17_99finalize.rules.j2 + dest: /etc/audit/rules.d/99-finalize.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + when: + - ubtu20cis_rule_4_1_17 + tags: + - level2-server + - level2-workstation + - automated + - scored + - patch + - rule_4.1.17 + - auditd diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.1.x.yml new file mode 100644 index 0000000..4fff92c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.1.x.yml @@ -0,0 +1,153 @@ +--- +- name: "AUTOMATED | 4.2.1.1 | PATCH | Ensure rsyslog is installed" + apt: + name: rsyslog + state: present + when: + - ubtu20cis_rule_4_2_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.1 + - rsyslog + - apt + +- name: "AUTOMATED | 4.2.1.2 | PATCH | Ensure rsyslog Service is enabled" + service: + name: rsyslog + enabled: yes + when: + - ubtu20cis_rule_4_2_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.2 + - rsyslog + +- name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured" + block: + - name: "MANUAL | 4.2.1.3 | AUDIT | Ensure logging is configured | Find configuration file" + shell: grep -r "*.emerg" /etc/* | cut -f1 -d":" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_4_2_1_3_rsyslog_config_path + + - name: "MANUAL | 4.2.1.3 | AUDIT | Ensure logging is configured | Gather rsyslog current config" + command: "cat {{ ubtu20cis_4_2_1_3_rsyslog_config_path.stdout }}" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_4_2_1_3_rsyslog_config + + - name: "MANUAL | 4.2.1.3 | AUDIT | Ensure logging is configured | Message out config" + debug: + msg: + - "Alert!!!Below is the current logging configurations for rsyslog, please review" + - "{{ ubtu20cis_4_2_1_3_rsyslog_config.stdout_lines }}" + when: not ubtu20cis_rsyslog_ansible_managed + + - name: "MANUAL | 4.2.1.3 | PATCH | Ensure logging is configured | Automated rsyslog configuration" + lineinfile: + path: "{{ ubtu20cis_4_2_1_3_rsyslog_config_path.stdout }}" + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + insertafter: "{{ item.insertafter }}" + with_items: + - { regexp: '^\*.emerg', line: '*.emerg :omusrmsg:*', insertafter: '^# Emergencies are sent to everybody logged in' } + - { regexp: '^auth,authpriv.\*', line: 'auth,authpriv.* /var/log/auth.log', insertafter: '^# First some standard log files. Log by facility' } + - { regexp: '^mail.\*|^#mail.\*', line: 'mail.* -/var/log/mail', insertafter: '^# First some standard log files' } + - { regexp: '^mail.info|^#mail.info', line: 'mail.info -/var/log/mail.info', insertafter: '^# Logging for the mail system' } + - { regexp: '^mail.warn|^#mail.warn', line: 'mail.warn -/var/log/mail.warn', insertafter: '^# Logging for the mail system.' 
} + - { regexp: '^mail.err|^#mail.err', line: 'mail.err /var/log/mail.err', insertafter: '^# Logging for the mail system.' } + - { regexp: '^news.crit|^#news.crit', line: 'news.crit -/var/log/news/news.crit', insertafter: '^# First some standard log files'} + - { regexp: '^news.err|^#news.err', line: 'news.err -/var/log/news/news.err', insertafter: '^# First some standard log files' } + - { regexp: '^news.notice|^#news.notice', line: 'news.notice -/var/log/news/news.notice', insertafter: '^# First some standard log files' } + - { regexp: '^\*.=warning;\*.=err|^#\*.=warning;\*.=err', line: '*.=warning;*.=err -/var/log/warn', insertafter: '^# First some standard log files' } + - { regexp: '^\*.crit|^#\*.crit', line: '*.crit /var/log/warn', insertafter: '^# First some standard log files' } + - { regexp: '^\*.\*;mail.none;news.none|^#\*.\*;mail.none;news.none', line: '*.*;mail.none;news.none -/var/log/messages', insertafter: '^# First some standard log files' } + - { regexp: '^local0,local1.\*|^#local0,local1.\*', line: 'local0,local1.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + - { regexp: '^local2,local3.\*|^#local2,local3.\*', line: 'local2,local3.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + - { regexp: '^local4,local5.\*|^#local4,local5.\*', line: 'local4,local5.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + - { regexp: '^local6,local7.\*|^#local6,local7.\*', line: 'local6,local7.* -/var/log/localmessages', insertafter: '^# First some standard log files' } + notify: restart rsyslog + when: ubtu20cis_rsyslog_ansible_managed + when: + - ubtu20cis_rule_4_2_1_3 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_4.2.1.3 + - rsyslog + +- name: "AUTOMATED | 4.2.1.4 | PATCH | Ensure rsyslog default file permissions configured" + lineinfile: + path: /etc/rsyslog.conf + regexp: '^\$FileCreateMode|^#\$FileCreateMode' + line: '$FileCreateMode 0640' + notify: restart rsyslog + when: + - ubtu20cis_rule_4_2_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.4 + - rsyslog + +- name: "AUTOMATED | 4.2.1.5 | PATCH | Ensure rsyslog is configured to send logs to a remote log host" + blockinfile: + path: /etc/rsyslog.conf + block: | + ##Enable sending of logs over TCP add the following line: + *.* @@{{ ubtu20cis_remote_log_server }} + insertafter: EOF + when: + - ubtu20cis_rule_4_2_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.1.5 + - rsyslog + +- name: "MANUAL | 4.2.1.6 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts" + block: + - name: "MANUAL | 4.2.1.6 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts | When not a log host" + replace: + path: /etc/rsyslog.conf + regexp: '({{ item }})' + replace: '#\1' + with_items: + - '^(\$ModLoad)' + - '^(\$InputTCPServerRun)' + notify: restart rsyslog + when: not ubtu20cis_system_is_log_server + + - name: "MANUAL | 4.2.1.6 | PATCH | Ensure remote rsyslog messages are only accepted on designated log hosts | When a log server" + lineinfile: + path: /etc/rsyslog.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^\$ModLoad|^#\$ModLoad', line: '$ModLoad imtcp' } + - { regexp: '^\$InputTCPServerRun|^#\$InputTCPServerRun', line: '$InputTCPServerRun 514' } + notify: restart rsyslog + when: ubtu20cis_system_is_log_server + when: + - ubtu20cis_rule_4_2_1_6 + tags: 
+ - level1-server + - level1-workstation + - manual + - patch + - rule_4.2.1.6 + - rsyslog diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.2.x.yml new file mode 100644 index 0000000..891ffb2 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.2.x.yml @@ -0,0 +1,50 @@ +--- +- name: "AUTOMATED | 4.2.2.1 | PATCH | Ensure journald is configured to send logs to rsyslog" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^ForwardToSyslog|^#ForwardToSyslog' + line: 'ForwardToSyslog=yes' + insertafter: '\[Journal\]' + when: + - ubtu20cis_rule_4_2_2_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.2.1 + - rsyslog + - journald + +- name: "4.2.2.2 | PATCH | Ensure journald is configured to compress large log files" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^Compress|^#Compress' + line: 'Compress=yes' + insertafter: '\[Journal\]' + when: + - ubtu20cis_rule_4_2_2_2 + tags: + - level1-server + - level1-workstation + - patch + - rule_4.2.2.2 + - rsyslog + - journald + +- name: "AUTOMATED | 4.2.2.3 | PATCH | Ensure journald is configured to write logfiles to persistent disk" + lineinfile: + path: /etc/systemd/journald.conf + regexp: '^Storage|^#Storage' + line: 'Storage=persistent' + insertafter: '\[Journal\]' + when: + - ubtu20cis_rule_4_2_2_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.2.3 + - rsyslog + - journald diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.3.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.3.yml new file mode 100644 index 0000000..033fb27 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.2.3.yml @@ -0,0 +1,16 @@ +--- +- name: "AUTOMATED | 4.2.3 | PATCH | Ensure permissions on all logfiles are configured" + command: find /var/log -type f -exec chmod g-wx,o-rwx "{}" + -o -type d -exec chmod g-w,o-rwx "{}" + + changed_when: ubtu20cis_4_2_3_logfile_perms_status.rc == 0 + check_mode: false + register: ubtu20cis_4_2_3_logfile_perms_status + when: + - ubtu20cis_rule_4_2_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.2.3 + - logfiles + - permissions diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.3.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.3.yml new file mode 100644 index 0000000..362f632 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.3.yml @@ -0,0 +1,26 @@ +--- +- name: "MANUAL | 4.3 | PATCH | Ensure logrotate is configured" + block: + - name: "MANUAL | 4.3 | PATCH | Ensure logrotate is configured | Get logrotate files" + find: + paths: /etc/logrotate.d/ + check_mode: false + register: ubtu20cis_4_3_logrotate_files + + - name: "MANUAL | 4.3 | PATCH | Ensure logrotate is configured | Set rotation configurations" + replace: + path: "{{ item.path }}" + regexp: '^(\s*)(daily|weekly|monthly|yearly)$' + replace: "\\1{{ ubtu20cis_logrotate }}" + with_items: + - "{{ ubtu20cis_4_3_logrotate_files.files }}" + - { path: "/etc/logrotate.conf" } + when: + - ubtu20cis_rule_4_3 + tags: + - level1-server + - level1-workstation + - manual + - patch + - rule_4.3 + - logrotate diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.4.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.4.yml new file mode 100644 index 0000000..448e640 --- /dev/null +++ 
b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/cis_4.4.yml @@ -0,0 +1,15 @@ +--- +- name: "AUTOMATED | 4.4 | PATCH | Ensure logrotate assigns appropriate permissions" + lineinfile: + path: /etc/logrotate.conf + regexp: '^create' + line: ' create {{ ubtu20cis_logrotate_create_settings }}' + when: + - ubtu20cis_rule_4_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_4.4 + - logrotate diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/main.yml new file mode 100644 index 0000000..c0fb49c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_4/main.yml @@ -0,0 +1,24 @@ +--- +- name: "SECTION | 4.1.1 | Ensure auditing is enabled" + include: cis_4.1.1.x.yml + +- name: "SECTION | 4.1.2 | Configure Data Retention" + include: cis_4.1.2.x.yml + +- name: "SECTION | 4.1.x | Login Settings" + include: cis_4.1.x.yml + +- name: "SECTION | 4.2.1 | Configure rsyslog" + include: cis_4.2.1.x.yml + +- name: "SECTION | 4.2.2 | Configure journald" + include: cis_4.2.2.x.yml + +- name: "SECTION | 4.2.3 | Ensure permissions on all logfiles are configured" + include: cis_4.2.3.yml + +- name: "SECTION | 4.3 | Ensure logrotate is configured" + include: cis_4.3.yml + +- name: "SECTION | 4.4 | Ensure logrotate assigns appropriate permissions" + include: cis_4.4.yml diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.1.x.yml new file mode 100644 index 0000000..7065799 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.1.x.yml @@ -0,0 +1,159 @@ +--- +- name: "AUTOMATED | 5.1.1 | PATCH | Ensure cron daemon is enabled and running" + service: + name: cron + state: started + enabled: yes + when: + - ubtu20cis_rule_5_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.1 + - cron + +- name: "AUTOMATED | 5.1.2 | PATCH | Ensure permissions on /etc/crontab are configured" + file: + path: /etc/crontab + owner: root + group: root + mode: 0600 + when: + - ubtu20cis_rule_5_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.2 + - cron + +- name: "AUTOMATED | 5.1.3 | PATCH | Ensure permissions on /etc/cron.hourly are configured" + file: + path: /etc/cron.hourly + owner: root + group: root + mode: 0700 + when: + - ubtu20cis_rule_5_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.3 + - cron + +- name: "AUTOMATED | 5.1.4 | PATCH | Ensure permissions on /etc/cron.daily are configured" + file: + path: /etc/cron.daily + owner: root + group: root + mode: 0700 + when: + - ubtu20cis_rule_5_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.4 + - cron + +- name: "AUTOMATED | 5.1.5 | PATCH | Ensure permissions on /etc/cron.weekly are configured" + file: + path: /etc/cron.weekly + owner: root + group: root + mode: 0700 + when: + - ubtu20cis_rule_5_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.5 + - cron + +- name: "AUTOMATED | 5.1.6 | PATCH | Ensure permissions on /etc/cron.monthly are configured" + file: + path: /etc/cron.monthly + owner: root + group: root + mode: 0700 + when: + - ubtu20cis_rule_5_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.6 + - cron + +- name: "AUTOMATED | 5.1.7 | PATCH | Ensure permissions on /etc/cron.d are configured" + file: + path: 
/etc/cron.d + owner: root + group: root + mode: 0700 + when: + - ubtu20cis_rule_5_1_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.7 + - cron + +- name: "AUTOMATED | 5.1.8 | PATCH | Ensure at/cron is restricted to authorized users" + block: + - name: "AUTOMATED | 5.1.8 | PATCH | Ensure at/cron is restricted to authorized users | Remove cron.deny" + file: + path: /etc/cron.deny + state: absent + + - name: "AUTOMATED | 5.1.8 | PATCH | Ensure at/cron is restricted to authorized users | Create cron.allow" + file: + path: /etc/cron.allow + owner: root + group: root + mode: 0640 + state: touch + when: + - ubtu20cis_rule_5_1_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.8 + - cron + +- name: "AUTOMATED | 5.1.9 | PATCH | Ensure at is restricted to authorized users" + block: + - name: "AUTOMATED | 5.1.9 | PATCH | Ensure at is restricted to authorized users | Remove at.deny" + file: + path: /etc/at.deny + state: absent + + - name: "AUTOMATED | 5.1.9 | PATCH | Ensure at is restricted to authorized users | Create at.allow" + file: + path: /etc/at.allow + owner: root + group: root + mode: 0640 + state: touch + when: + - ubtu20cis_rule_5_1_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.1.9 + - cron diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.2.x.yml new file mode 100644 index 0000000..a001e3b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.2.x.yml @@ -0,0 +1,46 @@ +--- +- name: "AUTOMATED | 5.2.1 | PATCH | Ensure sudo is installed" + apt: + name: "{{ ubtu20cis_sudo_package }}" + state: present + when: + - ubtu20cis_rule_5_2_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.2.1 + - sudo + +- name: "AUTOMATED | 5.2.2 | PATCH | Ensure sudo commands use pty" + lineinfile: + path: /etc/sudoers + regexp: '^Defaults use_' + line: 'Defaults use_pty' + insertafter: '^Defaults' + when: + - ubtu20cis_rule_5_2_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.2.2 + - sudo + +- name: "AUTOMATED | 5.2.3 | PATCH | Ensure sudo log file exists" + lineinfile: + path: /etc/sudoers + regexp: '^Defaults logfile' + line: 'Defaults logfile="{{ ubtu20cis_sudo_logfile }}"' + insertafter: '^Defaults' + when: + - ubtu20cis_rule_5_2_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.2.3 + - sudo diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.3.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.3.x.yml new file mode 100644 index 0000000..3eb9647 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.3.x.yml @@ -0,0 +1,413 @@ +--- +- name: "AUTOMATED | 5.3.1 | PATCH | Ensure permissions on /etc/ssh/sshd_config are configured" + file: + path: /etc/ssh/sshd_config + owner: root + group: root + mode: 0600 + when: + - ubtu20cis_rule_5_3_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.1 + - ssh + +- name: "AUTOMATED | 5.3.2 | PATCH | Ensure permissions on SSH private host key files are configured" + block: + - name: "AUTOMATED | 5.3.2 | AUDIT | Ensure permissions on SSH private host key files are configured | Find ssh_host private keys" + find: + paths: /etc/ssh + patterns: 'ssh_host_*_key' + register: ubtu20cis_5_3_2_ssh_host_priv_keys + + - name: "AUTOMATED | 5.3.2 | PATCH | Ensure 
permissions on SSH private host key files are configured | Set permissions" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0600 + with_items: + - "{{ ubtu20cis_5_3_2_ssh_host_priv_keys.files }}" + when: + - ubtu20cis_rule_5_3_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.2 + - ssh + +- name: "AUTOMATED | 5.3.3 | PATCH | Ensure permissions on SSH public host key files are configured" + block: + - name: "AUTOMATED | 5.3.3 | AUDIT | Ensure permissions on SSH public host key files are configured | Find ssh_host public keys" + find: + paths: /etc/ssh + patterns: 'ssh_host_*_key.pub' + register: ubtu20cis_5_3_3_ssh_host_pub_keys + + - name: "AUTOMATED | 5.3.3 | PATCH | Ensure permissions on SSH public host key files are configured | Set permissions" + file: + path: "{{ item.path }}" + owner: root + group: root + mode: 0644 + with_items: + - "{{ ubtu20cis_5_3_3_ssh_host_pub_keys.files }}" + when: + - ubtu20cis_rule_5_3_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.3 + - ssh + +- name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited" + block: + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add allowed users" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^AllowUsers|^#AllowUsers' + line: 'AllowUsers {{ ubtu20cis_sshd.allow_users }}' + notify: restart sshd + when: "ubtu20cis_sshd['allow_users']|default('') != ''" + + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add allowed groups" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^AllowGroups|^#AllowGroups' + line: 'AllowGroups {{ ubtu20cis_sshd.allow_groups }}' + notify: restart sshd + when: "ubtu20cis_sshd['allow_groups']|default('') != ''" + + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add deny users" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^DenyUsers|^#DenyUsers' + line: 'DenyUsers {{ ubtu20cis_sshd.deny_users }}' + notify: restart sshd + when: "ubtu20cis_sshd['deny_users']|default('') != ''" + + - name: "AUTOMATED | 5.3.4 | PATCH | Ensure SSH access is limited | Add deny groups" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^DenyGroups|^#DenyGroups' + line: 'DenyGroups {{ ubtu20cis_sshd.deny_groups }}' + notify: restart sshd + when: "ubtu20cis_sshd['deny_groups']|default('') != ''" + when: + - ubtu20cis_rule_5_3_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.4 + - ssh + +- name: "AUTOMATED | 5.3.5 | PATCH | Ensure SSH LogLevel is appropriate" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^LogLevel|^#LogLevel' + line: 'LogLevel {{ ubtu20cis_sshd.log_level }}' + insertafter: '^# Logging' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.5 + - ssh + +- name: "AUTOMATED | 5.3.6 | PATCH | Ensure SSH X11 forwarding is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^X11Forwarding|^#X11Forwarding' + line: 'X11Forwarding no' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_6 + tags: + - level2-server + - level1-workstation + - automated + - patch + - rule_5.3.6 + - ssh + +- name: "AUTOMATED | 5.3.7 | PATCH | Ensure SSH MaxAuthTries is set to 4 or less" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MaxAuthTries|^#MaxAuthTries' + line: 'MaxAuthTries {{ ubtu20cis_sshd.max_auth_tries }}' + insertafter: '^# Authentication' + notify: restart sshd + when: + - 
ubtu20cis_rule_5_3_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.7 + - ssh + +- name: "AUTOMATED | 5.3.8 | PATCH | Ensure SSH IgnoreRhosts is enabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^IgnoreRhosts|^#IgnoreRhosts' + line: 'IgnoreRhosts yes' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.8 + - ssh + +- name: "AUTOMATED | 5.3.9 | PATCH | Ensure SSH HostbasedAuthentication is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^HostbasedAuthentication|^#HostbasedAuthentication' + line: 'HostbasedAuthentication no' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.9 + - ssh + +- name: "AUTOMATED | 5.3.10 | PATCH | Ensure SSH root login is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^PermitRootLogin|^#PermitRootLogin' + line: 'PermitRootLogin no' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_10 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.10 + - ssh + +- name: "AUTOMATED | 5.3.11 | PATCH | Ensure SSH PermitEmptyPasswords is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^PermitEmptyPasswords|^#PermitEmptyPasswords' + line: 'PermitEmptyPasswords no' + insertafter: '# To disable tunneled clear text passwords' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_11 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.11 + - ssh + +- name: "AUTOMATED | 5.3.12 | PATCH | Ensure SSH PermitUserEnvironment is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^PermitUserEnvironment|^#PermitUserEnvironment' + line: 'PermitUserEnvironment no' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_12 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.12 + - ssh + +- name: "AUTOMATED | 5.3.13 | PATCH | Ensure only strong Ciphers are used" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^Ciphers|^#Ciphers' + line: 'Ciphers {{ ubtu20cis_sshd.ciphers }}' + insertafter: '^# Ciphers and keying' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_13 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.13 + - ssh + +- name: "AUTOMATED | 5.3.14 | PATCH | Ensure only strong MAC algorithms are used" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MACs|^#MACs' + line: 'MACs {{ ubtu20cis_sshd.macs }}' + insertafter: '^# Ciphers and keying' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_14 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.14 + - ssh + +- name: "AUTOMATED | 5.3.15 | PATCH | Ensure only strong Key Exchange algorithms are used" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^KexAlgorithms|^#KexAlgorithms' + line: 'KexAlgorithms {{ ubtu20cis_sshd.kex_algorithms }}' + insertafter: '^# Ciphers and keying' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_15 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.15 + - ssh + +- name: "AUTOMATED | 5.3.16 | PATCH | Ensure SSH Idle Timeout Interval is configured" + lineinfile: + path: /etc/ssh/sshd_config + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + with_items: + - { regexp: '^ClientAliveInterval|^#ClientAliveInterval', line: 'ClientAliveInterval {{ ubtu20cis_sshd.client_alive_interval }}' } + - { 
regexp: '^ClientAliveCountMax|^#ClientAliveCountMax', line: 'ClientAliveCountMax {{ ubtu20cis_sshd.client_alive_count_max }}' } + notify: restart sshd + when: + - ubtu20cis_rule_5_3_16 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.16 + - sshd + +- name: "AUTOMATED | 5.3.17 | PATCH | Ensure SSH LoginGraceTime is set to one minute or less" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^LoginGraceTime|^#LoginGraceTime' + line: 'LoginGraceTime {{ ubtu20cis_sshd.login_grace_time }}' + insertafter: '^# Authentication' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_17 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.17 + - ssh + +- name: "AUTOMATED | 5.3.18 | PATCH | Ensure SSH warning banner is configured" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^Banner|^#Banner' + line: Banner /etc/issue.net + insertafter: '^# no default banner path' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_18 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.18 + - ssh + +- name: "AUTOMATED | 5.3.19 | PATCH | Ensure SSH PAM is enabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^UsePAM|^#UsePAM' + line: 'UsePAM yes' + insertafter: '^# and ChallengeResponseAuthentication' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_19 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.19 + - ssh + - pam + +- name: "AUTOMATED | 5.3.20 | PATCH | Ensure SSH AllowTcpForwarding is disabled" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^AllowTcpForwarding|^#AllowTcpForwarding' + line: 'AllowTcpForwarding no' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_20 + tags: + - level2-server + - level2-workstation + - automated + - patch + - rule_5.3.20 + - ssh + +- name: "AUTOMATED | 5.3.21 | PATCH | Ensure SSH MaxStartups is configured" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MaxStartups|^#MaxStartups' + line: 'MaxStartups 10:30:60' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_21 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.21 + - ssh + +- name: "AUTOMATED | 5.3.22 | PATCH | Ensure SSH MaxSessions is set to 4 or less" + lineinfile: + path: /etc/ssh/sshd_config + regexp: '^MaxSessions|^#MaxSessions' + line: 'MaxSessions {{ ubtu20cis_sshd.max_sessions }}' + insertafter: '^# Authentication' + notify: restart sshd + when: + - ubtu20cis_rule_5_3_22 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.3.22 + - ssh diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.4.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.4.x.yml new file mode 100644 index 0000000..8780279 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.4.x.yml @@ -0,0 +1,199 @@ +--- +- name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured" + block: + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Install pam_pwquality module" + apt: + name: libpam-pwquality + state: present + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Add minlen" + lineinfile: + path: /etc/security/pwquality.conf + regexp: '^minlen|^# minlen' + line: minlen = 14 + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Add minclass" + lineinfile: + path: /etc/security/pwquality.conf + 
regexp: '^minclass|^# minclass' + line: 'minclass = 4' + + - name: "AUTOMATED | 5.4.1 | AUDIT | Ensure password creation requirements are configured | Confirm pwquality module in common-password" + command: grep 'password.*requisite.*pam_pwquality.so' /etc/pam.d/common-password + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_4_1_pam_pwquality_state + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Set retry to 3 if pwquality exists" + pamd: + name: common-password + type: password + control: requisite + module_path: pam_pwquality.so + module_arguments: 'retry=3' + state: args_present + when: ubtu20cis_5_4_1_pam_pwquality_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.1 | PATCH | Ensure password creation requirements are configured | Set retry to 3 if pwquality does not exist" + pamd: + name: common-password + type: password + control: required + module_path: pam_permit.so + new_type: password + new_control: requisite + new_module_path: pam_pwquality.so + module_arguments: 'retry=3' + state: after + when: ubtu20cis_5_4_1_pam_pwquality_state.stdout | length == 0 + when: + - ubtu20cis_rule_5_4_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.1 + - pam + +# ------------- +# ------------- +# There is a bug in pam_tally2.so where the use of the audit keyword may log credentials in the case of user error during authentication. +# To work around this bug the CIS documentation has you setting pam_tally2 to the account section. +# Once bug is fixed please set pam_tally2 to the auth sections. We have those commented out in the task +# ------------- +# ------------- + +# ------------- +# ------------- +# figure out why pam_deny kills vagrant user. 
Below is everything working but the pam_deny.so in the last task with_items +# ------------- +# ------------- +- name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured" + command: /bin/true + changed_when: false + failed_when: false + check_mode: false + # block: + # - name: "AUTOMATED | 5.4.2 | AUDIT | Ensure lockout for failed password attempts is configured | Confirm pam_tally2.so module in common-auth" + # # command: grep 'auth.*required.*pam_tally2.so' /etc/pam.d/common-auth + # command: grep 'auth.*required.*pam_tally2.so' /etc/pam.d/common-account + # changed_when: false + # failed_when: false + # check_mode: false + # register: ubtu20cis_5_4_2_pam_tally2_state + + # - name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Set pam_tally2.so settings if exists" + # pamd: + # # name: common-auth + # name: common-account + # # type: auth + # type: account + # control: required + # module_path: pam_tally2.so + # module_arguments: 'onerr=fail + # audit + # silent + # deny=5 + # unlock_time=900' + # when: ubtu20cis_5_4_2_pam_tally2_state.stdout != "" + + # - name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Set pam_tally2.so settings if does not exist" + # lineinfile: + # # path: /etc/pam.d/common-auth + # path: /etc/pam.d/common-account + # # line: 'auth required pam_tally2.so onerr=fail audit silent deny=5 unlock_time=900' + # line: 'account required pam_tally2.so onerr=fail audit silent deny=5 unlock_time=900' + # insertafter: '^# end of pam-auth-update config' + # when: ubtu20cis_5_4_2_pam_tally2_state == "" + + # - name: "AUTOMATED | 5.4.2 | PATCH | Ensure lockout for failed password attempts is configured | Set pam_deny.so and pam_tally.so" + # lineinfile: + # path: /etc/pam.d/common-account + # regexp: "{{ item.regexp }}" + # line: "{{ item.line }}" + # insertafter: '^# end of pam-auth-update config' + # with_items: + # # - { regexp: '^accout.*requisite.*pam_deny.so', line: 'account requisite pam_george.so' } + # - { regexp: '^account.*required.*pam_tally.so', line: 'account required pam_tally.so' } + when: + - ubtu20cis_rule_5_4_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.2 + - pamd + - notimplemented + +- name: "AUTOMATED | 5.4.3 | PATCH | Ensure password reuse is limited" + block: + - name: "AUTOMATED | 5.4.3 | AUDIT | Ensure password reuse is limited | Confirm pam_pwhistory.so in common-password" + command: grep 'password.*required.*pam_pwhistory.so' /etc/pam.d/common-password + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_4_3_pam_pwhistory_state + + - name: "AUTOMATED | 5.4.3 | PATCH | Ensure password reuse is limited | Set remember value if pam_pwhistory exists" + pamd: + name: common-password + type: password + control: required + module_path: pam_pwhistory.so + module_arguments: 'remember={{ ubtu20cis_pamd_pwhistory_remember }}' + state: args_present + when: ubtu20cis_5_4_3_pam_pwhistory_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.3 | PATCH | Ensure password reuse is limited | Set remember value if pam_pwhistory does no exist" + lineinfile: + path: /etc/pam.d/common-password + line: 'password required pam_pwhistory.so remember={{ ubtu20cis_pamd_pwhistory_remember }}' + insertafter: '^# end of pam-auth-update config' + when: ubtu20cis_5_4_3_pam_pwhistory_state.stdout | length == 0 + when: + - ubtu20cis_rule_5_4_3 + tags: + - level1-server + - level1-workstation 
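+# Editor's note (hedged sketch, not part of the upstream role): whichever branch of
+# 5.4.3 above runs, the effective result in /etc/pam.d/common-password should be a
+# single line of the form
+#   password required pam_pwhistory.so remember=<ubtu20cis_pamd_pwhistory_remember>
+# A quick ad-hoc check on a target host (illustrative command only):
+#   grep 'pam_pwhistory.so' /etc/pam.d/common-password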
+ - automated + - patch + - rule_5.4.3 + - pamd + +- name: "AUTOMATED | 5.4.4 | PATCH | Ensure password hashing algorithm is SHA-512" + block: + - name: "AUTOMATED | 5.4.4 | AUDIT | Ensure password hashing algorithm is SHA-512 | Confirm pam_unix.so" + shell: grep -E '^\s*password\s+(\S+\s+)+pam_unix\.so\s+(\S+\s+)*sha512\s*(\S+\s*)*(\s+#.*)?$' /etc/pam.d/common-password + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_4_4_pam_unix_state + + - name: "AUTOMATED | 5.4.4 | PATCH | Ensure password hashing algorithm is SHA-512 | Set hashing if pam_unix.so exists" + pamd: + name: common-password + type: password + control: '[success=1 default=ignore]' + module_path: pam_unix.so + module_arguments: sha512 + state: args_present + when: ubtu20cis_5_4_4_pam_unix_state.stdout | length > 0 + + - name: "AUTOMATED | 5.4.4 | PATCH | Ensure password hashing algorithm is SHA-512 | Set hashing if pam_unix.so does not exist" + lineinfile: + path: /etc/pam.d/common-password + line: 'password [success=1 default=ignore] pam_unix.so sha512' + insertafter: '^# end of pam-auth-update config' + when: ubtu20cis_5_4_4_pam_unix_state.stdout | length == 0 + when: + - ubtu20cis_rule_5_4_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.4.4 + - pamd diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.5.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.5.x.yml new file mode 100644 index 0000000..0a53c08 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.5.x.yml @@ -0,0 +1,266 @@ +--- +- name: "AUTOMATED | 5.5.1.1 | PATCH | Ensure minimum days between password changes is configured" + block: + - name: "AUTOMATED | 5.5.1.1 | PATCH | Ensure minimum days between password changes is configured | Set /etc/login.defs PASS_MIN_DAYS" + lineinfile: + path: /etc/login.defs + regexp: '^PASS_MIN_DAYS|^#PASS_MIN_DAYS' + line: 'PASS_MIN_DAYS {{ ubtu20cis_pass.min_days }}' + + - name: "AUTOMATED | 5.5.1.1 | PATCH | Ensure minimum days between password changes is configured | Set existing users PASS_MIN_DAYS" + command: chage --mindays {{ ubtu20cis_pass.min_days }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu20cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu20cis_disruption_high + when: + - ubtu20cis_rule_5_5_1_1 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.1 + - user + - login + +- name: "AUTOMATED | 5.5.1.2 | PATCH | Ensure password expiration is 365 days or less" + block: + - name: "AUTOMATED | 5.5.1.2 | PATCH | Ensure password expiration is 365 days or less | Set /etc/login.defs PASS_MAX_DAYS" + lineinfile: + path: /etc/login.defs + regexp: '^PASS_MAX_DAYS|^#PASS_MAX_DAYS' + line: 'PASS_MAX_DAYS {{ ubtu20cis_pass.max_days }}' + insertafter: '# Password aging controls' + + - name: "AUTOMATED | 5.5.1.2 | PATCH | Ensure password expiration is 365 days or less | Set existing users PASS_MAX_DAYS" + command: chage --maxdays {{ ubtu20cis_pass.max_days }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu20cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu20cis_disruption_high + when: + - ubtu20cis_rule_5_5_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.2 + - user + - login + +- name: "AUTOMATED | 5.5.1.3 | PATCH | Ensure password expiration warning days is 7 or more" + block: + - name: "AUTOMATED | 5.5.1.3 | PATCH | Ensure 
password expiration warning days is 7 or more | Set /etc/login.defs PASS_WARN_AGE" + lineinfile: + path: /etc/login.defs + regexp: '^PASS_WARN_AGE|^#PASS_WARN_AGE' + line: 'PASS_WARN_AGE {{ ubtu20cis_pass.warn_age }}' + + - name: "AUTOMATED | 5.5.1.3 | PATCH | Ensure password expiration warning days is 7 or more | Set existing users PASS_WARN_AGE" + command: chage --warndays {{ ubtu20cis_pass.warn_age }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu20cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu20cis_disruption_high + when: + - ubtu20cis_rule_5_5_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.3 + - user + - login + +- name: "AUTOMATED | 5.5.1.4 | PATCH | Ensure inactive password lock is 30 days or less" + block: + - name: "AUTOMATED | 5.5.1.4 | PATCH | Ensure inactive password lock is 30 days or less | Set inactive period for new users" + command: useradd -D -f {{ ubtu20cis_pass.inactive }} + failed_when: false + + - name: "AUTOMATED | 5.5.1.4 | PATCH | Ensure inactive password lock is 30 days or less | Set inactive period for existing users" + command: chage --inactive {{ ubtu20cis_pass.inactive }} {{ item }} + failed_when: false + with_items: + - "{{ ubtu20cis_passwd| selectattr('uid', '>=', 1000) | map(attribute='id') | list }}" + when: ubtu20cis_disruption_high + when: + - ubtu20cis_rule_5_5_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.4 + - user + - login + +- name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past" + block: + - name: "AUTOMATED | 5.5.1.5 | AUDIT | Ensure all users last password change date is in the past | Get current date in Unix Time" + shell: echo $(($(date --utc --date "$1" +%s)/86400)) + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_5_1_5_current_time + + - name: "AUTOMATED | 5.5.1.5 | AUDIT | Ensure all users last password change date is in the past | Get list of users with last changed PW date in future" + shell: "cat /etc/shadow | awk -F: '{if($3>{{ ubtu20cis_5_5_1_5_current_time.stdout }})print$1}'" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_5_1_5_user_list + + - name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past | Warn about users" + debug: + msg: + - "WARNING!!!!The following accounts have the last PW change date in the future" + - "{{ ubtu20cis_5_5_1_5_user_list.stdout_lines }}" + when: ubtu20cis_5_5_1_5_user_list.stdout | length > 0 + + - name: "AUTOMATED | 5.5.1.5 | PATCH | Ensure all users last password change date is in the past | Lock accounts with furtre PW changed dates" + command: passwd --expire {{ item }} + failed_when: false + with_items: + - "{{ ubtu20cis_5_5_1_5_user_list.stdout_lines }}" + when: + - ubtu20cis_disruption_high + - ubtu20cis_5_5_1_5_user_list.stdout | length > 0 + when: + - ubtu20cis_rule_5_5_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.1.5 + - user + - login + +- name: "AUTOMATED | 5.5.2 | PATCH | Ensure system accounts are secured" + block: + - name: "AUTOMATED | 5.5.2 | PATCH | Ensure system accounts are secured | Set system accounts to login" + user: + name: "{{ item }}" + shell: /sbin/nologin + with_items: + - "{{ ubtu20cis_passwd | selectattr('uid', '<', 1000) | map(attribute='id') | list }}" + when: + - item != "root" + - item != "sync" + - item != "shutdown" + - item 
!= "halt" + + - name: "AUTOMATED | 5.5.2 | PATCH | Ensure system accounts are secured | Lock non-root system accounts" + user: + name: "{{ item }}" + password_lock: true + with_items: + - "{{ ubtu20cis_passwd| selectattr('uid', '<', 1000) | map(attribute='id') | list }}" + when: + - item != "root" + when: + - ubtu20cis_rule_5_5_2 + - ubtu20cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.2 + - user + - system + +- name: "AUTOMATED | 5.5.3 | PATCH | Ensure default group for the root account is GID 0" + block: + - name: "AUTOMATED | 5.5.3 | PATCH | Ensure default group for the root account is GID 0 | Set root group to GUID 0" + group: + name: root + gid: 0 + + - name: "AUTOMATED | 5.5.3 | PATCH | Ensure default group for the root account is GID 0 | Set root user to root group" + user: + name: root + group: root + when: + - ubtu20cis_rule_5_5_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.3 + - user + - system + +- name: "AUTOMATED | 5.5.4 | PATCH | Ensure default user umask is 027 or more restrictive" + block: + - name: "AUTOMATED | 5.5.4 | AUDIT | Ensure default user umask is 027 or more restrictive" + shell: grep -E '^session\s+optional\s+pam_umask.so' /etc/pam.d/common-session + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_5_4_umask_pam_status + + - name: "AUTOMATED | 5.5.4 | PATCH | Ensure default user umask is 027 or more restrictive" + lineinfile: + path: /etc/pam.d/common-session + line: 'session optional pam_umask.so' + insertbefore: '^# end of pam-auth-update config' + when: ubtu20cis_5_5_4_umask_pam_status.stdout | length == 0 + + - name: "AUTOMATED | 5.5.4 | PATCH | Ensure default user umask is 027 or more restrictive" + replace: + path: "{{ item }}" + regexp: '(?i)^((?!#)umask)\s+0[0,2,5][0,2,5]' + replace: '\1 027' + with_items: + - /etc/bash.bashrc + - /etc/profile + - /etc/login.defs + + - name: "AUTOMATED | 5.5.4 | PATCH | Ensure default user umask is 027 or more restrictive" + lineinfile: + path: /etc/login.defs + regexp: '^USERGROUPS_ENAB' + line: USERGROUPS_ENAB no + when: + - ubtu20cis_rule_5_5_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.4 + - user + +- name: "AUTOMATED | 5.5.5 | PATCH | Ensure default user shell timeout is 900 seconds or less" + blockinfile: + create: yes + mode: 0644 + dest: "{{ item.dest }}" + state: "{{ item.state }}" + marker: "# {mark} ANSIBLE MANAGED" + block: | + # Set session timeout - CIS ID 5.5.5 + TMOUT={{ ubtu20cis_shell_session_timeout.timeout }} + # only set TMOUT if it isn't set yet to avoid a shell error + : ${TMOUT={{ ubtu20cis_shell_session_timeout.timeout }}} + readonly TMOUT + export TMOUT + with_items: + - { dest: "{{ ubtu20cis_shell_session_timeout.file }}", state: present } + - { dest: /etc/profile, state: "{{ (ubtu20cis_shell_session_timeout.file == '/etc/profile') | ternary('present', 'absent') }}" } + - { dest: /etc/bash.bashrc, state: present } + when: + - ubtu20cis_rule_5_5_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.5.5 + - user diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.6.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.6.yml new file mode 100644 index 0000000..c32a96e --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.6.yml @@ -0,0 +1,25 @@ +--- +- name: "MANUAL | 5.6 | AUDIT | Ensure root login is restricted to system console" + 
block: + - name: "MANUAL | 5.6 | AUDIT | Ensure root login is restricted to system console | Get list of all terminals" + command: cat /etc/securetty + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_6_terminal_list + + - name: "MANUAL | 5.6 | AUDIT | Ensure root login is restricted to system console | Message out list" + debug: + msg: + - "WARNING!!!! Below is the list of consoles with root login access" + - "Please review for any consoles that are not in a physically secure location" + - "{{ ubtu20cis_5_6_terminal_list.stdout_lines }}" + when: + - ubtu20cis_rule_5_6 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_5.6 + - user diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.7.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.7.yml new file mode 100644 index 0000000..a009c12 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/cis_5.7.yml @@ -0,0 +1,38 @@ +--- +- name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted" + block: + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Check for pam_wheel.so module" + command: grep '^auth.*required.*pam_wheel' /etc/pam.d/su + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_5_7_pam_wheel_status + + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Create empty sugroup" + group: + name: "{{ ubtu20cis_su_group }}" + + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Set pam_wheel if exists" + pamd: + name: su + type: auth + control: required + module_path: pam_wheel.so + module_arguments: 'use_uid group={{ ubtu20cis_su_group }}' + when: ubtu20cis_5_7_pam_wheel_status.stdout | length > 0 + + - name: "AUTOMATED | 5.7 | PATCH | Ensure access to the su command is restricted | Set pam_wheel if does not exist" + lineinfile: + path: /etc/pam.d/su + line: 'auth required pam_wheel.so use_uid group={{ ubtu20cis_su_group }}' + create: yes + when: ubtu20cis_5_7_pam_wheel_status.stdout | length == 0 + when: + - ubtu20cis_rule_5_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_5.7 + - user diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/main.yml new file mode 100644 index 0000000..b03c773 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_5/main.yml @@ -0,0 +1,21 @@ +--- +- name: "SECTION | 5.1 | Configure time-based job schedulers" + include: cis_5.1.x.yml + +- name: "SECTION | 5.2 | Configure sudo" + include: cis_5.2.x.yml + +- name: "SECTION | 5.3 | Configure SSH Server" + include: cis_5.3.x.yml + +- name: "SECTION | 5.4.x | User PAM" + include: cis_5.4.x.yml + +- name: "SECTION | 5.5.x | User Accounts and Environment" + include: cis_5.5.x.yml + +- name: "SECTION | 5.6 | Ensure root login is restricted to system console" + include: cis_5.6.yml + +- name: "SECTION | 5.7 | Ensure access to the su command is restricted" + include: cis_5.7.yml diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.1.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.1.x.yml new file mode 100644 index 0000000..0a2d18e --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.1.x.yml @@ -0,0 +1,355 @@ +--- +- name: "MANUAL | 6.1.1 | AUDIT | Audit system file permissions" + block: + - name: "MANUAL | 6.1.1 | AUDIT | Audit 
system file permissions | Register package list" + command: ls -a /bin/ + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_1_1_packages + + # - name: "NOTSCORED | 6.1.1 | AUDIT | Audit system file permissions | Audit the packages" + # command: dpkg --verify {{ item }} + # changed_when: false + # failed_when: false + # check_mode: false + # with_items: + # - "{{ ubtu18cis_6_1_1_packages.stdout_lines }}" + # register: ubtu18cis_6_1_1_packages_audited + + - name: "MANUAL | 6.1.1 | AUDIT | Audit system file permissions | Message out packages results for review" + debug: + msg: + - "ALERT!!!! Below are the packages that need to be reviewed." + - "You can run dpkg --verify and if nothing is returned the package is installed correctly" + - "{{ ubtu20cis_6_1_1_packages.stdout_lines }}" + when: + - ubtu20cis_rule_6_1_1 + tags: + - level2-server + - level2-workstation + - manual + - audit + - rule_6.1.1 + - permissions + +- name: "AUTOMATED | 6.1.2 | PATCH | Ensure permissions on /etc/passwd are configured" + file: + path: /etc/passwd + owner: root + group: root + mode: 0644 + when: + - ubtu20cis_rule_6_1_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.2 + - permissions + +- name: "AUTOMATED | 6.1.3 | PATCH | Ensure permissions on /etc/passwd- are configured" + file: + path: /etc/passwd- + owner: root + group: root + mode: 0600 + when: + - ubtu20cis_rule_6_1_3 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.3 + - permissions + +- name: "AUTOMATED | 6.1.4 | PATCH | Ensure permissions on /etc/group are configured" + file: + path: /etc/group + owner: root + group: root + mode: 0644 + when: + - ubtu20cis_rule_6_1_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.4 + - permissions + +- name: "AUTOMATED | 6.1.5 | PATCH | Ensure permissions on /etc/group- are configured" + file: + path: /etc/group- + owner: root + group: root + mode: 0644 + when: + - ubtu20cis_rule_6_1_5 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.5 + - permissions + +- name: "AUTOMATED | 6.1.6 | PATCH | Ensure permissions on /etc/shadow are configured" + file: + path: /etc/shadow + owner: root + group: shadow + mode: 0640 + when: + - ubtu20cis_rule_6_1_6 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.6 + - permissions + +- name: "AUTOMATED | 6.1.7 | PATCH | Ensure permissions on /etc/shadow- are configured" + file: + path: /etc/shadow- + owner: root + group: shadow + mode: 0640 + when: + - ubtu20cis_rule_6_1_7 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.7 + - permissions + +- name: "AUTOMATED | 6.1.8 | PATCH | Ensure permissions on /etc/gshadow are configured" + file: + path: /etc/gshadow + owner: root + group: shadow + mode: 0640 + when: + - ubtu20cis_rule_6_1_8 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.8 + - permissions + +- name: "AUTOMATED | 6.1.9 | PATCH | Ensure permissions on /etc/gshadow- are configured" + file: + path: /etc/gshadow- + owner: root + group: shadow + mode: 0640 + when: + - ubtu20cis_rule_6_1_9 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.9 + - permissions + +- name: "AUTOMATED | 6.1.10 | PATCH | Ensure no world writable files exist" + block: + - name: "AUTOMATED | 6.1.10 | AUDIT | Ensure no world writable files exist | Get list of world-writable files" + shell: 
find {{ item.mount }} -xdev -type f -perm -0002 -not -fstype nfs + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_1_10_wwf + with_items: + - "{{ ansible_mounts }}" + + - name: "AUTOMATED | 6.1.10 | PATCH | Ensure no world writable files exist | Adjust world-writable files if they exist" + file: + path: "{{ item }}" + mode: o-w + with_items: + - "{{ ubtu20cis_6_1_10_wwf.results | map(attribute='stdout_lines') | flatten }}" + when: ubtu20cis_no_world_write_adjust + when: + - ubtu20cis_rule_6_1_10 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.10 + - permissions + +- name: "AUTOMATED | 6.1.11 | PATCH | Ensure no unowned files or directories exist" + block: + - name: "AUTOMATED | 6.1.11 | AUDIT | Ensure no unowned files or directories exist | Get unowned files or directories" + shell: find {{ item.mount }} -xdev -nouser -not -fstype nfs + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_1_11_no_user_items + with_items: + - "{{ ansible_mounts }}" + + - name: "AUTOMATED | 6.1.11 | AUDIT | Ensure no unowned files or directories exist | Flatten no_user_items results for easier use" + set_fact: + ubtu20cis_6_1_11_no_user_items_flatten: "{{ ubtu20cis_6_1_11_no_user_items.results | map(attribute='stdout_lines') | flatten }}" + + - name: "AUTOMATED | 6.1.11 | AUDIT | Ensure no unowned files or directories exist | Alert on unowned files and directories" + debug: + msg: + - "ALERT!!!You have unowned files and are configured to not auto-remediate for this task" + - "Please review the files/directories below and assign an owner" + - "{{ ubtu20cis_6_1_11_no_user_items_flatten }}" + when: + - not ubtu20cis_no_owner_adjust + - ubtu20cis_6_1_11_no_user_items_flatten | length > 0 + + - name: "AUTOMATED | 6.1.11 | PATCH | Ensure no unowned files or directories exist | Set unowned files/directories to configured owner" + file: + path: "{{ item }}" + owner: "{{ ubtu20cis_unowned_owner }}" + with_items: + - "{{ ubtu20cis_6_1_11_no_user_items_flatten }}" + when: + - ubtu20cis_no_owner_adjust + - ubtu20cis_6_1_11_no_user_items_flatten | length > 0 + when: + - ubtu20cis_rule_6_1_11 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.11 + - permissions + +- name: "AUTOMATED | 6.1.12 | PATCH | Ensure no ungrouped files or directories exist" + block: + - name: "AUTOMATED | 6.1.12 | AUDIT | Ensure no ungrouped files or directories exist | Get ungrouped fiels or directories" + shell: find {{ item.mount }} -xdev -nogroup -not -fstype nfs + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_1_12_ungrouped_items + with_items: + - "{{ ansible_mounts }}" + + - name: "AUTOMATED | 6.1.12 | AUDIT | Ensure no ungrouped files or directories exist | Flatten ungrouped_items results for easier use" + set_fact: + ubtu20cis_6_1_12_ungrouped_items_flatten: "{{ ubtu20cis_6_1_12_ungrouped_items.results | map(attribute='stdout_lines') | flatten }}" + + - name: "AUTOMATED | 6.1.12 | AUDIT | Ensure no ungrouped files or directories exist | Alert on ungrouped files and directories" + debug: + msg: + - "ALERT!!!!You have ungrouped files/directories and are configured to not auto-remediate for this task" + - "Please review the files/directories below and assign a group" + - "{{ ubtu20cis_6_1_12_ungrouped_items_flatten }}" + when: + - not ubtu20cis_no_group_adjust + - ubtu20cis_6_1_12_ungrouped_items_flatten | length > 0 + + - name: "AUTOMATED | 6.1.12 | PATCH | Ensure 
no ungrouped files or directories exist | Set ungrouped files/directories to configured group" + file: + path: "{{ item }}" + group: "{{ ubtu20cis_ungrouped_group }}" + with_items: + - "{{ ubtu20cis_6_1_12_ungrouped_items_flatten }}" + when: + - ubtu20cis_no_group_adjust + - ubtu20cis_6_1_12_ungrouped_items_flatten | length > 0 + when: + - ubtu20cis_rule_6_1_12 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.1.12 + - permissions + +- name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables" + block: + - name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables | Find SUID executables" + # shell: df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type f -perm -4000 + shell: find {{ item.mount }} -xdev -type f -perm -4000 -not -fstype nfs + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_1_13_suid_executables + with_items: + - "{{ ansible_mounts }}" + + - name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables | Flatten suid_executables results for easier use" + set_fact: + ubtu20cis_6_1_13_suid_executables_flatten: "{{ ubtu20cis_6_1_13_suid_executables.results | map(attribute='stdout_lines') | flatten }}" + + - name: "MANUAL | 6.1.13 | AUDIT | Audit SUID executables | Alert SUID executables exist" + debug: + msg: + - "ALERT!!!!You have SUID executables" + - "The files are listed below, please confirm the integrity of these binaries" + - "{{ ubtu20cis_6_1_13_suid_executables_flatten }}" + when: + - ubtu20cis_6_1_13_suid_executables_flatten | length > 0 + - not ubtu20cis_suid_adjust + + - name: "MANUAL | 6.1.13 | PATCH | Audit SUID executables | Remove SUID bit" + file: + path: "{{ item }}" + mode: 'u-s' + with_items: + - "{{ ubtu20cis_6_1_13_suid_executables_flatten }}" + when: + - ubtu20cis_suid_adjust + - ubtu20cis_6_1_13_suid_executables_flatten | length > 0 + when: + - ubtu20cis_rule_6_1_13 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_6.1.13 + - permissions + +- name: "MANUAL | 6.1.14 | AUDIT | Audit SGID executables" + block: + - name: "MANUAL |6.1.14 | AUDIT | Audit SGID executables | Find SGID executables" + shell: find {{ item }} -xdev -type f -perm -2000 -not -fstype nfs + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_1_14_sgid_executables + with_items: + - "{{ ansible_mounts }}" + + - name: "MANUAL | 6.1.14 | AUDIT | Audit SGID executables | Flatten sgid_executables results for easier use" + set_fact: + ubtu20cis_6_1_14_sgid_executables_flatten: "{{ ubtu20cis_6_1_14_sgid_executables.results | map(attribute='stdout_lines') | flatten }}" + + - name: "MANUAL | 6.1.14 | AUDIT | Audit SGID executables | Alert SGID executables exist" + debug: + msg: + - "ALERT!!!!You have SGID executables" + - "The files are listed below, please review the integrity of these binaries" + - "{{ ubtu20cis_6_1_14_sgid_executables_flatten }}" + when: ubtu20cis_6_1_14_sgid_executables_flatten | length > 0 + when: + - ubtu20cis_rule_6_1_14 + tags: + - level1-server + - level1-workstation + - manual + - audit + - rule_6.1.14 + - permissions diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.2.x.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.2.x.yml new file mode 100644 index 0000000..b739244 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/cis_6.2.x.yml @@ -0,0 +1,567 @@ +--- +- name: "AUTOMATED | 6.2.1 | AUDIT | Ensure accounts in /etc/passwd use shadowed passwords" + block: + - name: 
"AUTOMATED | 6.2.1 | AUDIT | Ensure accounts in /etc/passwd use shadowed passwords | Get users not using shadowed passwords" + command: awk -F':' '($2 != "x" ) { print $1}' /etc/passwd + changed_when: false + failed_when: false + register: ubtu20cis_6_2_1_nonshadowed_users + + - name: "AUTOMATED | 6.2.1 | AUDIT | Ensure accounts in /etc/passwd use shadowed passwords | Alert on findings" + debug: + msg: + - "ALERT! You have users that are not using a shadowed password. Please convert the below accounts to use a shadowed password" + - "{{ ubtu20cis_6_2_1_nonshadowed_users.stdout_lines }}" + when: + - ubtu20cis_6_2_1_nonshadowed_users.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_1 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.1 + - user_accounts + +- name: "AUTOMATED | 6.2.2 | PATCH | Ensure password fields are not empty" + block: + - name: "AUTOMATED | 6.2.2 | AUDIT | Ensure password fields are not empty | Find users with no password" + shell: awk -F":" '($2 == "" ) { print $1 }' /etc/shadow + changed_when: no + check_mode: false + register: ubtu20cis_6_2_2_empty_password_acct + + - name: "AUTOMATED | 6.2.2 | PATCH | Ensure password fields are not empty | Lock users with empty password" + user: + name: "{{ item }}" + password_lock: yes + with_items: + - "{{ ubtu20cis_6_2_2_empty_password_acct.stdout_lines }}" + when: ubtu20cis_6_2_2_empty_password_acct.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_2 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.2 + - user + - permissions + +- name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group" + block: + - name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Check /etc/passwd entries" + shell: pwck -r | grep 'no group' | awk '{ gsub("[:\47]",""); print $2}' + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_2_3_passwd_gid_check + + - name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print message that all groups match between passwd and group files" + debug: + msg: "Good News! There are no users that have non-existent GUIDs (Groups)" + when: ubtu20cis_6_2_3_passwd_gid_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.3 | AUDIT | Ensure all groups in /etc/passwd exist in /etc/group | Print warning about users with invalid GIDs missing GID entries in /etc/group" + debug: + msg: "WARNING!!!! 
The following users have non-existent GIDs (Groups): {{ ubtu20cis_6_2_3_passwd_gid_check.stdout_lines | join (', ') }}" + when: ubtu20cis_6_2_3_passwd_gid_check.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_3 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.3 + - groups + +- name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist" + block: + - name: capture audit task for missing homedirs + block: &u20s_homedir_audit + - name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist | Find users missing home directories" + shell: pwck -r | grep -vF -e "'/nonexistent'" | grep -P {{ ld_regex | quote }} + check_mode: false + register: ubtu20cis_users_missing_home + changed_when: ubtu20cis_6_2_4_audit | length > 0 + # failed_when: 0: success, 1: no grep match, 2: pwck found something + failed_when: ubtu20cis_users_missing_home.rc not in [0,1,2] + + ### NOTE: due to https://github.com/ansible/ansible/issues/24862 This is a shell command, and is quite frankly less than ideal. + - name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist| Creates home directories" + command: "mkhomedir_helper {{ item }}" + # check_mode: "{{ ubtu20cis_disruptive_check_mode }}" + with_items: "{{ ubtu20cis_6_2_4_audit | map(attribute='id') | list }}" + when: + - ubtu20cis_users_missing_home is changed + - ubtu20cis_disruption_high + + ### NOTE: Now we need to address that SELINUX will not let mkhomedir_helper create home directories for UUID < 500, so the ftp user will still show up in a pwck. Not sure this is needed, I need to confirm if that user is removed in an earlier task. + ### ^ Likely doesn't matter as 6.2.7 defines "local interactive users" as those w/ uid 1000-4999 + - name: replay audit task + block: *u20s_homedir_audit + + # CAUTION: debug loops don't show changed since 2.4: + # Fix: https://github.com/ansible/ansible/pull/59958 + - name: "AUTOMATED | 6.2.4 | PATCH | Ensure all users' home directories exist | Alert about correcting owner and group" + debug: msg="You will need to mkdir -p {{ item }} and chown properly to the correct owner and group." 
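+# Editor's note (hedged sketch, not part of the upstream role): the audit block above is
+# defined once with the YAML anchor &u20s_homedir_audit and replayed via
+# "block: *u20s_homedir_audit" after mkhomedir_helper runs, so the alert below should only
+# list directories that are still missing after remediation. Minimal illustration of the
+# same anchor/alias pattern (names are placeholders):
+#   - name: run audit
+#     block: &my_audit
+#       - name: collect state
+#         command: /bin/true
+#         changed_when: false
+#   - name: replay audit
+#     block: *my_audit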
+ with_items: "{{ ubtu20cis_6_2_4_audit | map(attribute='dir') | list }}" + changed_when: ubtu20cis_audit_complex + when: + - ubtu20cis_users_missing_home is changed + vars: + ld_regex: >- + ^user '(?P.*)': directory '(?P.*)' does not exist$ + ld_users: "{{ ubtu20cis_users_missing_home.stdout_lines | map('regex_replace', ld_regex, '\\g') | list }}" + ubtu20cis_6_2_4_audit: "{{ ubtu20cis_passwd | selectattr('uid', '>=', 1000) | selectattr('id', 'in', ld_users) | list }}" + when: + - ubtu20cis_rule_6_2_4 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.4 + - user + +- name: "AUTOMATED | 6.2.5 | PATCH | Ensure users own their home directories" + file: + path: "{{ item.dir }}" + owner: "{{ item.id }}" + state: directory + with_items: + - "{{ ubtu20cis_passwd }}" + loop_control: + label: "{{ ubtu20cis_passwd_label }}" + when: + - ubtu20cis_rule_6_2_5 + - item.uid >= 1000 + - item.dir != '/nonexistent' + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.5 + - user + +- name: "AUTOMATED | 6.2.6 | PATCH | Ensure users' home directories permissions are 750 or more restrictive" + block: + - name: "AUTOMATED | 6.2.6 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive | Get home directories" + stat: + path: "{{ item }}" + with_items: "{{ ubtu20cis_passwd | selectattr('uid', '>=', 1000) | selectattr('uid', '!=', 65534) | map(attribute='dir') | list }}" + check_mode: false + register: ubtu20cis_6_2_6_audit + + - name: "AUTOMATED | 6.2.6 | AUDIT | Ensure users' home directories permissions are 750 or more restrictive | Find home directories more 750" + command: find -H {{ item.0 | quote }} -not -type l -perm /027 + register: ubtu20cis_6_2_6_patch_audit + changed_when: ubtu20cis_6_2_6_patch_audit.stdout | length > 0 + check_mode: false + when: + - item.1.exists + with_together: + - "{{ ubtu20cis_6_2_6_audit.results | map(attribute='item') | list }}" + - "{{ ubtu20cis_6_2_6_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + - name: "AUTOMATED | 6.2.6 | PATCH | Ensure users' home directories permissions are 750 or more restrictive | Set home perms" + file: + path: "{{ item.0 }}" + recurse: yes + mode: a-st,g-w,o-rwx + register: ubtu20cis_6_2_6_patch + when: + - ubtu20cis_disruption_high + - item.1.exists + with_together: + - "{{ ubtu20cis_6_2_6_audit.results | map(attribute='item') | list }}" + - "{{ ubtu20cis_6_2_6_audit.results | map(attribute='stat') | list }}" + loop_control: + label: "{{ item.0 }}" + + # set default ACLs so the homedir has an effective umask of 0027 + - name: "AUTOMATED | 6.2.6 | PATCH | Ensure users' home directories permissions are 750 or more restrictive | Set ACL's" + acl: + path: "{{ item.0 }}" + default: yes + state: present + recursive: yes + etype: "{{ item.1.etype }}" + permissions: "{{ item.1.mode }}" + when: not ubtu20cis_system_is_container + with_nested: + - "{{ (ansible_check_mode | ternary(ubtu20cis_6_2_6_patch_audit, ubtu20cis_6_2_6_patch)).results | + rejectattr('skipped', 'defined') | map(attribute='item') | map('first') | list }}" + - + - etype: group + mode: rx + - etype: other + mode: '0' + when: + - ubtu20cis_rule_6_2_6 + - ubtu20cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.6 + - user + +- name: "AUTOMATED | 6.2.7 | PATCH | Ensure users' dot files are not group or world writable" + block: + - name: "AUTOMATED | 6.2.7 | AUDIT | Ensure users' dot files are not group 
or world-writable | Check for files" + shell: find /home/ -name "\.*" -perm /g+w,o+w + changed_when: no + failed_when: no + check_mode: false + register: ubtu20cis_6_2_7_audit + + - name: "AUTOMATED | 6.2.7 | AUDIT | Ensure users' dot files are not group or world-writable | Alert on files found" + debug: + msg: "Good news! We have not found any group or world-writable dot files on your sytem" + failed_when: false + changed_when: false + when: + - ubtu20cis_6_2_7_audit.stdout | length == 0 + + - name: "AUTOMATED | 6.2.7 | PATCH | Ensure users' dot files are not group or world-writable | Changes files if configured" + file: + path: '{{ item }}' + mode: go-w + with_items: "{{ ubtu20cis_6_2_7_audit.stdout_lines }}" + when: + - ubtu20cis_6_2_7_audit.stdout | length > 0 + - ubtu20cis_dotperm_ansibleManaged + when: + - ubtu20cis_rule_6_2_7 + - ubtu20cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.7 + - user + +- name: "AUTOMATED | 6.2.8 | PATCH | Ensure no users have .netrc files" + file: + dest: "~{{ item }}/.netrc" + state: absent + with_items: + - "{{ ubtu20cis_users.stdout_lines }}" + when: + - ubtu20cis_rule_6_2_8 + - ubtu20cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.8 + - user + +- name: "AUTOMATED | 6.2.9 | PATCH | Ensure no users have .forward files" + file: + dest: "~{{ item }}/.forward" + state: absent + with_items: + - "{{ ubtu20cis_users.stdout_lines }}" + when: + - ubtu20cis_rule_6_2_9 + - ubtu20cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.9 + - user + +- name: "AUTOMATED | 6.2.10 | PATCH | Ensure no users have .rhosts files" + file: + dest: "~{{ item }}/.rhosts" + state: absent + with_items: + - "{{ ubtu20cis_users.stdout_lines }}" + when: + - ubtu20cis_rule_6_2_10 + - ubtu20cis_disruption_high + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.10 + - user + +- name: "AUTOMATED | 6.2.11 | PATCH | Ensure root is the only UID 0 account" + block: + - name: "AUTOMATED | 6.2.11 | AUDIT | Ensure root is the only UID 0 account | Get non-root users with UID of 0" + shell: awk -F":" '($3 == 0 && $1 != \"root\") {i++;print $1 }' /etc/passwd + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_2_11_uid_0_notroot + + - name: "AUTOMATED | 6.2.11 | PATCH | Ensure root is the only UID 0 account | Lock UID 0 users" + user: + name: "{{ item }}" + password_lock: yes + with_items: + - "{{ ubtu20cis_6_2_11_uid_0_notroot.stdout_lines }}" + when: + - ubtu20cis_disruption_high + - ubtu20cis_6_2_11_uid_0_notroot.stdout | length > 0 + + - name: "AUTOMATED | 6.2.11 | AUDIT | Ensure root is the only UID 0 account | Alert about accounts disruption high" + debug: + msg: + - "ALERT!!!! You have non-root users with a UID of 0 and ubtu18cis_disruption_high enabled" + - "This means the following accounts were password locked and will need to have the UID's manually adjusted" + - "{{ ubtu20cis_6_2_11_uid_0_notroot.stdout_lines }}" + when: + - ubtu20cis_disruption_high + - ubtu20cis_6_2_11_uid_0_notroot.stdout | length > 0 + + - name: "AUTOMATED | 6.2.11 | AUDIT | Ensure root is the only UID 0 account | Alert about accounts disruption low" + debug: + msg: + - "ALERT!!!! 
You have non-root users with a UID of 0 and ubtu18cis_disruption_high disabled" + - "This means no action was taken, you will need to have the UID's of the users below manually adjusted" + - "{{ ubtu20cis_6_2_11_uid_0_notroot.stdout_lines }}" + when: + - not ubtu20cis_disruption_high + - ubtu20cis_6_2_11_uid_0_notroot.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_11 + tags: + - level1-server + - level1-workstation + - automated + - scored + - rule_6.2.11 + - user + - root + +- name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity" + command: /bin/true + changed_when: false + failed_when: false + check_mode: false + # block: + # - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Determine empty value" + # shell: 'echo $PATH | grep ::' + # changed_when: False + # failed_when: ubtu20cis_6_2_12_path_colon.rc == 0 + # check_mode: false + # register: ubtu20cis_6_2_12_path_colon + + # - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Determine colon end" + # shell: 'echo $PATH | grep :$' + # changed_when: False + # failed_when: ubtu20cis_6_2_12_path_colon_end.rc == 0 + # check_mode: false + # register: ubtu20cis_6_2_12_path_colon_end + + # - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Determine working dir" + # shell: echo "$PATH" + # changed_when: False + # failed_when: '"." in ubtu20cis_6_2_12_working_dir.stdout_lines' + # check_mode: false + # register: ubtu20cis_6_2_12_working_dir + # - debug: var=ubtu20cis_6_2_12_working_dir + + # - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Check paths" + # stat: + # path: "{{ item }}" + # check_mode: false + # register: ubtu20cis_6_2_12_path_stat + # with_items: + # - "{{ ubtu20cis_6_2_12_working_dir.stdout.split(':') }}" + + # - debug: var=ubtu20cis_6_2_12_path_stat + + # - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Alert on empty value, colon end, and no working dir" + # debug: + # msg: + # - "The following paths have no working directory: {{ ubtu20cis_6_2_12_path_stat.results | selectattr('stat.exists','==','false') | map(attribute='item') | list }}" + + # # - name: "AUTOMATED | 6.2.12 | PATCH | Ensure root PATH Integrity | Set permissions" + # # file: + # # path: "{{ item }}" + # # owner: root + # # mode: 'o-w,g-w' + # # follow: yes + # # state: directory + # # with_items: + # # - "{{ ubtu18cis_6_2_12_path_stat | selectattr('exists','==','true') | map(attribute='path') }}" + when: + - ubtu20cis_rule_6_2_12 + tags: + - level1-server + - level1-workstation + - automated + - patch + - rule_6.2.12 + - user + - root + - notimplemented + +- name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist" + block: + - name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist | Check for duplicate UIDs" + shell: "pwck -r | awk -F: '{if ($3 in uid) print $1 ; else uid[$3]}' /etc/passwd" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_2_13_user_uid_check + + - name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist | Print message that no duplicate UIDs exist" + debug: + msg: "Good News! There are no duplicate UID's in the system" + when: ubtu20cis_6_2_13_user_uid_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.13 | AUDIT | Ensure no duplicate UIDs exist | Print warning about users with duplicate UIDs" + debug: + msg: "Warning!!!! 
The following users have UIDs that are duplicates: {{ ubtu20cis_6_2_13_user_uid_check.stdout_lines }}" + when: ubtu20cis_6_2_13_user_uid_check.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_13 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.13 + - user + +- name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist" + block: + - name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist | Check for duplicate GIDs" + shell: "pwck -r | awk -F: '{if ($3 in users) print $1 ; else users[$3]}' /etc/group" + changed_when: no + failed_when: no + check_mode: false + register: ubtu20cis_6_2_14_user_user_check + + - name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist | Print message that no duplicate GID's exist" + debug: + msg: "Good News! There are no duplicate GIDs in the system" + when: ubtu20cis_6_2_14_user_user_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.14 | AUDIT | Ensure no duplicate GIDs exist | Print warning about users with duplicate GIDs" + debug: + msg: "Warning: The following groups have duplicate GIDs: {{ ubtu20cis_6_2_14_user_user_check.stdout_lines }}" + when: ubtu20cis_6_2_14_user_user_check.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_14 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.14 + - groups + +- name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist" + block: + - name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist | Check for duplicate User Names" + shell: "pwck -r | awk -F: '{if ($1 in users) print $1 ; else users[$1]}' /etc/passwd" + changed_when: no + failed_when: no + check_mode: false + register: ubtu20cis_6_2_15_user_username_check + + - name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist | Print message that no duplicate user names exist" + debug: + msg: "Good News! There are no duplicate user names in the system" + when: ubtu20cis_6_2_15_user_username_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.15 | AUDIT | Ensure no duplicate user names exist | Print warning about users with duplicate User Names" + debug: + msg: "Warning: The following user names are duplicates: {{ ubtu20cis_6_2_15_user_username_check.stdout_lines }}" + when: ubtu20cis_6_2_15_user_username_check.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_15 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.15 + - user + +- name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist" + block: + - name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist | Check for duplicate group names" + shell: 'getent passwd | cut -d: -f1 | sort -n | uniq -d' + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_2_16_group_group_check + + - name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist | Print message that no duplicate groups exist" + debug: + msg: "Good News! 
There are no duplicate group names in the system" + when: ubtu20cis_6_2_16_group_group_check.stdout | length == 0 + + - name: "AUTOMATED | 6.2.16 | AUDIT | Ensure no duplicate group names exist | Print warning about users with duplicate group names" + debug: + msg: "Warning: The following group names are duplicates: {{ ubtu20cis_6_2_16_group_group_check.stdout_lines }}" + when: ubtu20cis_6_2_16_group_group_check.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_16 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.16 + - groups + +- name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty" + block: + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | Get Shadow GID" + shell: grep ^shadow /etc/group | cut -f3 -d":" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_2_17_shadow_gid + + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | List of users with Shadow GID" + shell: awk -F":" '($4 == "{{ ubtu20cis_6_2_17_shadow_gid.stdout }}") { print }' /etc/passwd | cut -f1 -d":" + changed_when: false + failed_when: false + check_mode: false + register: ubtu20cis_6_2_17_users_shadow_gid + + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | Message on no users" + debug: + msg: "Good News! There are no users with the Shadow GID on your system" + when: ubtu20cis_6_2_17_users_shadow_gid.stdout | length == 0 + + - name: "AUTOMATED | 6.2.17 | AUDIT | Ensure shadow group is empty | Message on users with Shadow GID" + debug: + msg: + - "WARNING!!!! There are users that are in the Shadow group" + - "To conform to CIS standards no users should be in this group" + - "Please move the users below into another group" + - "{{ ubtu20cis_6_2_17_users_shadow_gid.stdout_lines }}" + when: ubtu20cis_6_2_17_users_shadow_gid.stdout | length > 0 + when: + - ubtu20cis_rule_6_2_17 + tags: + - level1-server + - level1-workstation + - automated + - audit + - rule_6.2.17 + - groups + - user diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/main.yml new file mode 100644 index 0000000..e3a3fbe --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tasks/section_6/main.yml @@ -0,0 +1,6 @@ +--- +- name: "SECTION | 6.1 | System File Permissions" + include: cis_6.1.x.yml + +- name: "SECTION | 6.2 | User and Group Settings" + include: cis_6.2.x.yml diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/.DS_Store b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..1358e07096806d4c9763e41d63dd0c741b9f45b4 GIT binary patch literal 6148 zcmeHK!EO^V5FIBeI6;U=MWVg=!YxP}QCcoYOGpnKkSZ;L1E6L%8#OC!)@gRrh^DFb z(w~68;m8m4cRIGGC83m4g;X>njo*0e8E2m@I|BfsGmYB-O#qOn1}k-J{vc-D=ryU3 z#|#Raqloi<8fl$km*p-nVD@eeLYP7f39Rg26$Vg1Kbqy^DAf^8{27hYBriPgTvV#H zwR`uaY{@GMMc*AHUIA5hmGSVuO=- zfKxtyOtM(dI(nMLxy=)}0V(UUzTe!O&)eSiR!hBn;VoKfzO&=C)YkLu#iA~sJZ-;v z+Z%m7o}VmEzd7#3o(2%zk752zZ%3nW`G&^uNV-uZqV)EC(+w_?c>Z_ vt5AEWM$Ej@;Z+D4_9@1><|*DlwSsbch}l{v%*%;D#Carwm*GY720{ literal 0 HcmV?d00001 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/ansible_vars_goss.yml.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/ansible_vars_goss.yml.j2 new file mode 100644 index 0000000..78880a2 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/ansible_vars_goss.yml.j2 @@ -0,0 +1,500 @@ +audit_run: ansible # This is forced to wrapper by running the run_audit wrapper script 
(placeholder only if run via ansible) +## metadata for Audit benchmark +ubuntu20cis_benchmark: +- "type: CIS" +- "version: '1.1.0'" +- "os: Ubuntu 20.04" +- "epoch: {{ ansible_date_time.epoch }}" +- "hostname: {{ ansible_hostname }}" +- "automation_group: {% if group_names|length == 0 %}[ungrouped]"{% else %}{% for group in group_names %}[{{ group }}{% if not loop.last %},{% else %}]"{% endif %}{% endfor %}{% endif +%} +- "fullname: Ubuntu 20.04.3 LTS" +- "machine_uuid: {{ ansible_product_uuid }}" +- "os_locale: {{ ansible_date_time.tz }}" +- "host_os_version: {{ ansible_distribution_version }}" +ubuntu20cis_os_distribution: {{ ansible_distribution | lower }} + +# Some audit tests may need to scan every filesystem or have an impact on a system +# these may need be scheduled to minimise impact also ability to set a timeout if taking too long +run_heavy_tests: {{ audit_run_heavy_tests }} +timeout_ms: {{ audit_cmd_timeout }} + + +ubuntu20cis_section1: true +ubuntu20cis_section2: true +ubuntu20cis_section3: true +ubuntu20cis_section4: true +ubuntu20cis_section5: true +ubuntu20cis_section6: true + +ubuntu20cis_level_1: true +ubuntu20cis_level_2: true + +ubuntu20cis_apparmor_disable: true + +# to enable rules that may have IO impact on a system e.g. full filesystem scans or CPU heavy +run_heavy_tests: true + +# True is BIOS based system else set to false +ubuntu20_legacy_boot: true + +ubuntu20_set_boot_pass: true + +# These variables correspond with the CIS rule IDs or paragraph numbers defined in +# the CIS benchmark documents. +# PLEASE NOTE: These work in coordination with the section # group variables and tags. +# You must enable an entire section in order for the variables below to take effect. +# Section 1 rules +ubuntu20cis_rule_1_1_1_1: {{ ubtu20cis_rule_1_1_1_1 }} +ubuntu20cis_rule_1_1_1_2: {{ ubtu20cis_rule_1_1_1_2 }} +ubuntu20cis_rule_1_1_1_3: {{ ubtu20cis_rule_1_1_1_3 }} +ubuntu20cis_rule_1_1_1_4: {{ ubtu20cis_rule_1_1_1_4 }} +ubuntu20cis_rule_1_1_1_5: {{ ubtu20cis_rule_1_1_1_5 }} +ubuntu20cis_rule_1_1_1_6: {{ ubtu20cis_rule_1_1_1_6 }} +ubuntu20cis_rule_1_1_1_7: {{ ubtu20cis_rule_1_1_1_7 }} +ubuntu20cis_rule_1_1_2: {{ ubtu20cis_rule_1_1_2 }} +ubuntu20cis_rule_1_1_3: {{ ubtu20cis_rule_1_1_3 }} +ubuntu20cis_rule_1_1_4: {{ ubtu20cis_rule_1_1_4 }} +ubuntu20cis_rule_1_1_5: {{ ubtu20cis_rule_1_1_5 }} +ubuntu20cis_rule_1_1_6: {{ ubtu20cis_rule_1_1_6 }} +ubuntu20cis_rule_1_1_7: {{ ubtu20cis_rule_1_1_7 }} +ubuntu20cis_rule_1_1_8: {{ ubtu20cis_rule_1_1_8 }} +ubuntu20cis_rule_1_1_9: {{ ubtu20cis_rule_1_1_9 }} +ubuntu20cis_rule_1_1_10: {{ ubtu20cis_rule_1_1_10 }} +ubuntu20cis_rule_1_1_11: {{ ubtu20cis_rule_1_1_11 }} +ubuntu20cis_rule_1_1_12: {{ ubtu20cis_rule_1_1_12 }} +ubuntu20cis_rule_1_1_13: {{ ubtu20cis_rule_1_1_13 }} +ubuntu20cis_rule_1_1_14: {{ ubtu20cis_rule_1_1_14 }} +ubuntu20cis_rule_1_1_15: {{ ubtu20cis_rule_1_1_15 }} +ubuntu20cis_rule_1_1_16: {{ ubtu20cis_rule_1_1_16 }} +ubuntu20cis_rule_1_1_17: {{ ubtu20cis_rule_1_1_17 }} +ubuntu20cis_rule_1_1_18: {{ ubtu20cis_rule_1_1_18 }} +ubuntu20cis_rule_1_1_19: {{ ubtu20cis_rule_1_1_19 }} +ubuntu20cis_rule_1_1_20: {{ ubtu20cis_rule_1_1_20 }} +ubuntu20cis_rule_1_1_21: {{ ubtu20cis_rule_1_1_21 }} +ubuntu20cis_rule_1_1_22: {{ ubtu20cis_rule_1_1_22 }} +ubuntu20cis_rule_1_1_23: {{ ubtu20cis_rule_1_1_23 }} +ubuntu20cis_rule_1_1_24: {{ ubtu20cis_rule_1_1_24 }} +ubuntu20cis_rule_1_2_1: {{ ubtu20cis_rule_1_2_1 }} +ubuntu20cis_rule_1_2_2: {{ ubtu20cis_rule_1_2_2 }} +ubuntu20cis_rule_1_3_1: {{ ubtu20cis_rule_1_3_1 }} +ubuntu20cis_rule_1_3_2: {{ 
ubtu20cis_rule_1_3_2 }}
+ubuntu20cis_rule_1_4_1: {{ ubtu20cis_rule_1_4_1 }}
+ubuntu20cis_rule_1_4_2: {{ ubtu20cis_rule_1_4_2 }}
+ubuntu20cis_rule_1_4_3: {{ ubtu20cis_rule_1_4_3 }}
+ubuntu20cis_rule_1_4_4: {{ ubtu20cis_rule_1_4_4 }}
+ubuntu20cis_rule_1_5_1: {{ ubtu20cis_rule_1_5_1 }}
+ubuntu20cis_rule_1_5_2: {{ ubtu20cis_rule_1_5_2 }}
+ubuntu20cis_rule_1_5_3: {{ ubtu20cis_rule_1_5_3 }}
+ubuntu20cis_rule_1_5_4: {{ ubtu20cis_rule_1_5_4 }}
+ubuntu20cis_rule_1_6_1_1: {{ ubtu20cis_rule_1_6_1_1 }}
+ubuntu20cis_rule_1_6_1_2: {{ ubtu20cis_rule_1_6_1_2 }}
+ubuntu20cis_rule_1_6_1_3: {{ ubtu20cis_rule_1_6_1_3 }}
+ubuntu20cis_rule_1_6_1_4: {{ ubtu20cis_rule_1_6_1_4 }}
+ubuntu20cis_rule_1_7_1: {{ ubtu20cis_rule_1_7_1 }}
+ubuntu20cis_rule_1_7_2: {{ ubtu20cis_rule_1_7_2 }}
+ubuntu20cis_rule_1_7_3: {{ ubtu20cis_rule_1_7_3 }}
+ubuntu20cis_rule_1_7_4: {{ ubtu20cis_rule_1_7_4 }}
+ubuntu20cis_rule_1_7_5: {{ ubtu20cis_rule_1_7_5 }}
+ubuntu20cis_rule_1_7_6: {{ ubtu20cis_rule_1_7_6 }}
+ubuntu20cis_rule_1_8_1: {{ ubtu20cis_rule_1_8_1 }}
+ubuntu20cis_rule_1_8_2: {{ ubtu20cis_rule_1_8_2 }}
+ubuntu20cis_rule_1_8_3: {{ ubtu20cis_rule_1_8_3 }}
+ubuntu20cis_rule_1_8_4: {{ ubtu20cis_rule_1_8_4 }}
+ubuntu20cis_rule_1_9: {{ ubtu20cis_rule_1_9 }}
+
+# section 2 rules
+
+ubuntu20cis_rule_2_1_1_1: {{ ubtu20cis_rule_2_1_1_1 }}
+ubuntu20cis_rule_2_1_1_2: {{ ubtu20cis_rule_2_1_1_2 }}
+ubuntu20cis_rule_2_1_1_3: {{ ubtu20cis_rule_2_1_1_3 }}
+ubuntu20cis_rule_2_1_1_4: {{ ubtu20cis_rule_2_1_1_4 }}
+ubuntu20cis_rule_2_1_2: {{ ubtu20cis_rule_2_1_2 }}
+ubuntu20cis_rule_2_1_3: {{ ubtu20cis_rule_2_1_3 }}
+ubuntu20cis_rule_2_1_4: {{ ubtu20cis_rule_2_1_4 }}
+ubuntu20cis_rule_2_1_5: {{ ubtu20cis_rule_2_1_5 }}
+ubuntu20cis_rule_2_1_6: {{ ubtu20cis_rule_2_1_6 }}
+ubuntu20cis_rule_2_1_7: {{ ubtu20cis_rule_2_1_7 }}
+ubuntu20cis_rule_2_1_8: {{ ubtu20cis_rule_2_1_8 }}
+ubuntu20cis_rule_2_1_9: {{ ubtu20cis_rule_2_1_9 }}
+ubuntu20cis_rule_2_1_10: {{ ubtu20cis_rule_2_1_10 }}
+ubuntu20cis_rule_2_1_11: {{ ubtu20cis_rule_2_1_11 }}
+ubuntu20cis_rule_2_1_12: {{ ubtu20cis_rule_2_1_12 }}
+ubuntu20cis_rule_2_1_13: {{ ubtu20cis_rule_2_1_13 }}
+ubuntu20cis_rule_2_1_14: {{ ubtu20cis_rule_2_1_14 }}
+ubuntu20cis_rule_2_1_15: {{ ubtu20cis_rule_2_1_15 }}
+ubuntu20cis_rule_2_1_16: {{ ubtu20cis_rule_2_1_16 }}
+ubuntu20cis_rule_2_1_17: {{ ubtu20cis_rule_2_1_17 }}
+ubuntu20cis_rule_2_2_1: {{ ubtu20cis_rule_2_2_1 }}
+ubuntu20cis_rule_2_2_2: {{ ubtu20cis_rule_2_2_2 }}
+ubuntu20cis_rule_2_2_3: {{ ubtu20cis_rule_2_2_3 }}
+ubuntu20cis_rule_2_2_4: {{ ubtu20cis_rule_2_2_4 }}
+ubuntu20cis_rule_2_2_5: {{ ubtu20cis_rule_2_2_5 }}
+ubuntu20cis_rule_2_2_6: {{ ubtu20cis_rule_2_2_6 }}
+ubuntu20cis_rule_2_3: {{ ubtu20cis_rule_2_3 }}
+
+# Section 3 rules
+ubuntu20cis_rule_3_1_1: {{ ubtu20cis_rule_3_1_1 }}
+ubuntu20cis_rule_3_1_2: {{ ubtu20cis_rule_3_1_2 }}
+ubuntu20cis_rule_3_2_1: {{ ubtu20cis_rule_3_2_1 }}
+ubuntu20cis_rule_3_2_2: {{ ubtu20cis_rule_3_2_2 }}
+ubuntu20cis_rule_3_3_1: {{ ubtu20cis_rule_3_3_1 }}
+ubuntu20cis_rule_3_3_2: {{ ubtu20cis_rule_3_3_2 }}
+ubuntu20cis_rule_3_3_3: {{ ubtu20cis_rule_3_3_3 }}
+ubuntu20cis_rule_3_3_4: {{ ubtu20cis_rule_3_3_4 }}
+ubuntu20cis_rule_3_3_5: {{ ubtu20cis_rule_3_3_5 }}
+ubuntu20cis_rule_3_3_6: {{ ubtu20cis_rule_3_3_6 }}
+ubuntu20cis_rule_3_3_7: {{ ubtu20cis_rule_3_3_7 }}
+ubuntu20cis_rule_3_3_8: {{ ubtu20cis_rule_3_3_8 }}
+ubuntu20cis_rule_3_3_9: {{ ubtu20cis_rule_3_3_9 }}
+ubuntu20cis_rule_3_4_1: {{ ubtu20cis_rule_3_4_1 }}
+ubuntu20cis_rule_3_4_2: {{ ubtu20cis_rule_3_4_2 }}
+ubuntu20cis_rule_3_4_3: {{ ubtu20cis_rule_3_4_3 }}
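+# Editor's note (hedged sketch, not part of the upstream template): each line above simply
+# copies a role toggle (ubtu20cis_*) into the variable name the packaged goss audit expects
+# (ubuntu20cis_*). For example, with ubtu20cis_rule_3_4_3: true set in defaults/main.yml,
+# the rendered audit vars file would contain a line like:
+#   ubuntu20cis_rule_3_4_3: True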
+ubuntu20cis_rule_3_4_4: {{ ubtu20cis_rule_3_4_4 }} +# UFW +ubuntu20cis_rule_3_5_1_1: {{ ubtu20cis_rule_3_5_1_1 }} +ubuntu20cis_rule_3_5_1_2: {{ ubtu20cis_rule_3_5_1_2 }} +ubuntu20cis_rule_3_5_1_3: {{ ubtu20cis_rule_3_5_1_3 }} +ubuntu20cis_rule_3_5_1_4: {{ ubtu20cis_rule_3_5_1_4 }} +ubuntu20cis_rule_3_5_1_5: {{ ubtu20cis_rule_3_5_1_5 }} +ubuntu20cis_rule_3_5_1_6: {{ ubtu20cis_rule_3_5_1_6 }} +ubuntu20cis_rule_3_5_1_7: {{ ubtu20cis_rule_3_5_1_7 }} +# NFTables +ubuntu20cis_rule_3_5_2_1: {{ ubtu20cis_rule_3_5_2_1 }} +ubuntu20cis_rule_3_5_2_2: {{ ubtu20cis_rule_3_5_2_2 }} +ubuntu20cis_rule_3_5_2_3: {{ ubtu20cis_rule_3_5_2_3 }} +ubuntu20cis_rule_3_5_2_4: {{ ubtu20cis_rule_3_5_2_4 }} +ubuntu20cis_rule_3_5_2_5: {{ ubtu20cis_rule_3_5_2_5 }} +ubuntu20cis_rule_3_5_2_6: {{ ubtu20cis_rule_3_5_2_6 }} +ubuntu20cis_rule_3_5_2_7: {{ ubtu20cis_rule_3_5_2_7 }} +ubuntu20cis_rule_3_5_2_8: {{ ubtu20cis_rule_3_5_2_8 }} +ubuntu20cis_rule_3_5_2_9: {{ ubtu20cis_rule_3_5_2_9 }} +ubuntu20cis_rule_3_5_2_10: {{ ubtu20cis_rule_3_5_2_10 }} +# IPTables +ubuntu20cis_rule_3_5_3_1_1: {{ ubtu20cis_rule_3_5_3_1_1 }} +ubuntu20cis_rule_3_5_3_1_2: {{ ubtu20cis_rule_3_5_3_1_2 }} +ubuntu20cis_rule_3_5_3_1_3: {{ ubtu20cis_rule_3_5_3_1_3 }} +ubuntu20cis_rule_3_5_3_2_1: {{ ubtu20cis_rule_3_5_3_2_1 }} +ubuntu20cis_rule_3_5_3_2_2: {{ ubtu20cis_rule_3_5_3_2_2 }} +ubuntu20cis_rule_3_5_3_2_3: {{ ubtu20cis_rule_3_5_3_2_3 }} +ubuntu20cis_rule_3_5_3_2_4: {{ ubtu20cis_rule_3_5_3_2_4 }} +ubuntu20cis_rule_3_5_3_3_1: {{ ubtu20cis_rule_3_5_3_3_1 }} +ubuntu20cis_rule_3_5_3_3_2: {{ ubtu20cis_rule_3_5_3_3_2 }} +ubuntu20cis_rule_3_5_3_3_3: {{ ubtu20cis_rule_3_5_3_3_3 }} +ubuntu20cis_rule_3_5_3_3_4: {{ ubtu20cis_rule_3_5_3_3_4 }} + +# Section 4 rules +ubuntu20cis_rule_4_1_1_1: {{ ubtu20cis_rule_4_1_1_1 }} +ubuntu20cis_rule_4_1_1_2: {{ ubtu20cis_rule_4_1_1_2 }} +ubuntu20cis_rule_4_1_1_3: {{ ubtu20cis_rule_4_1_1_3 }} +ubuntu20cis_rule_4_1_1_4: {{ ubtu20cis_rule_4_1_1_4 }} +ubuntu20cis_rule_4_1_2_1: {{ ubtu20cis_rule_4_1_2_1 }} +ubuntu20cis_rule_4_1_2_2: {{ ubtu20cis_rule_4_1_2_2 }} +ubuntu20cis_rule_4_1_2_3: {{ ubtu20cis_rule_4_1_2_3 }} +ubuntu20cis_rule_4_1_3: {{ ubtu20cis_rule_4_1_3 }} +ubuntu20cis_rule_4_1_4: {{ ubtu20cis_rule_4_1_4 }} +ubuntu20cis_rule_4_1_5: {{ ubtu20cis_rule_4_1_5 }} +ubuntu20cis_rule_4_1_6: {{ ubtu20cis_rule_4_1_6 }} +ubuntu20cis_rule_4_1_7: {{ ubtu20cis_rule_4_1_7 }} +ubuntu20cis_rule_4_1_8: {{ ubtu20cis_rule_4_1_8 }} +ubuntu20cis_rule_4_1_9: {{ ubtu20cis_rule_4_1_9 }} +ubuntu20cis_rule_4_1_10: {{ ubtu20cis_rule_4_1_10 }} +ubuntu20cis_rule_4_1_11: {{ ubtu20cis_rule_4_1_11 }} +ubuntu20cis_rule_4_1_12: {{ ubtu20cis_rule_4_1_12 }} +ubuntu20cis_rule_4_1_13: {{ ubtu20cis_rule_4_1_13}} +ubuntu20cis_rule_4_1_14: {{ ubtu20cis_rule_4_1_14 }} +ubuntu20cis_rule_4_1_15: {{ ubtu20cis_rule_4_1_15 }} +ubuntu20cis_rule_4_1_16: {{ ubtu20cis_rule_4_1_16 }} +ubuntu20cis_rule_4_1_17: {{ ubtu20cis_rule_4_1_17 }} +ubuntu20cis_rule_4_2_1_1: {{ ubtu20cis_rule_4_2_1_1 }} +ubuntu20cis_rule_4_2_1_2: {{ ubtu20cis_rule_4_2_1_2 }} +ubuntu20cis_rule_4_2_1_3: {{ ubtu20cis_rule_4_2_1_3 }} +ubuntu20cis_rule_4_2_1_4: {{ ubtu20cis_rule_4_2_1_4 }} +ubuntu20cis_rule_4_2_1_5: {{ ubtu20cis_rule_4_2_1_5 }} +ubuntu20cis_rule_4_2_1_6: {{ ubtu20cis_rule_4_2_1_6 }} +ubuntu20cis_rule_4_2_2_1: {{ ubtu20cis_rule_4_2_2_1 }} +ubuntu20cis_rule_4_2_2_2: {{ ubtu20cis_rule_4_2_2_2 }} +ubuntu20cis_rule_4_2_2_3: {{ ubtu20cis_rule_4_2_2_3 }} +ubuntu20cis_rule_4_2_3: {{ ubtu20cis_rule_4_2_3 }} +ubuntu20cis_rule_4_3: {{ ubtu20cis_rule_4_3 }} +ubuntu20cis_rule_4_4: {{ ubtu20cis_rule_4_4 
}} + +# Section 5 +ubuntu20cis_rule_5_1_1: {{ ubtu20cis_rule_5_1_1 }} +ubuntu20cis_rule_5_1_2: {{ ubtu20cis_rule_5_1_2 }} +ubuntu20cis_rule_5_1_3: {{ ubtu20cis_rule_5_1_3 }} +ubuntu20cis_rule_5_1_4: {{ ubtu20cis_rule_5_1_4 }} +ubuntu20cis_rule_5_1_5: {{ ubtu20cis_rule_5_1_5 }} +ubuntu20cis_rule_5_1_6: {{ ubtu20cis_rule_5_1_6 }} +ubuntu20cis_rule_5_1_7: {{ ubtu20cis_rule_5_1_7 }} +ubuntu20cis_rule_5_1_8: {{ ubtu20cis_rule_5_1_8 }} +ubuntu20cis_rule_5_1_9: {{ ubtu20cis_rule_5_1_9 }} +ubuntu20cis_rule_5_2_1: {{ ubtu20cis_rule_5_2_1 }} +ubuntu20cis_rule_5_2_2: {{ ubtu20cis_rule_5_2_2 }} +ubuntu20cis_rule_5_2_3: {{ ubtu20cis_rule_5_2_3 }} +ubuntu20cis_rule_5_3_1: {{ ubtu20cis_rule_5_3_1 }} +ubuntu20cis_rule_5_3_2: {{ ubtu20cis_rule_5_3_2 }} +ubuntu20cis_rule_5_3_3: {{ ubtu20cis_rule_5_3_3 }} +ubuntu20cis_rule_5_3_4: {{ ubtu20cis_rule_5_3_4 }} +ubuntu20cis_rule_5_3_5: {{ ubtu20cis_rule_5_3_5 }} +ubuntu20cis_rule_5_3_6: {{ ubtu20cis_rule_5_3_6 }} +ubuntu20cis_rule_5_3_7: {{ ubtu20cis_rule_5_3_7 }} +ubuntu20cis_rule_5_3_8: {{ ubtu20cis_rule_5_3_8 }} +ubuntu20cis_rule_5_3_9: {{ ubtu20cis_rule_5_3_9 }} +ubuntu20cis_rule_5_3_10: {{ ubtu20cis_rule_5_3_10 }} +ubuntu20cis_rule_5_3_11: {{ ubtu20cis_rule_5_3_11 }} +ubuntu20cis_rule_5_3_12: {{ ubtu20cis_rule_5_3_12 }} +ubuntu20cis_rule_5_3_13: {{ ubtu20cis_rule_5_3_13 }} +ubuntu20cis_rule_5_3_14: {{ ubtu20cis_rule_5_3_14 }} +ubuntu20cis_rule_5_3_15: {{ ubtu20cis_rule_5_3_15 }} +ubuntu20cis_rule_5_3_16: {{ ubtu20cis_rule_5_3_16 }} +ubuntu20cis_rule_5_3_17: {{ ubtu20cis_rule_5_3_17 }} +ubuntu20cis_rule_5_3_18: {{ ubtu20cis_rule_5_3_18 }} +ubuntu20cis_rule_5_3_19: {{ ubtu20cis_rule_5_3_19 }} +ubuntu20cis_rule_5_3_20: {{ ubtu20cis_rule_5_3_20 }} +ubuntu20cis_rule_5_3_21: {{ ubtu20cis_rule_5_3_21 }} +ubuntu20cis_rule_5_3_22: {{ ubtu20cis_rule_5_3_22 }} +ubuntu20cis_rule_5_4_1: {{ ubtu20cis_rule_5_4_1 }} +ubuntu20cis_rule_5_4_2: {{ ubtu20cis_rule_5_4_2 }} +ubuntu20cis_rule_5_4_3: {{ ubtu20cis_rule_5_4_3 }} +ubuntu20cis_rule_5_4_4: {{ ubtu20cis_rule_5_4_4 }} +ubuntu20cis_rule_5_5_1_1: {{ ubtu20cis_rule_5_5_1_1 }} +ubuntu20cis_rule_5_5_1_2: {{ ubtu20cis_rule_5_5_1_2 }} +ubuntu20cis_rule_5_5_1_3: {{ ubtu20cis_rule_5_5_1_3 }} +ubuntu20cis_rule_5_5_1_4: {{ ubtu20cis_rule_5_5_1_4 }} +ubuntu20cis_rule_5_5_1_5: {{ ubtu20cis_rule_5_5_1_5 }} +ubuntu20cis_rule_5_5_2: {{ ubtu20cis_rule_5_5_2 }} +ubuntu20cis_rule_5_5_3: {{ ubtu20cis_rule_5_5_3 }} +ubuntu20cis_rule_5_5_4: {{ ubtu20cis_rule_5_5_4 }} +ubuntu20cis_rule_5_5_5: {{ ubtu20cis_rule_5_5_5 }} +ubuntu20cis_rule_5_6: {{ ubtu20cis_rule_5_6 }} +ubuntu20cis_rule_5_7: {{ ubtu20cis_rule_5_7 }} + +# Section 6 +ubuntu20cis_rule_6_1_1: {{ ubtu20cis_rule_6_1_1 }} +ubuntu20cis_rule_6_1_2: {{ ubtu20cis_rule_6_1_2 }} +ubuntu20cis_rule_6_1_3: {{ ubtu20cis_rule_6_1_3 }} +ubuntu20cis_rule_6_1_4: {{ ubtu20cis_rule_6_1_4 }} +ubuntu20cis_rule_6_1_5: {{ ubtu20cis_rule_6_1_5 }} +ubuntu20cis_rule_6_1_6: {{ ubtu20cis_rule_6_1_6 }} +ubuntu20cis_rule_6_1_7: {{ ubtu20cis_rule_6_1_7 }} +ubuntu20cis_rule_6_1_8: {{ ubtu20cis_rule_6_1_8 }} +ubuntu20cis_rule_6_1_9: {{ ubtu20cis_rule_6_1_9 }} +ubuntu20cis_rule_6_1_10: {{ ubtu20cis_rule_6_1_10 }} +ubuntu20cis_rule_6_1_11: {{ ubtu20cis_rule_6_1_11 }} +ubuntu20cis_rule_6_1_12: {{ ubtu20cis_rule_6_1_12 }} +ubuntu20cis_rule_6_1_13: {{ ubtu20cis_rule_6_1_13 }} +ubuntu20cis_rule_6_1_14: {{ ubtu20cis_rule_6_1_14 }} + +ubuntu20cis_rule_6_2_1: {{ ubtu20cis_rule_6_2_1 }} +ubuntu20cis_rule_6_2_2: {{ ubtu20cis_rule_6_2_2 }} +ubuntu20cis_rule_6_2_3: {{ ubtu20cis_rule_6_2_3 }} +ubuntu20cis_rule_6_2_4: {{ 
ubtu20cis_rule_6_2_4 }} +ubuntu20cis_rule_6_2_5: {{ ubtu20cis_rule_6_2_5 }} +ubuntu20cis_rule_6_2_6: {{ ubtu20cis_rule_6_2_6 }} +ubuntu20cis_rule_6_2_7: {{ ubtu20cis_rule_6_2_7 }} +ubuntu20cis_rule_6_2_8: {{ ubtu20cis_rule_6_2_8 }} +ubuntu20cis_rule_6_2_9: {{ ubtu20cis_rule_6_2_9 }} +ubuntu20cis_rule_6_2_10: {{ ubtu20cis_rule_6_2_10 }} +ubuntu20cis_rule_6_2_11: {{ ubtu20cis_rule_6_2_11 }} +ubuntu20cis_rule_6_2_12: {{ ubtu20cis_rule_6_2_12 }} +ubuntu20cis_rule_6_2_13: {{ ubtu20cis_rule_6_2_13 }} +ubuntu20cis_rule_6_2_14: {{ ubtu20cis_rule_6_2_14 }} +ubuntu20cis_rule_6_2_15: {{ ubtu20cis_rule_6_2_15 }} +ubuntu20cis_rule_6_2_16: {{ ubtu20cis_rule_6_2_16 }} +ubuntu20cis_rule_6_2_17: {{ ubtu20cis_rule_6_2_17 }} + +# AIDE +ubuntu20cis_config_aide: true + +# aide setup via - cron, timer +ubuntu20cis_aide_scan: cron + +# AIDE cron settings +ubuntu20_aide_cron: + cron_user: {{ ubtu20cis_aide_cron.cron_user }} + cron_file: {{ ubtu20cis_aide_cron.cron_file }} + aide_job: {{ ubtu20cis_aide_cron.aide_job }} + aide_minute: {{ ubtu20cis_aide_cron.aide_minute }} + aide_hour: {{ ubtu20cis_aide_cron.aide_hour }} + aide_day: '{{ ubtu20cis_aide_cron.aide_day }}' + aide_month: '{{ ubtu20cis_aide_cron.aide_month }}' + aide_weekday: '{{ ubtu20cis_aide_cron.aide_weekday }}' + +# 1.1 +ubuntu20cis_allow_autofs: {{ ubtu20cis_allow_autofs }} + +# 1.4 +ubuntu20cis_grub_conf_file: /boot/grub/grub.cfg +ubuntu20cis_grub_username: root +ubuntu20cis_grub_hash: blah +# 1.5.1 Bootloader password +ubuntu20cis_bootloader_password: {{ ubtu20cis_root_pw }} + +# 1.6 - Only have apparmor enforcing +ubuntu20cis_apparmor_enforce_only: false + +# Warning Banner Content (issue, issue.net, motd) +ubuntu20_warning_banner: {{ ubtu20cis_warning_banner }} +# End Banner + +# Section 2 +# Time sync - can be timesync or chrony or ntp +ubuntu20cis_time_service: {{ ubtu20cis_time_sync_tool }} +ubuntu20cis_ntp_servers: {{ ubtu20cis_ntp_server_list }} +ubuntu20cis_ntp_fallback: {{ ubtu20cis_ntp_fallback_server_list }} +ubuntu20cis_ntp_root_distance: + +# Whether or not to run tasks related to auditing/patching the desktop environment +ubuntu20cis_gui: {{ ubtu20cis_desktop_required }} + +# Service configuration booleans - set to true to keep the service +ubuntu20cis_avahi_server: {{ ubtu20cis_avahi_server }} +ubuntu20cis_cups_server: {{ ubtu20cis_cups_server }} +ubuntu20cis_nfs_server: {{ ubtu20cis_nfs_server }} +ubuntu20cis_dhcp_server: {{ ubtu20cis_dhcp_server }} +ubuntu20cis_ldap_server: {{ ubtu20cis_ldap_server }} +ubuntu20cis_dns_server: {{ ubtu20cis_dns_server }} +ubuntu20cis_vsftpd_server: {{ ubtu20cis_vsftpd_server }} +ubuntu20cis_httpd_server: {{ ubtu20cis_httpd_server }} +ubuntu20cis_is_mail_server: false +ubuntu20cis_dovecot_server: {{ ubtu20cis_dovecot_server }} +ubuntu20cis_samba_server: {{ ubtu20cis_smb_server }} +ubuntu20cis_squid_server: {{ ubtu20cis_squid_server }} +ubuntu20cis_snmp_server: {{ ubtu20cis_snmp_server }} + +# Mail Server config +{% if ubtu20_cis_mail_transfer_agent is defined %} +ubuntu20cis_mailserver: {{ ubtu20_cis_mail_transfer_agent }} +{% else %} +ubuntu20cis_mailserver: Not_defined +{% endif %} +ubuntu20_exim_conf: + - dc_eximconfig_configtype='local' + - dc_local_interfaces='127.0.0.1 ; ::1' + - dc_readhost='' + - dc_relay_domains='' + - dc_minimaldns='false' + - dc_relay_nets='' + - dc_smarthost='' + - dc_use_split_config='false' + - dc_hide_mailname='' + - dc_mailname_in_oh='true' + - dc_localdelivery='mail_spool' + + +ubuntu20cis_rsyncd_server: {{ ubtu20cis_rsync_server }} +ubuntu20cis_nis_server: {{
ubtu20cis_nis_server }} + +ubuntu20cis_xwindows_required: false + +# 2.2 client services +ubuntu20cis_rsh_required: {{ ubtu20cis_rsh_required }} +ubuntu20cis_talk_required: {{ ubtu20cis_talk_required }} +ubuntu20cis_telnet_required: {{ ubtu20cis_telnet_required }} +ubuntu20cis_ldap_clients_required: {{ ubtu20cis_ldap_clients_required }} +ubuntu20cis_rpc_required: {{ ubtu20cis_rpc_required }} + + +# Section 3 +# IPv6 required +ubuntu20cis_ipv6_required: {{ ubtu20cis_ipv6_required }} + +# System network parameters (host only OR host and router) +ubuntu20cis_is_router: false + + +ubuntu20cis_firewall: {{ ubtu20cis_firewall_package }} + +ubuntu20_default_firewall_zone: public +ubuntu20_firewall_interface: + - ['ens224'] + - ['ens192'] +ubuntu20_firewall_services: + - ssh + - dhcpv6-client + +### Section 4 +## auditd settings +ubuntu20cis_auditd: + space_left_action: email + action_mail_acct: root + admin_space_left_action: {{ ubtu20cis_auditd.admin_space_left_action }} + max_log_file_action: {{ ubtu20cis_auditd.max_log_file_action }} + auditd_backlog_limit: {{ ubtu20cis_audit_back_log_limit }} + +## syslog +ubuntu20cis_is_syslog_server: {{ ubtu20cis_system_is_log_server }} +### Section 5 +ubuntu20cis_sshd_limited: false +# Note the following to understand precedence and layout +ubuntu20cis_sshd_access: + - AllowUser + - AllowGroup + - DenyUser + - DenyGroup + +ubuntu20cis_ssh_strong_ciphers: Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr +ubuntu20cis_ssh_weak_ciphers: + - 3des-cbc + - aes128-cbc + - aes192-cbc + - aes256-cbc + - arcfour + - arcfour128 + - arcfour256 + - blowfish-cbc + - cast128-cbc + - rijndael-cbc@lysator.liu.se + +ubuntu20cis_ssh_strong_macs: MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512,hmac-sha2-256 +ubuntu20cis_ssh_weak_macs: + - hmac-md5 + - hmac-md5-96 + - hmac-ripemd160 + - hmac-sha1 + - hmac-sha1-96 + - umac-64@openssh.com + - umac-128@openssh.com + - hmac-md5-etm@openssh.com + - hmac-md5-96-etm@openssh.com + - hmac-ripemd160-etm@openssh.com + - hmac-sha1-etm@openssh.com + - hmac-sha1-96-etm@openssh.com + - umac-64-etm@openssh.com + - umac-128-etm@openssh.com + +ubuntu20cis_ssh_strong_kex: KexAlgorithms curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,ecdh-sha2-nistp521,ecdh-sha2-nistp384,ecdh-sha2-nistp256,diffie-hellman-group-exchange-sha256 +ubuntu20cis_ssh_weak_kex: + - diffie-hellman-group1-sha1 + - diffie-hellman-group14-sha1 + - diffie-hellman-group-exchange-sha1 + + +ubuntu20cis_ssh_aliveinterval: 300 +ubuntu20cis_ssh_countmax: 3 +## PAM +ubuntu20cis_pam_password: + minlen: "14" + minclass: "4" + +ubuntu20cis_pam_passwd_retry: "3" + +# choose one of below +ubuntu20cis_pwhistory_so: "14" +ubuntu20cis_unix_so: false +ubuntu20cis_passwd_remember: {{ ubtu20cis_pamd_pwhistory_remember }} + +# logins.def password settings +ubuntu20cis_pass: + max_days: {{ ubtu20cis_pass.max_days }} + min_days: {{ ubtu20cis_pass.min_days }} + warn_age: {{ ubtu20cis_pass.warn_age }} + +# set sugroup if differs from wheel +ubuntu20cis_sugroup: {{ ubtu20cis_su_group }} + +# sugroup users list +ubuntu20_sugroup_users: "root" + +# var log location variable +ubuntu20_varlog_location: {{ ubtu20cis_sudo_logfile }} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_10_access.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_10_access.rules.j2 new file 
mode 100644 index 0000000..b28cd45 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_10_access.rules.j2 @@ -0,0 +1,6 @@ +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access +{% endif %} +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_11_privileged.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_11_privileged.rules.j2 new file mode 100644 index 0000000..a005b3c --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_11_privileged.rules.j2 @@ -0,0 +1,3 @@ +{% for proc in priv_procs.stdout_lines -%} +-a always,exit -F path={{ proc }} -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged +{% endfor %} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_12_audit.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_12_audit.rules.j2 new file mode 100644 index 0000000..fa95efb --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_12_audit.rules.j2 @@ -0,0 +1,4 @@ +-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts +{% endif %} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_13_delete.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_13_delete.rules.j2 new file mode 100644 index 0000000..7a97b22 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_13_delete.rules.j2 @@ -0,0 +1,4 @@ +-a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete +{% endif %} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_14_scope.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_14_scope.rules.j2 new file mode 100644 index 0000000..0ae21fd --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_14_scope.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/sudoers -p wa -k scope +-w /etc/sudoers.d/ -p wa -k scope diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_15_actions.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_15_actions.rules.j2 new file mode 100644 index 0000000..a38638e --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_15_actions.rules.j2 @@ -0,0 +1,4 @@ +-a always,exit -F arch=b32 -C euid!=uid -F euid=0 -Fauid>=1000 -F auid!=4294967295 -S execve -k actions +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -C euid!=uid -F euid=0 -Fauid>=1000 -F auid!=4294967295 -S execve -k actions +{% endif %} diff --git 
a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_16_modules.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_16_modules.rules.j2 new file mode 100644 index 0000000..bc1813b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_16_modules.rules.j2 @@ -0,0 +1,9 @@ +-w /sbin/insmod -p x -k modules +-w /sbin/rmmod -p x -k modules +-w /sbin/modprobe -p x -k modules +{% if ansible_architecture != 'x86_64' -%} +-a always,exit -F arch=b32 -S init_module -S delete_module -k modules +{% endif %} +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S init_module -S delete_module -k modules +{% endif %} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_17_99finalize.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_17_99finalize.rules.j2 new file mode 100644 index 0000000..bc95eba --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_17_99finalize.rules.j2 @@ -0,0 +1 @@ +-e 2 diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_3_timechange.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_3_timechange.rules.j2 new file mode 100644 index 0000000..7da9f95 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_3_timechange.rules.j2 @@ -0,0 +1,7 @@ +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change +-a always,exit -F arch=b64 -S clock_settime -k time-change +{% endif %} +-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change +-a always,exit -F arch=b32 -S clock_settime -k time-change +-w /etc/localtime -p wa -k time-change diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_4_identity.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_4_identity.rules.j2 new file mode 100644 index 0000000..358f999 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_4_identity.rules.j2 @@ -0,0 +1,5 @@ +-w /etc/group -p wa -k identity +-w /etc/passwd -p wa -k identity +-w /etc/gshadow -p wa -k identity +-w /etc/shadow -p wa -k identity +-w /etc/security/opasswd -p wa -k identity diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_5_systemlocale.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_5_systemlocale.rules.j2 new file mode 100644 index 0000000..f56b572 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_5_systemlocale.rules.j2 @@ -0,0 +1,8 @@ +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale +{% endif %} +-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale +-w /etc/issue -p wa -k system-locale +-w /etc/issue.net -p wa -k system-locale +-w /etc/hosts -p wa -k system-locale +-w /etc/network -p wa -k system-locale diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_6_macpolicy.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_6_macpolicy.rules.j2 new file mode 100644 index 0000000..10354ae --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_6_macpolicy.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/apparmor/ -p wa -k MAC-policy +-w /etc/apparmor.d/ -p wa -k MAC-policy diff --git 
a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_7_logins.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_7_logins.rules.j2 new file mode 100644 index 0000000..b38f823 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_7_logins.rules.j2 @@ -0,0 +1,3 @@ +-w /var/log/faillog -p wa -k logins +-w /var/log/lastlog -p wa -k logins +-w /var/log/tallylog -p wa -k logins diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_8_session.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_8_session.rules.j2 new file mode 100644 index 0000000..51d7254 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_8_session.rules.j2 @@ -0,0 +1,3 @@ +-w /var/run/utmp -p wa -k session +-w /var/log/wtmp -p wa -k logins +-w /var/log/btmp -p wa -k logins diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_9_permmod.rules.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_9_permmod.rules.j2 new file mode 100644 index 0000000..a397494 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/audit/ubtu20cis_4_1_9_permmod.rules.j2 @@ -0,0 +1,8 @@ +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod +{% endif %} +-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/chrony.conf.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/chrony.conf.j2 new file mode 100644 index 0000000..119165d --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/chrony.conf.j2 @@ -0,0 +1,93 @@ +# Welcome to the chrony configuration file. See chrony.conf(5) for more +# information about usuable directives. + +# This will use (up to): +# - 4 sources from ntp.ubuntu.com which some are ipv6 enabled +# - 2 sources from 2.ubuntu.pool.ntp.org which is ipv6 enabled as well +# - 1 source from [01].ubuntu.pool.ntp.org each (ipv4 only atm) +# This means by default, up to 6 dual-stack and up to 2 additional IPv4-only +# sources will be used. +# At the same time it retains some protection against one of the entries being +# down (compare to just using one of the lines). See (LP: #1754358) for the +# discussion. +# +# About using servers from the NTP Pool Project in general see (LP: #104525). +# Approved by Ubuntu Technical Board on 2011-02-08. +# See http://www.pool.ntp.org/join.html for more information. + +{% for server in ubtu20cis_time_synchronization_servers -%} +server {{ server }} {{ ubtu20cis_chrony_server_options }} +{% endfor %} + +# This directive specify the location of the file containing ID/key pairs for +# NTP authentication. +keyfile /etc/chrony/chrony.keys + +# Set runtime command key. 
Note that if you change the key (not the +# password) to anything other than 1 you will need to edit +# /etc/ppp/ip-up.d/chrony, /etc/ppp/ip-down.d/chrony, /etc/init.d/chrony +# and /etc/cron.weekly/chrony as these scripts use it to get the password. + +#commandkey 1 + +# This directive specify the file into which chronyd will store the rate +# information. +driftfile /var/lib/chrony/chrony.drift + +# Uncomment the following line to turn logging on. +#log tracking measurements statistics + +# Log files location. +logdir /var/log/chrony + +# Stop bad estimates upsetting machine clock. +maxupdateskew 100.0 + +# This directive enables kernel synchronisation (every 11 minutes) of the +# real-time clock. Note that it can’t be used along with the 'rtcfile' directive. +rtcsync + +# Dump measurements when daemon exits. +dumponexit + +# Specify directory for dumping measurements. + +dumpdir /var/lib/chrony + +# Let computer be a server when it is unsynchronised. + +local stratum 10 + +# Allow computers on the unrouted nets to use the server. + +#allow 10/8 +#allow 192.168/16 +#allow 172.16/12 + +# This directive forces `chronyd' to send a message to syslog if it +# makes a system clock adjustment larger than a threshold value in seconds. + +logchange 0.5 + +# This directive defines an email address to which mail should be sent +# if chronyd applies a correction exceeding a particular threshold to the +# system clock. + +# mailonchange root@localhost 0.5 + +# This directive tells chrony to regulate the real-time clock and tells it +# Where to store related data. It may not work on some newer motherboards +# that use the HPET real-time clock. It requires enhanced real-time +# support in the kernel. I've commented it out because with certain +# combinations of motherboard and kernel it is reported to cause lockups. + +# rtcfile /var/lib/chrony/chrony.rtc + +# If the last line of this file reads 'rtconutc' chrony will assume that +# the CMOS clock is on UTC (GMT). If it reads '# rtconutc' or is absent +# chrony will assume local time. The line (if any) was written by the +# chrony postinst based on what it found in /etc/default/rcS. You may +# change it if necessary. 
+rtconutc + +user {{ ubtu20cis_chrony_user }} \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.j2 new file mode 100644 index 0000000..cad01c7 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.j2 @@ -0,0 +1 @@ +{{ ubtu20cis_warning_banner }} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.net.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.net.j2 new file mode 100644 index 0000000..cad01c7 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/issue.net.j2 @@ -0,0 +1 @@ +{{ ubtu20cis_warning_banner }} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/motd.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/motd.j2 new file mode 100644 index 0000000..cad01c7 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/etc/motd.j2 @@ -0,0 +1 @@ +{{ ubtu20cis_warning_banner }} diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/templates/ntp.conf.j2 b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/ntp.conf.j2 new file mode 100644 index 0000000..1a8bbec --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/templates/ntp.conf.j2 @@ -0,0 +1,69 @@ +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +driftfile /var/lib/ntp/ntp.drift + +# Leap seconds definition provided by tzdata +leapfile /usr/share/zoneinfo/leap-seconds.list + +# Enable this if you want statistics to be logged. +#statsdir /var/log/ntpstats/ + +statistics loopstats peerstats clockstats +filegen loopstats file loopstats type day enable +filegen peerstats file peerstats type day enable +filegen clockstats file clockstats type day enable + +# Specify one or more NTP servers. + +# Use servers from the NTP Pool Project. Approved by Ubuntu Technical Board +# on 2011-02-08 (LP: #104525). See http://www.pool.ntp.org/join.html for +# more information. +{% for server in ubtu20cis_time_synchronization_servers -%} +server {{ server }} {{ ubtu20cis_ntp_server_options }} +{% endfor %} + +# Use Ubuntu's ntp server as a fallback. +pool ntp.ubuntu.com + +# Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for +# details. The web page +# might also be helpful. +# +# Note that "restrict" applies to both servers and clients, so a configuration +# that might be intended to block requests from certain clients could also end +# up blocking replies from your own upstream servers. + +# By default, exchange time with everybody, but don't allow configuration. +# The two lines below are required to meet CIS requirements +restrict -4 default kod nomodify notrap nopeer noquery +restrict -6 default kod nomodify notrap nopeer noquery + +# Local users may interrogate the ntp server more closely. +restrict 127.0.0.1 +restrict ::1 + +# Needed for adding pool entries +restrict source notrap nomodify noquery + +# Clients from this (example!) subnet have unlimited access, but only if +# cryptographically authenticated. +#restrict 192.168.123.0 mask 255.255.255.0 notrust + + +# If you want to provide time to your local subnet, change the next line. +# (Again, the address is an example only.) +#broadcast 192.168.123.255 + +# If you want to listen to time broadcasts on your local subnet, de-comment the +# next lines. Please do this only if you trust everybody on the network! 
+#disable auth +#broadcastclient + +#Changes recquired to use pps synchonisation as explained in documentation: +#http://www.ntp.org/ntpfaq/NTP-s-config-adv.htm#AEN3918 + +#server 127.127.8.1 mode 135 prefer # Meinberg GPS167 with PPS +#fudge 127.127.8.1 time1 0.0042 # relative to PPS for my hardware + +#server 127.127.22.1 # ATOM(PPS) +#fudge 127.127.22.1 flag3 1 # enable PPS API \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tests/inventory b/Linux/ansible-lockdown/UBUNTU20-CIS/tests/inventory new file mode 100644 index 0000000..878877b --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tests/inventory @@ -0,0 +1,2 @@ +localhost + diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/tests/test.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/tests/test.yml new file mode 100644 index 0000000..ff8b7d9 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/tests/test.yml @@ -0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - UBUNTU20-CIS \ No newline at end of file diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/vagrant b/Linux/ansible-lockdown/UBUNTU20-CIS/vagrant new file mode 100644 index 0000000..044e471 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/vagrant @@ -0,0 +1,13 @@ +vagrant: + hosts: + ubuntu20: + ansible_ssh_port: 2277 + vars: + ansible_host: 127.0.0.1 + ansible_user: vagrant + ansible_password: vagrant + ansible_become_pass: vagrant + setup_audit: true + run_audit: true + audit_content: local + audit_git_version: devel diff --git a/Linux/ansible-lockdown/UBUNTU20-CIS/vars/main.yml b/Linux/ansible-lockdown/UBUNTU20-CIS/vars/main.yml new file mode 100644 index 0000000..8cac2a5 --- /dev/null +++ b/Linux/ansible-lockdown/UBUNTU20-CIS/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for UBUNTU20-CIS \ No newline at end of file diff --git a/Linux/ansible-lockdown/clone-all.sh b/Linux/ansible-lockdown/clone-all.sh old mode 100644 new mode 100755 index f05f5f7..4cc01ca --- a/Linux/ansible-lockdown/clone-all.sh +++ b/Linux/ansible-lockdown/clone-all.sh @@ -10,4 +10,4 @@ git clone https://github.com/ansible-lockdown/APACHE-2.4-CIS git clone https://github.com/ansible-lockdown/POSTGRES-12-CIS # remove all git repository files from each dir -find . -type d | grep -i "\.git" | xargs rm -rf +find . -name ".git" | xargs rm -rf
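The final hunk changes how clone-all.sh strips repository metadata after cloning the roles. As a rough illustration of the behavioural difference (assuming the script is run from the directory the roles were cloned into), the two forms treat names that merely contain ".git" differently:

# previous form: grep does substring matching, so any directory whose path contains
# ".git" (case-insensitively) is removed - which could also catch, for example, a role's .github/ directory
find . -type d | grep -i "\.git" | xargs rm -rf

# new form: only entries literally named ".git" are removed, i.e. just the repository
# metadata left behind by the git clone commands above
find . -name ".git" | xargs rm -rf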