From 2c421611ae6d20978a219524b798247d8a8a03c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20D=C3=A9siles?= <1536672+cdesiles@users.noreply.github.com> Date: Sun, 2 Nov 2025 21:18:15 +0100 Subject: [PATCH] chore: ansible-lint --- .ansible-lint | 3 + README.md | 19 +++- ansible.cfg | 1 + inventory/host_vars/example.yml | 1 + playbook.yml | 20 +++-- playbooks/example.yml | 1 + requirements.yml | 1 + roles/archlinux/README.md | 7 ++ roles/archlinux/defaults/main.yml | 1 + roles/archlinux/tasks/locales.yml | 12 +-- roles/archlinux/tasks/main.yml | 4 +- roles/archlinux/tasks/pacman.yml | 16 ++-- roles/archlinux/tasks/paru.yml | 124 +++++++++++++------------- roles/archlinux/tasks/yay.yml | 20 ++--- roles/disks/defaults/main.yml | 1 + roles/disks/tasks/main.yml | 4 +- roles/disks/tasks/partitioning.yml | 14 +-- roles/disks/tasks/trim-ssd.yml | 30 +++---- roles/docker/defaults/main.yml | 1 + roles/docker/tasks/main.yml | 38 ++++---- roles/docker/tasks/uninstall.yml | 12 +-- roles/fail2ban/defaults/main.yml | 1 + roles/fail2ban/tasks/main.yml | 28 +++--- roles/monitoring/tasks/main.yml | 1 + roles/net-config/tasks/main.yml | 18 ++-- roles/net-persist/tasks/main.yml | 18 ++-- roles/networking/tasks/main.yml | 12 +-- roles/nfs-server/defaults/main.yml | 1 - roles/nfs-server/handlers/main.yml | 2 +- roles/nfs-server/tasks/main.yml | 12 +-- roles/ntpd/defaults/main.yml | 6 +- roles/ntpd/handlers/main.yml | 2 +- roles/ntpd/tasks/main.yml | 18 ++-- roles/sshd/README.md | 14 +++ roles/sshd/defaults/main.yml | 1 + roles/sshd/tasks/main.yml | 2 +- roles/sshd/vars/archlinux.yml | 1 + roles/sshd/vars/debian.yml | 1 + roles/unbound/defaults/main.yml | 1 + roles/unbound/handlers/main.yml | 2 +- roles/unbound/tasks/main.yml | 66 +++++++------- roles/wireguard/defaults/main.yml | 1 + roles/wireguard/tasks/main.yml | 51 +++++------ roles/zfs/tasks/dataset-ownership.yml | 10 +-- roles/zfs/tasks/datasets.yml | 16 ++-- roles/zfs/tasks/delete-pool.yml | 6 +- 
roles/zfs/tasks/install.yml | 31 ++++--- roles/zfs/tasks/main.yml | 8 +- roles/zfs/tasks/pools.yml | 24 ++--- roles/zsh/defaults/main.yml | 1 + roles/zsh/tasks/main.yml | 13 ++- roles/zsh/tasks/plugins.yml | 27 ++---- roles/zsh/tasks/user-setup.yml | 15 ++-- 53 files changed, 390 insertions(+), 350 deletions(-) create mode 100644 .ansible-lint create mode 100644 roles/archlinux/README.md create mode 100644 roles/sshd/README.md diff --git a/.ansible-lint b/.ansible-lint new file mode 100644 index 0000000..022cafb --- /dev/null +++ b/.ansible-lint @@ -0,0 +1,3 @@ +--- +skip_list: + - var-naming[no-role-prefix] diff --git a/README.md b/README.md index 0331270..d3bd479 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,27 @@ # Homelab Ansible Playbooks -This repository contains Ansible playbooks and roles I use to manage my NAS and several VMs 👨‍💻. +This repository contains Ansible playbooks and roles I use to manage my NAS and some VMs 👨‍💻. This project is designed for personal/familial scale maintenance, if you find this useful for your use, want to share advises or security concerns, feel free to drop me a line. -This is a good playground to learn and I encourage you to adapt these roles to your needs. While they might not be production-ready for all environments, I'm open to adapting them for [Ansible Galaxy]((https://galaxy.ansible.com)) if there's community interest! +This is a good playground to learn and I encourage you to adapt these roles to your needs. While they might not be production-ready for all environments, I'm open to adapting them for [Ansible Galaxy](https://galaxy.ansible.com) if there's community interest! 
## Requirements +Base tools: + +```sh +# linux +apt-get install ansible ansible-lint +pacman -Syu ansible ansible-lint +# macos +brew install ansible ansible-lint +# windows (ansible-galaxy is bundled with the ansible package) +choco install ansible ansible-lint +``` + +Other roles: + +```sh ansible-galaxy collection install -r requirements.yml ``` @@ -36,5 +50,6 @@ ssh-copy-id -i ~/.ssh/id_rsa.pub username@remote_host Linting: ```sh +ansible-lint npx prettier --write . ``` diff --git a/ansible.cfg b/ansible.cfg index 1e8e054..7341772 100644 --- a/ansible.cfg +++ b/ansible.cfg @@ -1,5 +1,6 @@ [defaults] interpreter_python=/usr/bin/python3 +roles_path=./roles [ssh_connection] ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r diff --git a/inventory/host_vars/example.yml b/inventory/host_vars/example.yml index 2157323..ef80524 100644 --- a/inventory/host_vars/example.yml +++ b/inventory/host_vars/example.yml @@ -1,3 +1,4 @@ +--- # Network configuration # --------------------- network_interfaces: diff --git a/playbook.yml b/playbook.yml index 7741375..f6977a4 100644 --- a/playbook.yml +++ b/playbook.yml @@ -1,10 +1,16 @@ -- hosts: all +--- +# - hosts: all +# become: true +# roles: +# - role: networking +# - role: sshd +# - role: disks +# - role: wireguard +# - role: zsh +# - role: archlinux +# - role: podman + +- hosts: pinwheel become: true roles: - - role: networking - role: sshd - - role: disks - - role: wireguard - - role: zsh - - role: archlinux - - role: podman diff --git a/playbooks/example.yml b/playbooks/example.yml index eda91ff..cbe27fd 100644 --- a/playbooks/example.yml +++ b/playbooks/example.yml @@ -1,3 +1,4 @@ +--- - hosts: marge become: true roles: diff --git a/requirements.yml b/requirements.yml index c6f30fb..aa74901 100644 --- a/requirements.yml +++ b/requirements.yml @@ -1,3 +1,4 @@ +--- collections: - name: ansible.netcommon - name: community.general diff --git a/roles/archlinux/README.md 
b/roles/archlinux/README.md new file mode 100644 index 0000000..a4ba1dc --- /dev/null +++ b/roles/archlinux/README.md @@ -0,0 +1,7 @@ +Le classique quand on n'a pas mis à jour arch depuis un moment c'est d'avoir une clef PGP manquante. + +``` +sudo pacman -Sy archlinux-keyring +``` + +Avant l'update soigne les petits chats blessés. diff --git a/roles/archlinux/defaults/main.yml b/roles/archlinux/defaults/main.yml index 697e324..caea001 100644 --- a/roles/archlinux/defaults/main.yml +++ b/roles/archlinux/defaults/main.yml @@ -1,3 +1,4 @@ +--- arch_locale: en_US.UTF-8 yay_src_path: /opt/yay yay_git_repo: https://aur.archlinux.org/yay.git diff --git a/roles/archlinux/tasks/locales.yml b/roles/archlinux/tasks/locales.yml index 8da5b81..fc83f7a 100644 --- a/roles/archlinux/tasks/locales.yml +++ b/roles/archlinux/tasks/locales.yml @@ -1,15 +1,15 @@ --- - name: Configure locales block: - - name: activate locale - command: + - name: Activate locale + ansible.builtin.command: cmd: localectl set-locale LANG={{ arch_locale }} - - name: edit /etc/locale.gen - lineinfile: + - name: Edit /etc/locale.gen + ansible.builtin.lineinfile: dest: /etc/locale.gen state: present regexp: "{{ arch_locale }}" line: "{{ arch_locale }} UTF-8" - - name: regenerate locales - command: + - name: Regenerate locales + ansible.builtin.command: cmd: locale-gen diff --git a/roles/archlinux/tasks/main.yml b/roles/archlinux/tasks/main.yml index 21ecce7..4270a10 100644 --- a/roles/archlinux/tasks/main.yml +++ b/roles/archlinux/tasks/main.yml @@ -1,10 +1,10 @@ --- - name: Skip Archlinux installation - meta: end_play + ansible.builtin.meta: end_play when: ansible_facts['os_family'] != 'Archlinux' - name: Archlinux base setup - include_tasks: "{{ item }}" + ansible.builtin.include_tasks: "{{ item }}" loop: - pacman.yml - locales.yml diff --git a/roles/archlinux/tasks/pacman.yml b/roles/archlinux/tasks/pacman.yml index e3e6241..281c24f 100644 --- a/roles/archlinux/tasks/pacman.yml +++ 
b/roles/archlinux/tasks/pacman.yml @@ -1,6 +1,6 @@ --- - name: Check if pacman is not locked - stat: + ansible.builtin.stat: path: /var/lib/pacman/db.lck register: pacman_lock failed_when: pacman_lock.stat.exists @@ -13,32 +13,30 @@ # state: absent - name: Install reflector (looking for fastest mirror) - pacman: + community.general.pacman: name: reflector state: present - name: Stat pacman mirrorlist - stat: + ansible.builtin.stat: path: /etc/pacman.d/mirrorlist register: mirrorlist # Probably not here if it's a fresh install - name: Stat pacman mirrorlist.bak - stat: + ansible.builtin.stat: path: /etc/pacman.d/mirrorlist.bak register: mirrorlist_bak - name: Backup and update pacman mirrorlist if older than 7 days - shell: > + ansible.builtin.shell: > cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.bak && reflector --latest 20 --protocol https --sort rate --save /etc/pacman.d/mirrorlist - when: mirrorlist_bak.stat.exists is false or - (mirrorlist.stat.exists and - (ansible_date_time.epoch | int - mirrorlist.stat.mtime) > 604800) + when: mirrorlist_bak.stat.exists is false or (mirrorlist.stat.exists and (ansible_date_time.epoch | int - mirrorlist.stat.mtime) > 604800) - name: Configure pacman to output colors - lineinfile: + ansible.builtin.lineinfile: dest: /etc/pacman.conf state: present regexp: "^(.*)Color" diff --git a/roles/archlinux/tasks/paru.yml b/roles/archlinux/tasks/paru.yml index e44689b..03d9b43 100644 --- a/roles/archlinux/tasks/paru.yml +++ b/roles/archlinux/tasks/paru.yml @@ -1,10 +1,68 @@ --- - name: Check if paru is already installed - stat: + ansible.builtin.stat: path: /usr/bin/paru register: paru - name: Install paru + when: not paru.stat.exists + ## + ## Deprecated version with compilation + ## + # - name: Install paru + # block: + # - name: Install build dependencies + # package: + # name: + # - base-devel + # - git + # state: present + + # - name: Disable sudo password prompt (makepkg sudoers hack) + # lineinfile: + # dest: /etc/sudoers 
+ # state: present + # regexp: "^#?%wheel" + # line: "%wheel ALL=(ALL) NOPASSWD: ALL" + # validate: /usr/sbin/visudo -cf %s + + # - command: + # cmd: whoami + # no_log: true + # become: false + # register: main_user + + # - set_fact: + # main_user: "{{ main_user.stdout }}" + # no_log: true + + # - name: Create paru sources dir + # file: + # path: "{{ paru_src_path }}" + # state: directory + # owner: "{{ main_user }}" + + # - name: Clone git sources + # become: false + # git: + # repo: "{{ paru_git_repo }}" + # dest: "{{ paru_src_path }}" + + # # note: this only works because SUDOERS password prompt is disabled + # - name: Build and install + # become: false + # command: + # chdir: "{{ paru_src_path }}" + # cmd: "makepkg -si -f --noconfirm" + + # - name: Restore sudo with password prompt + # lineinfile: + # dest: /etc/sudoers + # state: present + # regexp: "^#?%wheel" + # line: "%wheel ALL=(ALL:ALL) ALL" + # validate: /usr/sbin/visudo -cf %s + # when: not paru.stat.exists block: - name: Get the last github release ansible.builtin.uri: @@ -13,14 +71,14 @@ register: paru_release - name: Extract tag_name - set_fact: + ansible.builtin.set_fact: paru_version: "{{ (paru_release.json.tag_name | regex_replace('^v', '')) }}" - name: Get the binary URL ({{ os_arch }}) - set_fact: + ansible.builtin.set_fact: paru_url: "{{ item.browser_download_url }}" loop: "{{ paru_release.json.assets }}" - when: "'{{ os_arch }}.tar.zst' in item.name" + when: (os_arch ~ '.tar.zst') in item.name - name: Download ansible.builtin.get_url: @@ -45,61 +103,3 @@ ansible.builtin.file: path: "/tmp/paru-{{ os_arch }}.tar.zst" state: absent - when: not paru.stat.exists -## -## Deprecated version with compilation -## -# - name: Install paru -# block: -# - name: Install build dependencies -# package: -# name: -# - base-devel -# - git -# state: present - -# - name: Disable sudo password prompt (makepkg sudoers hack) -# lineinfile: -# dest: /etc/sudoers -# state: present -# regexp: "^#?%wheel" -# line: "%wheel 
ALL=(ALL) NOPASSWD: ALL" -# validate: /usr/sbin/visudo -cf %s - -# - command: -# cmd: whoami -# no_log: true -# become: false -# register: main_user - -# - set_fact: -# main_user: "{{ main_user.stdout }}" -# no_log: true - -# - name: Create paru sources dir -# file: -# path: "{{ paru_src_path }}" -# state: directory -# owner: "{{ main_user }}" - -# - name: Clone git sources -# become: false -# git: -# repo: "{{ paru_git_repo }}" -# dest: "{{ paru_src_path }}" - -# # note: this only works because SUDOERS password prompt is disabled -# - name: Build and install -# become: false -# command: -# chdir: "{{ paru_src_path }}" -# cmd: "makepkg -si -f --noconfirm" - -# - name: Restore sudo with password prompt -# lineinfile: -# dest: /etc/sudoers -# state: present -# regexp: "^#?%wheel" -# line: "%wheel ALL=(ALL:ALL) ALL" -# validate: /usr/sbin/visudo -cf %s -# when: not paru.stat.exists diff --git a/roles/archlinux/tasks/yay.yml b/roles/archlinux/tasks/yay.yml index 2884f51..378d79a 100644 --- a/roles/archlinux/tasks/yay.yml +++ b/roles/archlinux/tasks/yay.yml @@ -1,60 +1,60 @@ --- - name: Check if yay is already installed - stat: + ansible.builtin.stat: path: /usr/bin/yay register: yay - name: Install yay + when: not yay.stat.exists block: - name: Install build dependencies - package: + ansible.builtin.package: name: - base-devel - git state: present - name: Disable sudo password prompt (makepkg sudoers hack) - lineinfile: + ansible.builtin.lineinfile: dest: /etc/sudoers state: present regexp: "^#?%wheel" line: "%wheel ALL=(ALL) NOPASSWD: ALL" validate: /usr/sbin/visudo -cf %s - - command: + - ansible.builtin.command: cmd: whoami no_log: true become: false register: main_user - - set_fact: + - ansible.builtin.set_fact: main_user: "{{ main_user.stdout }}" no_log: true - name: Create yay sources dir - file: + ansible.builtin.file: path: "{{ yay_src_path }}" state: directory owner: "{{ main_user }}" - name: Clone git sources become: false - git: + ansible.builtin.git: repo: 
"{{ yay_git_repo }}" dest: "{{ yay_src_path }}" # note: this only works because SUDOERS password prompt is disabled - name: Build and install become: false - command: + ansible.builtin.command: chdir: "{{ yay_src_path }}" cmd: "makepkg -si -f --noconfirm" - name: Restore sudo with password prompt - lineinfile: + ansible.builtin.lineinfile: dest: /etc/sudoers state: present regexp: "^#?%wheel" line: "%wheel ALL=(ALL:ALL) ALL" validate: /usr/sbin/visudo -cf %s - when: not yay.stat.exists diff --git a/roles/disks/defaults/main.yml b/roles/disks/defaults/main.yml index 0f4874e..785ca3b 100644 --- a/roles/disks/defaults/main.yml +++ b/roles/disks/defaults/main.yml @@ -1 +1,2 @@ +--- ssd_trim_periodicity: monthly diff --git a/roles/disks/tasks/main.yml b/roles/disks/tasks/main.yml index 43210fa..bc5c2ee 100644 --- a/roles/disks/tasks/main.yml +++ b/roles/disks/tasks/main.yml @@ -1,7 +1,7 @@ --- - name: Ensure disks are formatted correctly - include_tasks: partitioning.yml + ansible.builtin.include_tasks: partitioning.yml loop: "{{ disk_partitioning | default([]) }}" - name: Enable trim SSD if there is at least one - include_tasks: trim-ssd.yml + ansible.builtin.include_tasks: trim-ssd.yml diff --git a/roles/disks/tasks/partitioning.yml b/roles/disks/tasks/partitioning.yml index 802d3c4..e8d074a 100644 --- a/roles/disks/tasks/partitioning.yml +++ b/roles/disks/tasks/partitioning.yml @@ -1,23 +1,23 @@ --- - name: Install sfdisk - package: + ansible.builtin.package: name: util-linux state: present changed_when: false - name: Install hdparm - package: + ansible.builtin.package: name: hdparm state: present changed_when: false - name: Load expected layout from file (controller side) - set_fact: + ansible.builtin.set_fact: expected_layout: "{{ lookup('file', item.layout_file) }}" changed_when: false - name: Get current layout from remote - command: "sfdisk --dump {{ item.device }}" + ansible.builtin.command: "sfdisk --dump {{ item.device }}" register: current_layout 
changed_when: false @@ -25,18 +25,18 @@ vars: current_clean: "{{ current_layout.stdout | trim | regex_replace('\\s+', ' ') }}" expected_clean: "{{ expected_layout | trim | regex_replace('\\s+', ' ') }}" - set_fact: + ansible.builtin.set_fact: layout_differs: "{{ current_clean != expected_clean }}" changed_when: false - name: Copy layout file to remote (only if different) - copy: + ansible.builtin.copy: content: "{{ expected_layout }}" dest: "/tmp/expected-{{ item.device | basename }}.sfdisk" mode: "0644" when: layout_differs - name: Apply partition table using sfdisk - command: > + ansible.builtin.command: > sfdisk {{ item.device }} < {{ item.layout_file }} when: layout_differs diff --git a/roles/disks/tasks/trim-ssd.yml b/roles/disks/tasks/trim-ssd.yml index 39c725c..49ce201 100644 --- a/roles/disks/tasks/trim-ssd.yml +++ b/roles/disks/tasks/trim-ssd.yml @@ -1,22 +1,22 @@ --- # see: https://wiki.archlinux.org/title/Solid_state_drive#Periodic_TRIM - name: Check if there is at least one SSD - set_fact: - has_at_least_one_ssd: "{{ ansible_facts.devices | dict2items | selectattr('value.rotational', 'equalto', '0') | list | length > 0}}" + ansible.builtin.set_fact: + has_at_least_one_ssd: "{{ ansible_facts.devices | dict2items | selectattr('value.rotational', 'equalto', '0') | list | length > 0 }}" changed_when: false - name: Skip trim role - meta: end_play + ansible.builtin.meta: end_play when: not has_at_least_one_ssd -- name: install trim tools - package: +- name: Install trim tools + ansible.builtin.package: name: util-linux state: present changed_when: false -- name: edit trim periodicity if needed - template: +- name: Edit trim periodicity if needed + ansible.builtin.template: src: templates/fstrim.timer.j2 dest: "/etc/systemd/system/fstrim.timer.d/override.conf" owner: root @@ -24,20 +24,20 @@ mode: "0644" register: timer_config -- name: systemd daemon reload - systemd: - daemon_reload: yes +- name: Systemd daemon reload + ansible.builtin.systemd: + 
daemon_reload: true when: timer_config.changed -- name: enable periodic trim - systemd: +- name: Enable periodic trim + ansible.builtin.systemd: name: fstrim.timer - enabled: yes + enabled: true state: started changed_when: false -- name: install nvme-cli - package: +- name: Install nvme-cli + ansible.builtin.package: name: nvme-cli state: present changed_when: false diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml index e65f83e..db2de7a 100644 --- a/roles/docker/defaults/main.yml +++ b/roles/docker/defaults/main.yml @@ -1 +1,2 @@ +--- docker_projects_dir: /opt/docker diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml index 6cd1a33..0cbb0f2 100644 --- a/roles/docker/tasks/main.yml +++ b/roles/docker/tasks/main.yml @@ -5,49 +5,49 @@ # Archlinux: only if your target is meant to frequently build docker images # see: https://stackoverflow.com/a/78352698 -- name: uninstall docker - block: - - name: Include uninstall tasks - include_tasks: uninstall.yml - - name: Skip docker installation - meta: end_play +- name: Uninstall docker when: uninstall_docker | lower in ['yes', 'y'] -- name: install docker - package: + block: + - name: Include uninstall tasks + ansible.builtin.include_tasks: uninstall.yml + - name: Skip docker installation + ansible.builtin.meta: end_play +- name: Install docker + ansible.builtin.package: name: docker -- name: enable the service - service: +- name: Enable the service + ansible.builtin.service: name: "docker" enabled: true state: started -- command: +- ansible.builtin.command: cmd: whoami no_log: true become: false register: main_user -- set_fact: +- ansible.builtin.set_fact: main_user: "{{ main_user.stdout }}" no_log: true -- name: create projects directory - file: +- name: Create projects directory + ansible.builtin.file: path: "{{ docker_projects_dir }}" state: directory owner: "{{ main_user }}" group: "{{ main_user }}" -- name: allow user to use docker - user: +- name: Allow user to use docker 
+ ansible.builtin.user: name: "{{ main_user }}" groups: docker - append: yes + append: true register: docker_group -- name: inform the user that user needs to logout and login again - debug: +- name: Inform the user that user needs to logout and login again + ansible.builtin.debug: msg: "Please logout and login again to make sure the user is added to the docker group" when: docker_group.changed diff --git a/roles/docker/tasks/uninstall.yml b/roles/docker/tasks/uninstall.yml index cf33cc0..3210c2b 100644 --- a/roles/docker/tasks/uninstall.yml +++ b/roles/docker/tasks/uninstall.yml @@ -1,17 +1,17 @@ --- -- name: uninstall docker - package: +- name: Uninstall docker + ansible.builtin.package: name: docker state: absent -- name: prompt the user for confirmation +- name: Prompt the user for confirmation ansible.builtin.pause: prompt: "[IRREVERSIBLE] Are you sure you want to delete {{ docker_projects_dir }}?" - echo: yes + echo: true register: confirmation -- name: remote projects directory - file: +- name: Remote projects directory + ansible.builtin.file: path: "{{ docker_projects_dir }}" state: absent owner: "{{ main_user }}" diff --git a/roles/fail2ban/defaults/main.yml b/roles/fail2ban/defaults/main.yml index 3dcb380..0f75db3 100644 --- a/roles/fail2ban/defaults/main.yml +++ b/roles/fail2ban/defaults/main.yml @@ -1,2 +1,3 @@ +--- fail2ban_firewall: ufw fail2ban_backend: systemd diff --git a/roles/fail2ban/tasks/main.yml b/roles/fail2ban/tasks/main.yml index ae6390c..62fe7ad 100644 --- a/roles/fail2ban/tasks/main.yml +++ b/roles/fail2ban/tasks/main.yml @@ -1,27 +1,27 @@ --- # see: https://wiki.archlinux.org/title/Fail2ban - name: Install fail2ban - package: + ansible.builtin.package: name: fail2ban state: present - name: Ensure fail2ban configuration is only owned by root - file: + ansible.builtin.file: path: /etc/fail2ban owner: root group: root - mode: 0700 - recurse: yes + mode: "0700" + recurse: true - name: Install Fail2ban Config block: - name: General 
configuration - template: + ansible.builtin.template: src: jail.local.j2 dest: /etc/fail2ban/jail.local mode: "0600" - name: Service custom jail - template: + ansible.builtin.template: src: "{{ item.src }}" dest: "{{ item.dest }}" mode: "0600" @@ -32,28 +32,28 @@ - name: Service hardening (read-only root rights) block: - name: Check if hardening configuration is already applied - stat: + ansible.builtin.stat: path: /etc/systemd/system/fail2ban.service.d/override.conf register: override_conf - name: Create configuration directory - file: + ansible.builtin.file: path: /etc/systemd/system/fail2ban.service.d state: directory owner: root group: root - mode: 0700 + mode: "0700" - name: Apply hardening configuration - template: + ansible.builtin.template: src: hardened.fail2ban.conf.j2 dest: /etc/systemd/system/fail2ban.service.d/override.conf when: not override_conf.stat.exists - name: Reload systemd - systemd: - daemon_reload: yes + ansible.builtin.systemd: + daemon_reload: true when: not override_conf.stat.exists - name: Start and enable fail2ban - service: + ansible.builtin.service: name: fail2ban state: started - enabled: yes + enabled: true diff --git a/roles/monitoring/tasks/main.yml b/roles/monitoring/tasks/main.yml index ab6da49..d12b294 100644 --- a/roles/monitoring/tasks/main.yml +++ b/roles/monitoring/tasks/main.yml @@ -1,3 +1,4 @@ +--- - name: install oryx cmd: paru -S oryx when: ansible_facts['os_family'] == 'Archlinux' diff --git a/roles/net-config/tasks/main.yml b/roles/net-config/tasks/main.yml index 9886e38..b97d481 100644 --- a/roles/net-config/tasks/main.yml +++ b/roles/net-config/tasks/main.yml @@ -1,19 +1,19 @@ --- - name: Check if the interface ipv4 address is defined - block: - - debug: - msg: "Warning: iface {{ interface.name }} has no defined ipv4 address, skipping configuration" - - name: Skip net-config role for {{ interface.name }} - meta: end_play when: interface.ipv4.address is not defined + block: + - ansible.builtin.debug: + msg: "Warning: 
iface {{ interface.name }} has no defined ipv4 address, skipping configuration" + - name: Skip net-config role for {{ interface.name }} + ansible.builtin.meta: end_play - name: Check if the interface is already configured - stat: + ansible.builtin.stat: path: /etc/systemd/network/20-{{ interface.name }}.network register: network_file - name: What patch is needed - debug: + ansible.builtin.debug: msg: >- {%- if network_file.stat.exists == true -%} iface {{ interface.name }} is already configured, no action needed. @@ -23,7 +23,7 @@ - name: Create systemd-network link file when: network_file.stat.exists != true - template: + ansible.builtin.template: src: systemd.network.j2 dest: /etc/systemd/network/20-{{ interface.name }}.network owner: root @@ -31,6 +31,6 @@ mode: "0644" - name: Notify a reload is required - set_fact: + ansible.builtin.set_fact: network_reload_required: true when: network_file.stat.exists != true diff --git a/roles/net-persist/tasks/main.yml b/roles/net-persist/tasks/main.yml index 1211d9a..2437991 100644 --- a/roles/net-persist/tasks/main.yml +++ b/roles/net-persist/tasks/main.yml @@ -1,17 +1,11 @@ --- - name: "Check {{ interface.name }} ({{ interface.mac_address }}) rule" - set_fact: - interface_original_name: "{{ ansible_facts.interfaces - | select('in', ansible_facts) - | map('extract', ansible_facts) - | selectattr('pciid', 'defined') - | selectattr('macaddress', 'equalto', interface.mac_address) - | map(attribute='device') - | first - }}" + ansible.builtin.set_fact: + interface_original_name: "{{ ansible_facts.interfaces | select('in', ansible_facts) | map('extract', ansible_facts) | selectattr('pciid', 'defined') | selectattr('macaddress', + 'equalto', interface.mac_address) | map(attribute='device') | first }}" - name: What patch is needed - debug: + ansible.builtin.debug: msg: >- {%- if interface_original_name != interface.name -%} iface {{ interface_original_name }} ({{ interface.mac_address }}) will be patched to {{ interface.name }}. 
@@ -21,7 +15,7 @@ - name: Create persistent-net link file when: interface_original_name != interface.name - template: + ansible.builtin.template: src: persistent-net.link.j2 dest: /etc/systemd/network/10-persistent-net-{{ interface.name }}.link owner: root @@ -29,6 +23,6 @@ mode: "0644" - name: Notify a reboot is required - set_fact: + ansible.builtin.set_fact: reboot_required: true when: interface_original_name != interface.name diff --git a/roles/networking/tasks/main.yml b/roles/networking/tasks/main.yml index 29498e1..d0bfa66 100644 --- a/roles/networking/tasks/main.yml +++ b/roles/networking/tasks/main.yml @@ -1,25 +1,25 @@ --- - name: Setup persistent network interface(s) - include_role: + ansible.builtin.include_role: name: net-persist - public: yes + public: true vars: interface: "{{ item }}" loop: "{{ hostvars[inventory_hostname].network_interfaces | default([]) }}" - name: Configure network interface(s) - include_role: + ansible.builtin.include_role: name: net-config - public: yes + public: true vars: interface: "{{ item }}" loop: "{{ hostvars[inventory_hostname].network_interfaces | default([]) }}" - name: Reload networkd and resolved - systemd: + ansible.builtin.systemd: name: "{{ item }}" state: reloaded - daemon_reload: yes + daemon_reload: true loop: - systemd-networkd - systemd-resolved diff --git a/roles/nfs-server/defaults/main.yml b/roles/nfs-server/defaults/main.yml index f94e31a..528d2d6 100644 --- a/roles/nfs-server/defaults/main.yml +++ b/roles/nfs-server/defaults/main.yml @@ -8,7 +8,6 @@ # - host: "192.168.1.0/24" # readonly access for other lan clients # options: "ro,sync,no_subtree_check" nfs_shares: [] - nfs_configuration_file: "/etc/nfs.conf" nfs_exports_file: "/etc/exports" diff --git a/roles/nfs-server/handlers/main.yml b/roles/nfs-server/handlers/main.yml index 1ee865c..91d80e6 100644 --- a/roles/nfs-server/handlers/main.yml +++ b/roles/nfs-server/handlers/main.yml @@ -3,7 +3,7 @@ ansible.builtin.systemd: name: "nfsv4-server" state: 
restarted - daemon_reload: yes + daemon_reload: true - name: "Update exportfs" ansible.builtin.command: exportfs -ra diff --git a/roles/nfs-server/tasks/main.yml b/roles/nfs-server/tasks/main.yml index e66f65c..3b8467a 100644 --- a/roles/nfs-server/tasks/main.yml +++ b/roles/nfs-server/tasks/main.yml @@ -1,10 +1,10 @@ --- -- name: install nfs-server - package: +- name: Install nfs-server + ansible.builtin.package: name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('nfs-utils', 'nfs-kernel-server') }}" state: present -- name: configure nfs configuration +- name: Configure nfs configuration ansible.builtin.template: src: templates/nfs.conf.j2 dest: "{{ nfs_configuration_file }}" @@ -13,7 +13,7 @@ mode: "0644" notify: Reload systemd and restart nfs-server -- name: configure nfs-server exports +- name: Configure nfs-server exports ansible.builtin.template: src: templates/exports.j2 dest: "{{ nfs_exports_file }}" @@ -22,13 +22,13 @@ mode: "0644" notify: Update exportfs -- name: systemd service for nfs-server is started and enabled +- name: Systemd service for nfs-server is started and enabled ansible.builtin.systemd: name: nfsv4-server state: started enabled: true -- name: setup firewall rules for nfs on port +- name: Setup firewall rules for nfs on port community.general.ufw: rule: allow src: "{{ item }}" diff --git a/roles/ntpd/defaults/main.yml b/roles/ntpd/defaults/main.yml index 466dcae..fce7dd9 100644 --- a/roles/ntpd/defaults/main.yml +++ b/roles/ntpd/defaults/main.yml @@ -1,11 +1,9 @@ +--- # NTP configuration file ntp_config_file: "/etc/ntp.conf" # NTP servers to use. 
-ntp_pools: -" 0.uk.pool.ntp.org" - -" 1.uk.pool.ntp.org" - -" 2.uk.pool.ntp.org" - -" 3.uk.pool.ntp.org" +ntp_pools: -" 0.uk.pool.ntp.org" -" 1.uk.pool.ntp.org" -" 2.uk.pool.ntp.org" -" 3.uk.pool.ntp.org" # System timezone ntp_timezone: "Europe/London" diff --git a/roles/ntpd/handlers/main.yml b/roles/ntpd/handlers/main.yml index 5433efb..ab3ae61 100644 --- a/roles/ntpd/handlers/main.yml +++ b/roles/ntpd/handlers/main.yml @@ -3,4 +3,4 @@ ansible.builtin.systemd: name: ntpd state: restarted - daemon_reload: yes + daemon_reload: true diff --git a/roles/ntpd/tasks/main.yml b/roles/ntpd/tasks/main.yml index b934c2f..74f856b 100644 --- a/roles/ntpd/tasks/main.yml +++ b/roles/ntpd/tasks/main.yml @@ -1,16 +1,16 @@ --- -- name: install NTP package - package: +- name: Install NTP package + ansible.builtin.package: name: "ntp" state: present - update_cache: yes + update_cache: true -- name: set system timezone to {{ ntp_timezone }}" +- name: Set system timezone to {{ ntp_timezone }}" community.general.timezone: name: "{{ ntp_timezone }}" notify: "Restart ntpd service" -- name: ensure NTP drift file directory exists +- name: Ensure NTP drift file directory exists ansible.builtin.file: path: "{{ ntp_drift_file | dirname }}" state: directory @@ -18,12 +18,12 @@ group: "ntp" mode: "0750" -- name: setup systems timezone +- name: Setup systems timezone community.general.timezone: name: "{{ ntp_timezone }}" notify: Restart chronyd # Redémarrer chrony peut être utile après un changement de TZ pour qu'il la prenne bien en compte dans ses logs/opérations -- name: "configure {{ ntp_config_file }}" +- name: "Configure {{ ntp_config_file }}" ansible.builtin.template: src: "ntp.conf.j2" dest: "{{ ntp_config_file }}" @@ -32,13 +32,13 @@ mode: "0644" notify: "Restart ntpd service" -- name: "ensure ntpd service is started and enabled" +- name: "Ensure ntpd service is started and enabled" ansible.builtin.systemd: name: "ntpd" state: started enabled: true -- name: "configure ufw firewall" +- 
name: "Configure ufw firewall" community.general.ufw: rule: allow port: "{{ ntp_port }}" diff --git a/roles/sshd/README.md b/roles/sshd/README.md new file mode 100644 index 0000000..d4d1a4f --- /dev/null +++ b/roles/sshd/README.md @@ -0,0 +1,14 @@ +# SSH server + +## Enable authorized_keys fallback + +When you encrypt your home data, you cannot allow hardened remote SSH connection. +To make this still possible, here is the trick: a fallback authorized_key file: /etc/ssh/authorized_keys/myuser + +Simply enable this setting to get this working: + +``` +ssh_authorized_keys_fallback_enabled: true +``` + +And you're set. diff --git a/roles/sshd/defaults/main.yml b/roles/sshd/defaults/main.yml index f6327dc..fc3fe21 100644 --- a/roles/sshd/defaults/main.yml +++ b/roles/sshd/defaults/main.yml @@ -1,3 +1,4 @@ +--- ssh_port: 22 ssh_allowed_network: "192.168.1.0/24" ssh_allowed_vpn_network: "192.168.27.0/27" diff --git a/roles/sshd/tasks/main.yml b/roles/sshd/tasks/main.yml index 3a5e5bd..675fd8c 100644 --- a/roles/sshd/tasks/main.yml +++ b/roles/sshd/tasks/main.yml @@ -17,7 +17,7 @@ - name: Enable SSH service: name: "{{ ssh_service_name }}" - enabled: yes + enabled: true - name: Allow local network incoming connection ufw: diff --git a/roles/sshd/vars/archlinux.yml b/roles/sshd/vars/archlinux.yml index 208e090..98fa3cc 100644 --- a/roles/sshd/vars/archlinux.yml +++ b/roles/sshd/vars/archlinux.yml @@ -1,2 +1,3 @@ +--- ssh_package_name: "openssh" ssh_service_name: "sshd" diff --git a/roles/sshd/vars/debian.yml b/roles/sshd/vars/debian.yml index d758d79..617aaac 100644 --- a/roles/sshd/vars/debian.yml +++ b/roles/sshd/vars/debian.yml @@ -1,2 +1,3 @@ +--- ssh_package_name: "openssh-server" ssh_service_name: "ssh" diff --git a/roles/unbound/defaults/main.yml b/roles/unbound/defaults/main.yml index c58e332..6ec3ee0 100644 --- a/roles/unbound/defaults/main.yml +++ b/roles/unbound/defaults/main.yml @@ -1,3 +1,4 @@ +--- unbound_config_base_path: /etc/unbound unbound_config_path: "{{ 
unbound_config_base_path }}/unbound.conf" unbound_root_hints_path: "{{ unbound_config_base_path }}/root.hints" diff --git a/roles/unbound/handlers/main.yml b/roles/unbound/handlers/main.yml index 6686182..a31deb3 100644 --- a/roles/unbound/handlers/main.yml +++ b/roles/unbound/handlers/main.yml @@ -9,7 +9,7 @@ ansible.builtin.systemd: name: unbound state: restarted - daemon_reload: yes + daemon_reload: true - name: Reload AppArmor profile ansible.builtin.command: apparmor_parser -r {{ unbound_apparmor_profile_path }} diff --git a/roles/unbound/tasks/main.yml b/roles/unbound/tasks/main.yml index c7e0305..4b3e7e0 100644 --- a/roles/unbound/tasks/main.yml +++ b/roles/unbound/tasks/main.yml @@ -1,19 +1,19 @@ --- # see: https://calomel.org/unbound_dns.html # see: https://wiki.archlinux.org/title/Unbound -- name: install unbound - package: +- name: Install unbound + ansible.builtin.package: name: unbound state: present # Note: on archlinux this is already shipped within unbound -- name: install unbound-anchor on debian/ubuntu - package: +- name: Install unbound-anchor on debian/ubuntu + ansible.builtin.package: name: unbound-anchor state: present when: ansible_facts['os_family'] == 'Debian' -- name: ensure unbound configuration is owned by unbound +- name: Ensure unbound configuration is owned by unbound ansible.builtin.shell: | find "{{ unbound_config_base_path }}" -type d -exec chmod 755 {} \; find "{{ unbound_config_base_path }}" -type f -exec chmod 644 {} \; @@ -21,7 +21,7 @@ args: executable: /bin/bash -- name: ensure apparmor profile for unbound exists +- name: Ensure apparmor profile for unbound exists ansible.builtin.copy: dest: /etc/apparmor.d/usr.sbin.unbound content: | @@ -34,59 +34,59 @@ notify: - Reload AppArmor profile -- name: check if root.hints exists - stat: +- name: Check if root.hints exists + ansible.builtin.stat: path: "{{ unbound_root_hints_path }}" register: root_hints -- name: update root.hints (if older than 6 months or missing) +- name: Update 
root.hints (if older than 6 months or missing) + when: > + (not root_hints.stat.exists) or + (ansible_date_time.epoch | int - root_hints.stat.mtime > 15552000) + block: - - name: download latest root hints from internic + - name: Download latest root hints from internic ansible.builtin.get_url: url: https://www.internic.net/domain/named.root dest: "{{ unbound_root_hints_path }}" owner: unbound group: unbound mode: "0644" - when: > - (not root_hints.stat.exists) or - (ansible_date_time.epoch | int - root_hints.stat.mtime > 15552000) - -- name: check if unbound ad_servers configuration exists - stat: +- name: Check if unbound ad_servers configuration exists + ansible.builtin.stat: path: "{{ unbound_ad_servers_config_path }}" register: ad_servers -- name: update the ad_servers list if older than 2 weeks or missing +- name: Update the ad_servers list if older than 2 weeks or missing + when: > + (not ad_servers.stat.exists) or + (ansible_date_time.epoch | int - ad_servers.stat.mtime > 1209600) + block: - - name: download stevenblack's hosts file + - name: Download stevenblack's hosts file ansible.builtin.get_url: url: https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts dest: /tmp/hosts.txt mode: "0644" - - name: convert hosts file to unbound format + - name: Convert hosts file to unbound format ansible.builtin.shell: | grep '^0\.0\.0\.0' /tmp/hosts.txt | awk '{print "local-zone: \""$2"\" always_nxdomain"}' > "{{ unbound_ad_servers_config_path }}" && chown unbound:unbound "{{ unbound_ad_servers_config_path }}" args: executable: /bin/bash - - name: clean up temporary file + - name: Clean up temporary file ansible.builtin.file: path: /tmp/hosts.txt state: absent - when: > - (not ad_servers.stat.exists) or - (ansible_date_time.epoch | int - ad_servers.stat.mtime > 1209600) - -- name: initialize dnssec trust anchor if missing +- name: Initialize dnssec trust anchor if missing ansible.builtin.command: unbound-anchor -a {{ unbound_anchor_root_key }} args: creates: 
"{{ unbound_anchor_root_key }}" -- name: install unbound config - template: +- name: Install unbound config + ansible.builtin.template: src: "{{ item.src }}" dest: "{{ item.dest }}" owner: unbound @@ -99,14 +99,14 @@ - Check Unbound config syntax - Reload systemd and restart unbound -- name: make sure unbound starts after wg-quick@wg0 +- name: Make sure unbound starts after wg-quick@wg0 block: - - name: ensure unbound.service.d directory exists + - name: Ensure unbound.service.d directory exists ansible.builtin.file: path: /etc/systemd/system/unbound.service.d state: directory mode: "0755" - - name: configure unbound systemd service + - name: Configure unbound systemd service ansible.builtin.copy: dest: /etc/systemd/system/unbound.service.d/override.conf content: | @@ -115,13 +115,13 @@ Requires=wg-quick@wg0.service notify: Reload systemd and restart unbound -- name: enables unbound service +- name: Enables unbound service ansible.builtin.service: name: unbound - enabled: yes + enabled: true state: started -- name: firewall ufw rules for unbound +- name: Firewall ufw rules for unbound community.general.ufw: rule: allow port: "{{ unbound_port }}" diff --git a/roles/wireguard/defaults/main.yml b/roles/wireguard/defaults/main.yml index 0412ee0..6ffaa7a 100644 --- a/roles/wireguard/defaults/main.yml +++ b/roles/wireguard/defaults/main.yml @@ -1,3 +1,4 @@ +--- wireguard_primary_interface: "{{ network_interfaces.0.name }}" wireguard_port: 51820 # static port to receive input connections wireguard_server_mode: true # enables NAT and open port diff --git a/roles/wireguard/tasks/main.yml b/roles/wireguard/tasks/main.yml index b5caaef..b54ebd2 100644 --- a/roles/wireguard/tasks/main.yml +++ b/roles/wireguard/tasks/main.yml @@ -1,58 +1,59 @@ -- name: install wireguard - package: +--- +- name: Install wireguard + ansible.builtin.package: name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('wireguard-tools', 'wireguard') }}" state: present # to support "DNS=" if 
used in a "client way" -- name: install openresolv/resolveconf - package: +- name: Install openresolv/resolveconf + ansible.builtin.package: name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('openresolv', 'resolvconf') }}" state: present -- name: ensure wireguard configuration is only owned by root - file: +- name: Ensure wireguard configuration is only owned by root + ansible.builtin.file: path: "{{ wireguard_config_base_path }}" owner: root group: root - mode: 0700 - recurse: yes + mode: "0700" + recurse: true -- name: check if private key exists - stat: +- name: Check if private key exists + ansible.builtin.stat: path: "{{ wireguard_config_base_path }}/privatekey" register: pkey_file -- name: generate wireguard keys if not present - shell: wg genkey | tee {{ wireguard_config_base_path }}/privatekey | wg pubkey > {{ wireguard_config_base_path }}/publickey +- name: Generate wireguard keys if not present + ansible.builtin.shell: wg genkey | tee {{ wireguard_config_base_path }}/privatekey | wg pubkey > {{ wireguard_config_base_path }}/publickey when: not pkey_file.stat.exists -- name: retrieve wireguard private key from file - slurp: +- name: Retrieve wireguard private key from file + ansible.builtin.slurp: src: "{{ wireguard_config_base_path }}/privatekey" register: private_key -- name: set wireguard private key - set_fact: +- name: Set wireguard private key + ansible.builtin.set_fact: wireguard_private_key: "{{ private_key['content'] | b64decode }}" -- name: disable "dns=" instruction if unbound is used to avoid race conditions at startup - set_fact: +- name: Disable "dns=" instruction if unbound is used to avoid race conditions at startup + ansible.builtin.set_fact: wireguard_dns: when: unbound_custom_lan_records is defined -- name: install wireguard config - template: +- name: Install wireguard config + ansible.builtin.template: src: wireguard.conf.j2 dest: /etc/wireguard/{{ wireguard_interface }}.conf -- name: start and enable service - service: 
+- name: Start and enable service + ansible.builtin.service: name: wg-quick@{{ wireguard_interface }} state: started - enabled: yes - daemon_reload: yes + enabled: true + daemon_reload: true -- name: configure the firewall for wireguard +- name: Configure the firewall for wireguard community.general.ufw: rule: allow port: "{{ wireguard_port }}" diff --git a/roles/zfs/tasks/dataset-ownership.yml b/roles/zfs/tasks/dataset-ownership.yml index c9eb71a..f6f9ac4 100644 --- a/roles/zfs/tasks/dataset-ownership.yml +++ b/roles/zfs/tasks/dataset-ownership.yml @@ -1,14 +1,14 @@ --- # due to Ansible limitations, we cannot loop over a block, so we loop over a distinct tasks file... # @see https://stackoverflow.com/a/58911694 -- name: set ownership on dataset mountpoint +- name: Set ownership on dataset mountpoint block: - - name: get the mountpoint - ansible.builtin.shell: "zfs get -H -o value mountpoint {{ dataset.name }}" + - name: Get the mountpoint + ansible.builtin.command: "zfs get -H -o value mountpoint {{ dataset.name }}" register: mountpoint changed_when: false - - name: set owner of mountpoints - file: + - name: Set owner of mountpoints + ansible.builtin.file: path: "{{ mountpoint.stdout }}" owner: "{{ dataset.user | default(main_user) }}" group: "{{ dataset.group | default(main_user) }}" diff --git a/roles/zfs/tasks/datasets.yml b/roles/zfs/tasks/datasets.yml index c3c1961..d6eeaa1 100644 --- a/roles/zfs/tasks/datasets.yml +++ b/roles/zfs/tasks/datasets.yml @@ -1,25 +1,25 @@ --- # see: https://docs.ansible.com/ansible/latest/collections/community/general/zfs_module.html -- name: managing filesystems, volumes, snapshots - zfs: +- name: Managing filesystems, volumes, snapshots + community.general.zfs: name: "{{ item.name }}" state: "{{ item.state }}" - extra_zfs_properties: "{{ item.extra_zfs_properties|default(omit) }}" - origin: "{{ item.origin|default(omit) }}" + extra_zfs_properties: "{{ item.extra_zfs_properties | default(omit) }}" + origin: "{{ item.origin | 
default(omit) }}" with_items: "{{ zfs_datasets }}" -- command: +- ansible.builtin.command: cmd: whoami no_log: true become: false register: main_user -- set_fact: +- ansible.builtin.set_fact: main_user: "{{ main_user.stdout }}" no_log: true -- name: set dataset ownership - include_tasks: "./dataset-ownership.yml" +- name: Set dataset ownership + ansible.builtin.include_tasks: "./dataset-ownership.yml" loop: "{{ zfs_datasets }}" loop_control: loop_var: dataset diff --git a/roles/zfs/tasks/delete-pool.yml b/roles/zfs/tasks/delete-pool.yml index e4e3eb3..838e1e3 100644 --- a/roles/zfs/tasks/delete-pool.yml +++ b/roles/zfs/tasks/delete-pool.yml @@ -1,12 +1,12 @@ --- # due to Ansible limitations, we cannot loop over a block, so we loop over a distinct tasks file... # @see https://stackoverflow.com/a/58911694 -- name: prompt the user for confirmation +- name: Prompt the user for confirmation ansible.builtin.pause: prompt: "[IRREVERSIBLE] Are you sure you want to delete zpool {{ zpool.name }}?" - echo: yes + echo: true register: confirmation -- name: deleting zpool +- name: Deleting zpool ansible.builtin.command: "zpool destroy {{ zpool.name }}" when: confirmation.user_input | lower in ['yes', 'y'] diff --git a/roles/zfs/tasks/install.yml b/roles/zfs/tasks/install.yml index fadc593..febb2ae 100644 --- a/roles/zfs/tasks/install.yml +++ b/roles/zfs/tasks/install.yml @@ -1,6 +1,6 @@ --- - name: Check if zfs-linux-lts is installed - command: pacman -Qi zfs-dkms + ansible.builtin.command: pacman -Qi zfs-dkms register: zfs_dkms_installed changed_when: false failed_when: false @@ -8,9 +8,9 @@ - name: Install zfs when: zfs_dkms_installed.stderr block: - - name: disable SUDOERS password prompt for makepkg + - name: Disable SUDOERS password prompt for makepkg no_log: true - lineinfile: + ansible.builtin.lineinfile: dest: /etc/sudoers state: present regexp: "^#?%wheel" @@ -22,30 +22,29 @@ # Using dkms with the lts linux kernel is a better approach IMO. 
- name: Install zfs become: false - command: + ansible.builtin.command: cmd: "paru -S --noconfirm zfs-dkms zfs-utils" - name: Restore SUDOERS password prompt after yay no_log: true - lineinfile: + ansible.builtin.lineinfile: dest: /etc/sudoers state: present regexp: "^#?%wheel" line: "%wheel ALL=(ALL:ALL) ALL" validate: /usr/sbin/visudo -cf %s -- name: check if /etc/hostid is present - stat: +- name: Check if /etc/hostid is present + ansible.builtin.stat: path: /etc/hostid register: hostid changed_when: false -- name: generate /etc/hostid if not present +- name: Generate /etc/hostid if not present when: not hostid.stat.exists - command: zgenhostid $(hostid) - + ansible.builtin.command: zgenhostid $(hostid) - name: Check if zrepl is installed - command: pacman -Qi zrepl + ansible.builtin.command: pacman -Qi zrepl register: zrepl_installed changed_when: false failed_when: false @@ -53,9 +52,9 @@ - name: Install zrepl when: zrepl_installed.stderr block: - - name: disable SUDOERS password prompt for makepkg + - name: Disable SUDOERS password prompt for makepkg no_log: true - lineinfile: + ansible.builtin.lineinfile: dest: /etc/sudoers state: present regexp: "^#?%wheel" @@ -64,12 +63,12 @@ - name: Install zrepl become: false - command: + ansible.builtin.command: cmd: "paru -S --noconfirm zrepl" - name: Restore SUDOERS password prompt after paru no_log: true - lineinfile: + ansible.builtin.lineinfile: dest: /etc/sudoers state: present regexp: "^#?%wheel" @@ -77,7 +76,7 @@ validate: /usr/sbin/visudo -cf %s - name: Enable zfs services - service: + ansible.builtin.service: name: "{{ item }}" enabled: true state: started diff --git a/roles/zfs/tasks/main.yml b/roles/zfs/tasks/main.yml index 32b3d48..48d24f3 100644 --- a/roles/zfs/tasks/main.yml +++ b/roles/zfs/tasks/main.yml @@ -1,9 +1,7 @@ --- - name: Install ZFS - include_tasks: install.yml - + ansible.builtin.include_tasks: install.yml - name: Configure Zpools - include_tasks: pools.yml - + ansible.builtin.include_tasks: 
pools.yml - name: "Setup ZFS datasets: filesystems, snapshots, volumes" - include_tasks: datasets.yml + ansible.builtin.include_tasks: datasets.yml diff --git a/roles/zfs/tasks/pools.yml b/roles/zfs/tasks/pools.yml index 6bf000f..8551ed4 100644 --- a/roles/zfs/tasks/pools.yml +++ b/roles/zfs/tasks/pools.yml @@ -3,21 +3,22 @@ # Based on: https://github.com/mrlesmithjr/ansible-zfs/blob/master/tasks/manage_zfs.yml # Expected variables in your inventory: zfs_pools. -- name: checking existing zpool(s) - ansible.builtin.shell: "zpool list -H -o name" +- name: Checking existing zpool(s) + ansible.builtin.command: "zpool list -H -o name" changed_when: false register: current_zp_state - check_mode: no + check_mode: false when: zfs_pools is defined -- name: gather zpool status - ansible.builtin.shell: zpool status +- name: Gather zpool status + ansible.builtin.command: zpool status changed_when: false register: zpool_devices when: zfs_pools is defined -- name: creating basic zpool(s) - ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{ item.devices|join (' ') }}" +- name: Creating basic zpool(s) + ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{ + item.devices|join (' ') }}" with_items: "{{ zfs_pools }}" when: - zfs_pools is defined @@ -26,8 +27,9 @@ - item.state == "present" - item.devices[0] not in zpool_devices.stdout -- name: creating mirror/zraid zpool(s) - ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{ item.type }} {{ item.devices|join (' ') }}" +- name: Creating mirror/zraid zpool(s) + ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name 
}} {{ + item.type }} {{ item.devices|join (' ') }}" with_items: "{{ zfs_pools }}" when: - zfs_pools is defined @@ -36,8 +38,8 @@ - item.state == "present" - item.devices[0] not in zpool_devices.stdout -- name: deleting zpool(s) with care - include_tasks: "./delete-pool.yml" +- name: Deleting zpool(s) with care + ansible.builtin.include_tasks: "./delete-pool.yml" when: - zfs_pools is defined - zpool.name in current_zp_state.stdout_lines diff --git a/roles/zsh/defaults/main.yml b/roles/zsh/defaults/main.yml index 2fe63ce..1e2a731 100644 --- a/roles/zsh/defaults/main.yml +++ b/roles/zsh/defaults/main.yml @@ -1,3 +1,4 @@ +--- zsh_home: "{{ '/root' if zsh_user == 'root' else '/home/' + zsh_user }}" zsh_base_config: "{{ zsh_home }}/.zshrc" zsh_config_path: "{{ zsh_home }}/.config/zsh" diff --git a/roles/zsh/tasks/main.yml b/roles/zsh/tasks/main.yml index 6539612..81e14c3 100644 --- a/roles/zsh/tasks/main.yml +++ b/roles/zsh/tasks/main.yml @@ -1,14 +1,13 @@ --- -- name: install zsh - package: +- name: Install zsh + ansible.builtin.package: name: zsh state: present -- name: install zsh plugins - include_tasks: plugins.yml - -- name: setup zsh for the user(s) - include_tasks: user-setup.yml +- name: Install zsh plugins + ansible.builtin.include_tasks: plugins.yml +- name: Setup zsh for the user(s) + ansible.builtin.include_tasks: user-setup.yml vars: zsh_user: "{{ item }}" loop: "{{ zsh_users | default([]) }}" diff --git a/roles/zsh/tasks/plugins.yml b/roles/zsh/tasks/plugins.yml index a510205..e653d87 100644 --- a/roles/zsh/tasks/plugins.yml +++ b/roles/zsh/tasks/plugins.yml @@ -1,5 +1,5 @@ --- -- name: ensure plugins directory exists +- name: Ensure plugins directory exists ansible.builtin.file: path: "{{ zsh_plugins_path }}" state: directory @@ -7,7 +7,7 @@ group: users mode: "0755" -- name: add a readme file to advice from where this comes +- name: Add a readme file to advice from where this comes ansible.builtin.copy: dest: "{{ zsh_plugins_path }}/README.md" content: | 
@@ -17,27 +17,18 @@ group: users owner: root -- name: "git clone plugins" - git: +- name: "Git clone plugins" + ansible.builtin.git: repo: "{{ item.repo }}" dest: "{{ item.dest }}" - update: yes + update: true version: master loop: - - { - repo: https://github.com/zsh-users/zsh-syntax-highlighting.git, - dest: "{{ zsh_plugins_path }}/zsh-syntax-highlighting", - } - - { - repo: https://github.com/zsh-users/zsh-autosuggestions.git, - dest: "{{ zsh_plugins_path }}/zsh-autosuggestions", - } - - { - repo: https://github.com/romkatv/powerlevel10k.git, - dest: "{{ zsh_plugins_path }}/powerlevel10k", - } + - { repo: https://github.com/zsh-users/zsh-syntax-highlighting.git, dest: "{{ zsh_plugins_path }}/zsh-syntax-highlighting" } + - { repo: https://github.com/zsh-users/zsh-autosuggestions.git, dest: "{{ zsh_plugins_path }}/zsh-autosuggestions" } + - { repo: https://github.com/romkatv/powerlevel10k.git, dest: "{{ zsh_plugins_path }}/powerlevel10k" } -- name: assert plugins are available for any user +- name: Assert plugins are available for any user ansible.builtin.file: path: "{{ item }}" owner: root diff --git a/roles/zsh/tasks/user-setup.yml b/roles/zsh/tasks/user-setup.yml index ad8c570..d5992ce 100644 --- a/roles/zsh/tasks/user-setup.yml +++ b/roles/zsh/tasks/user-setup.yml @@ -1,4 +1,5 @@ -- name: setup zsh base config +--- +- name: Setup zsh base config ansible.builtin.template: src: main.zshrc.j2 dest: "{{ zsh_base_config }}" @@ -6,7 +7,7 @@ group: "{{ zsh_user }}" mode: "0600" -- name: setup .config/zsh directory +- name: Setup .config/zsh directory ansible.builtin.file: path: "{{ zsh_config_path }}" state: directory @@ -14,7 +15,7 @@ group: "{{ zsh_user }}" mode: "0700" -- name: configure zsh config +- name: Configure zsh config ansible.builtin.template: src: zshrc.j2 dest: "{{ zsh_config_file }}" @@ -22,7 +23,7 @@ group: "{{ zsh_user }}" mode: "0600" -- name: copy aliases +- name: Copy aliases ansible.builtin.copy: src: ./templates/aliases dest: "{{ 
zsh_config_path }}/aliases" @@ -30,12 +31,12 @@ group: "{{ zsh_user }}" mode: "0600" -- name: change default shell to zsh - user: +- name: Change default shell to zsh + ansible.builtin.user: name: "{{ zsh_user }}" shell: /bin/zsh -- name: configure powerlevel10k theme +- name: Configure powerlevel10k theme ansible.builtin.copy: src: "./templates/{{ 'root.p10k.zsh' if zsh_user == 'root' else 'user.p10k.zsh' }}" dest: "{{ zsh_p10k_theme_config }}"