chore: ansible-lint
parent 2c6da106b2, commit 2c421611ae

3 .ansible-lint Normal file
@ -0,0 +1,3 @@
---
skip_list:
- var-naming[no-role-prefix]
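For context, `var-naming[no-role-prefix]` is the ansible-lint rule that expects role variables to be prefixed with the role name; it is skipped here because the roles in this repository use shorter names, for example (excerpt from roles/sshd/defaults later in this diff):

```yaml
# roles/sshd/defaults/main.yml — would be flagged without the skip,
# since the variable is not prefixed with the role name (sshd_)
ssh_port: 22
```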
19 README.md
@ -1,13 +1,27 @@
# Homelab Ansible Playbooks

This repository contains Ansible playbooks and roles I use to manage my NAS and several VMs 👨💻.
This repository contains Ansible playbooks and roles I use to manage my NAS and some VMs 👨💻.

This project is designed for personal/family-scale maintenance. If you find it useful, want to share advice, or have security concerns, feel free to drop me a line.

This is a good playground to learn and I encourage you to adapt these roles to your needs. While they might not be production-ready for all environments, I'm open to adapting them for [Ansible Galaxy]((https://galaxy.ansible.com)) if there's community interest!
This is a good playground to learn and I encourage you to adapt these roles to your needs. While they might not be production-ready for all environments, I'm open to adapting them for [Ansible Galaxy](https://galaxy.ansible.com) if there's community interest!

## Requirements

Base tools:

```sh
# linux (ansible-galaxy is bundled with ansible, no separate package needed)
apt-get install ansible ansible-lint
pacman -Syu ansible ansible-lint
# macos
brew install ansible ansible-lint
# windows
choco install ansible ansible-lint
```

Other roles:

```sh
ansible-galaxy collection install -r requirements.yml
```
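For reference, a sketch of the `requirements.yml` referenced above, based on the collections hunk that appears later in this commit:

```yaml
---
collections:
  - name: ansible.netcommon
  - name: community.general
```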
@ -36,5 +50,6 @@ ssh-copy-id -i ~/.ssh/id_rsa.pub username@remote_host
Linting:

```sh
ansible-lint
npx prettier --write .
```

@ -1,5 +1,6 @@
|
||||
[defaults]
|
||||
interpreter_python=/usr/bin/python3
|
||||
roles_path=./roles
|
||||
|
||||
[ssh_connection]
|
||||
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o ControlPath=/tmp/ansible-ssh-%h-%p-%r
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
# Network configuration
|
||||
# ---------------------
|
||||
network_interfaces:
|
||||
|
||||
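`network_interfaces` is left empty in these defaults and is meant to be provided per host; a minimal sketch of a host_vars entry, inferred from the keys the net-persist and net-config tasks read later in this diff (interface name, MAC and IPv4 address are placeholders):

```yaml
network_interfaces:
  - name: lan0
    mac_address: "aa:bb:cc:dd:ee:ff"
    ipv4:
      address: 192.168.1.10/24
```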
20 playbook.yml
@ -1,10 +1,16 @@
|
||||
- hosts: all
|
||||
---
|
||||
# - hosts: all
|
||||
# become: true
|
||||
# roles:
|
||||
# - role: networking
|
||||
# - role: sshd
|
||||
# - role: disks
|
||||
# - role: wireguard
|
||||
# - role: zsh
|
||||
# - role: archlinux
|
||||
# - role: podman
|
||||
|
||||
- hosts: pinwheel
|
||||
become: true
|
||||
roles:
|
||||
- role: networking
|
||||
- role: sshd
|
||||
- role: disks
|
||||
- role: wireguard
|
||||
- role: zsh
|
||||
- role: archlinux
|
||||
- role: podman
|
||||
|
||||
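Since the play above now targets `pinwheel` rather than `all`, the inventory is expected to define that host; a minimal sketch in YAML inventory form, with placeholder address and user:

```yaml
all:
  hosts:
    pinwheel:
      ansible_host: 192.0.2.10
      ansible_user: username
```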
@ -1,3 +1,4 @@
|
||||
---
|
||||
- hosts: marge
|
||||
become: true
|
||||
roles:
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
collections:
|
||||
- name: ansible.netcommon
|
||||
- name: community.general
|
||||
|
||||
7 roles/archlinux/README.md Normal file
@ -0,0 +1,7 @@
The classic issue when you have not updated Arch for a while is a missing PGP key.

```sh
sudo pacman -Sy archlinux-keyring
```

Run this before the update; it takes care of the little injured kittens.
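The usual full sequence is to refresh the keyring first and then upgrade; a minimal sketch of that standard Arch advice (not taken from this role):

```sh
sudo pacman -Sy archlinux-keyring && sudo pacman -Su
```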
@ -1,3 +1,4 @@
|
||||
---
|
||||
arch_locale: en_US.UTF-8
|
||||
yay_src_path: /opt/yay
|
||||
yay_git_repo: https://aur.archlinux.org/yay.git
|
||||
|
||||
@ -1,15 +1,15 @@
|
||||
---
|
||||
- name: Configure locales
|
||||
block:
|
||||
- name: activate locale
|
||||
command:
|
||||
- name: Activate locale
|
||||
ansible.builtin.command:
|
||||
cmd: localectl set-locale LANG={{ arch_locale }}
|
||||
- name: edit /etc/locale.gen
|
||||
lineinfile:
|
||||
- name: Edit /etc/locale.gen
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/locale.gen
|
||||
state: present
|
||||
regexp: "{{ arch_locale }}"
|
||||
line: "{{ arch_locale }} UTF-8"
|
||||
- name: regenerate locales
|
||||
command:
|
||||
- name: Regenerate locales
|
||||
ansible.builtin.command:
|
||||
cmd: locale-gen
|
||||
|
||||
@ -1,10 +1,10 @@
|
||||
---
|
||||
- name: Skip Archlinux installation
|
||||
meta: end_play
|
||||
ansible.builtin.meta: end_play
|
||||
when: ansible_facts['os_family'] != 'Archlinux'
|
||||
|
||||
- name: Archlinux base setup
|
||||
include_tasks: "{{ item }}"
|
||||
ansible.builtin.include_tasks: "{{ item }}"
|
||||
loop:
|
||||
- pacman.yml
|
||||
- locales.yml
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
---
|
||||
- name: Check if pacman is not locked
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /var/lib/pacman/db.lck
|
||||
register: pacman_lock
|
||||
failed_when: pacman_lock.stat.exists
|
||||
@ -13,32 +13,30 @@
|
||||
# state: absent
|
||||
|
||||
- name: Install reflector (looking for fastest mirror)
|
||||
pacman:
|
||||
community.general.pacman:
|
||||
name: reflector
|
||||
state: present
|
||||
|
||||
- name: Stat pacman mirrorlist
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /etc/pacman.d/mirrorlist
|
||||
register: mirrorlist
|
||||
|
||||
# Probably not here if it's a fresh install
|
||||
- name: Stat pacman mirrorlist.bak
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /etc/pacman.d/mirrorlist.bak
|
||||
register: mirrorlist_bak
|
||||
|
||||
- name: Backup and update pacman mirrorlist if older than 7 days
|
||||
shell: >
|
||||
ansible.builtin.shell: >
|
||||
cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.bak &&
|
||||
reflector --latest 20 --protocol https --sort rate
|
||||
--save /etc/pacman.d/mirrorlist
|
||||
when: mirrorlist_bak.stat.exists is false or
|
||||
(mirrorlist.stat.exists and
|
||||
(ansible_date_time.epoch | int - mirrorlist.stat.mtime) > 604800)
|
||||
when: mirrorlist_bak.stat.exists is false or (mirrorlist.stat.exists and (ansible_date_time.epoch | int - mirrorlist.stat.mtime) > 604800)
|
||||
|
||||
- name: Configure pacman to output colors
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/pacman.conf
|
||||
state: present
|
||||
regexp: "^(.*)Color"
|
||||
|
||||
@ -1,50 +1,10 @@
|
||||
---
|
||||
- name: Check if paru is already installed
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /usr/bin/paru
|
||||
register: paru
|
||||
|
||||
- name: Install paru
|
||||
block:
|
||||
- name: Get the last github release
|
||||
ansible.builtin.uri:
|
||||
url: "https://api.github.com/repos/{{ paru_git_repo }}/releases/latest"
|
||||
return_content: true
|
||||
register: paru_release
|
||||
|
||||
- name: Extract tag_name
|
||||
set_fact:
|
||||
paru_version: "{{ (paru_release.json.tag_name | regex_replace('^v', '')) }}"
|
||||
|
||||
- name: Get the binary URL ({{ os_arch }})
|
||||
set_fact:
|
||||
paru_url: "{{ item.browser_download_url }}"
|
||||
loop: "{{ paru_release.json.assets }}"
|
||||
when: "'{{ os_arch }}.tar.zst' in item.name"
|
||||
|
||||
- name: Download
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ paru_url }}"
|
||||
dest: "/tmp/paru-{{ os_arch }}.tar.zst"
|
||||
mode: "0644"
|
||||
|
||||
- name: Extract paru
|
||||
ansible.builtin.command:
|
||||
cmd: "tar -xf /tmp/paru-{{ os_arch }}.tar.zst paru -C /tmp"
|
||||
|
||||
- name: Install paru binary
|
||||
ansible.builtin.command:
|
||||
cmd: "mv /tmp/paru /usr/bin/paru"
|
||||
|
||||
- name: Ensure permissions
|
||||
ansible.builtin.file:
|
||||
path: /usr/bin/paru
|
||||
mode: "0755"
|
||||
|
||||
- name: Cleanup
|
||||
ansible.builtin.file:
|
||||
path: "/tmp/paru-{{ os_arch }}.tar.zst"
|
||||
state: absent
|
||||
when: not paru.stat.exists
|
||||
##
|
||||
## Deprecated version with compilation
|
||||
@ -103,3 +63,43 @@
|
||||
# line: "%wheel ALL=(ALL:ALL) ALL"
|
||||
# validate: /usr/sbin/visudo -cf %s
|
||||
# when: not paru.stat.exists
|
||||
block:
|
||||
- name: Get the last github release
|
||||
ansible.builtin.uri:
|
||||
url: "https://api.github.com/repos/{{ paru_git_repo }}/releases/latest"
|
||||
return_content: true
|
||||
register: paru_release
|
||||
|
||||
- name: Extract tag_name
|
||||
ansible.builtin.set_fact:
|
||||
paru_version: "{{ (paru_release.json.tag_name | regex_replace('^v', '')) }}"
|
||||
|
||||
- name: Get the binary URL ({{ os_arch }})
|
||||
ansible.builtin.set_fact:
|
||||
paru_url: "{{ item.browser_download_url }}"
|
||||
loop: "{{ paru_release.json.assets }}"
|
||||
when: (os_arch ~ '.tar.zst') in item.name
|
||||
|
||||
- name: Download
|
||||
ansible.builtin.get_url:
|
||||
url: "{{ paru_url }}"
|
||||
dest: "/tmp/paru-{{ os_arch }}.tar.zst"
|
||||
mode: "0644"
|
||||
|
||||
- name: Extract paru
|
||||
ansible.builtin.command:
|
||||
cmd: "tar -xf /tmp/paru-{{ os_arch }}.tar.zst paru -C /tmp"
|
||||
|
||||
- name: Install paru binary
|
||||
ansible.builtin.command:
|
||||
cmd: "mv /tmp/paru /usr/bin/paru"
|
||||
|
||||
- name: Ensure permissions
|
||||
ansible.builtin.file:
|
||||
path: /usr/bin/paru
|
||||
mode: "0755"
|
||||
|
||||
- name: Cleanup
|
||||
ansible.builtin.file:
|
||||
path: "/tmp/paru-{{ os_arch }}.tar.zst"
|
||||
state: absent
|
||||
|
||||
@ -1,60 +1,60 @@
|
||||
---
|
||||
- name: Check if yay is already installed
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /usr/bin/yay
|
||||
register: yay
|
||||
|
||||
- name: Install yay
|
||||
when: not yay.stat.exists
|
||||
block:
|
||||
- name: Install build dependencies
|
||||
package:
|
||||
ansible.builtin.package:
|
||||
name:
|
||||
- base-devel
|
||||
- git
|
||||
state: present
|
||||
|
||||
- name: Disable sudo password prompt (makepkg sudoers hack)
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: "^#?%wheel"
|
||||
line: "%wheel ALL=(ALL) NOPASSWD: ALL"
|
||||
validate: /usr/sbin/visudo -cf %s
|
||||
|
||||
- command:
|
||||
- ansible.builtin.command:
|
||||
cmd: whoami
|
||||
no_log: true
|
||||
become: false
|
||||
register: main_user
|
||||
|
||||
- set_fact:
|
||||
- ansible.builtin.set_fact:
|
||||
main_user: "{{ main_user.stdout }}"
|
||||
no_log: true
|
||||
|
||||
- name: Create yay sources dir
|
||||
file:
|
||||
ansible.builtin.file:
|
||||
path: "{{ yay_src_path }}"
|
||||
state: directory
|
||||
owner: "{{ main_user }}"
|
||||
|
||||
- name: Clone git sources
|
||||
become: false
|
||||
git:
|
||||
ansible.builtin.git:
|
||||
repo: "{{ yay_git_repo }}"
|
||||
dest: "{{ yay_src_path }}"
|
||||
|
||||
# note: this only works because SUDOERS password prompt is disabled
|
||||
- name: Build and install
|
||||
become: false
|
||||
command:
|
||||
ansible.builtin.command:
|
||||
chdir: "{{ yay_src_path }}"
|
||||
cmd: "makepkg -si -f --noconfirm"
|
||||
|
||||
- name: Restore sudo with password prompt
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: "^#?%wheel"
|
||||
line: "%wheel ALL=(ALL:ALL) ALL"
|
||||
validate: /usr/sbin/visudo -cf %s
|
||||
when: not yay.stat.exists
|
||||
|
||||
@ -1 +1,2 @@
|
||||
---
|
||||
ssd_trim_periodicity: monthly
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
---
|
||||
- name: Ensure disks are formatted correctly
|
||||
include_tasks: partitioning.yml
|
||||
ansible.builtin.include_tasks: partitioning.yml
|
||||
loop: "{{ disk_partitioning | default([]) }}"
|
||||
|
||||
- name: Enable trim SSD if there is at least one
|
||||
include_tasks: trim-ssd.yml
|
||||
ansible.builtin.include_tasks: trim-ssd.yml
|
||||
|
||||
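The `disk_partitioning` variable looped over above is not defined in these defaults; a sketch of the expected shape, inferred from the partitioning tasks below (device and layout file paths are placeholders):

```yaml
disk_partitioning:
  - device: /dev/sda
    layout_file: files/sda.sfdisk
```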
@ -1,23 +1,23 @@
|
||||
---
|
||||
- name: Install sfdisk
|
||||
package:
|
||||
ansible.builtin.package:
|
||||
name: util-linux
|
||||
state: present
|
||||
changed_when: false
|
||||
|
||||
- name: Install hdparm
|
||||
package:
|
||||
ansible.builtin.package:
|
||||
name: hdparm
|
||||
state: present
|
||||
changed_when: false
|
||||
|
||||
- name: Load expected layout from file (controller side)
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
expected_layout: "{{ lookup('file', item.layout_file) }}"
|
||||
changed_when: false
|
||||
|
||||
- name: Get current layout from remote
|
||||
command: "sfdisk --dump {{ item.device }}"
|
||||
ansible.builtin.command: "sfdisk --dump {{ item.device }}"
|
||||
register: current_layout
|
||||
changed_when: false
|
||||
|
||||
@ -25,18 +25,18 @@
|
||||
vars:
|
||||
current_clean: "{{ current_layout.stdout | trim | regex_replace('\\s+', ' ') }}"
|
||||
expected_clean: "{{ expected_layout | trim | regex_replace('\\s+', ' ') }}"
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
layout_differs: "{{ current_clean != expected_clean }}"
|
||||
changed_when: false
|
||||
|
||||
- name: Copy layout file to remote (only if different)
|
||||
copy:
|
||||
ansible.builtin.copy:
|
||||
content: "{{ expected_layout }}"
|
||||
dest: "/tmp/expected-{{ item.device | basename }}.sfdisk"
|
||||
mode: "0644"
|
||||
when: layout_differs
|
||||
|
||||
- name: Apply partition table using sfdisk
|
||||
command: >
|
||||
ansible.builtin.shell: >
|
||||
sfdisk {{ item.device }} < {{ item.layout_file }}
|
||||
when: layout_differs
|
||||
|
||||
@ -1,22 +1,22 @@
|
||||
---
|
||||
# see: https://wiki.archlinux.org/title/Solid_state_drive#Periodic_TRIM
|
||||
- name: Check if there is at least one SSD
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
has_at_least_one_ssd: "{{ ansible_facts.devices | dict2items | selectattr('value.rotational', 'equalto', '0') | list | length > 0 }}"
|
||||
changed_when: false
|
||||
|
||||
- name: Skip trim role
|
||||
meta: end_play
|
||||
ansible.builtin.meta: end_play
|
||||
when: not has_at_least_one_ssd
|
||||
|
||||
- name: install trim tools
|
||||
package:
|
||||
- name: Install trim tools
|
||||
ansible.builtin.package:
|
||||
name: util-linux
|
||||
state: present
|
||||
changed_when: false
|
||||
|
||||
- name: edit trim periodicity if needed
|
||||
template:
|
||||
- name: Edit trim periodicity if needed
|
||||
ansible.builtin.template:
|
||||
src: templates/fstrim.timer.j2
|
||||
dest: "/etc/systemd/system/fstrim.timer.d/override.conf"
|
||||
owner: root
|
||||
@ -24,20 +24,20 @@
|
||||
mode: "0644"
|
||||
register: timer_config
|
||||
|
||||
- name: systemd daemon reload
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
- name: Systemd daemon reload
|
||||
ansible.builtin.systemd:
|
||||
daemon_reload: true
|
||||
when: timer_config.changed
|
||||
|
||||
- name: enable periodic trim
|
||||
systemd:
|
||||
- name: Enable periodic trim
|
||||
ansible.builtin.systemd:
|
||||
name: fstrim.timer
|
||||
enabled: yes
|
||||
enabled: true
|
||||
state: started
|
||||
changed_when: false
|
||||
|
||||
- name: install nvme-cli
|
||||
package:
|
||||
- name: Install nvme-cli
|
||||
ansible.builtin.package:
|
||||
name: nvme-cli
|
||||
state: present
|
||||
changed_when: false
|
||||
|
||||
@ -1 +1,2 @@
|
||||
---
|
||||
docker_projects_dir: /opt/docker
|
||||
|
||||
@ -5,49 +5,49 @@
|
||||
# Archlinux: only if your target is meant to frequently build docker images
|
||||
# see: https://stackoverflow.com/a/78352698
|
||||
|
||||
- name: uninstall docker
|
||||
block:
|
||||
- name: Include uninstall tasks
|
||||
include_tasks: uninstall.yml
|
||||
- name: Skip docker installation
|
||||
meta: end_play
|
||||
- name: Uninstall docker
|
||||
when: uninstall_docker | lower in ['yes', 'y']
|
||||
|
||||
- name: install docker
|
||||
package:
|
||||
block:
|
||||
- name: Include uninstall tasks
|
||||
ansible.builtin.include_tasks: uninstall.yml
|
||||
- name: Skip docker installation
|
||||
ansible.builtin.meta: end_play
|
||||
- name: Install docker
|
||||
ansible.builtin.package:
|
||||
name: docker
|
||||
|
||||
- name: enable the service
|
||||
service:
|
||||
- name: Enable the service
|
||||
ansible.builtin.service:
|
||||
name: "docker"
|
||||
enabled: true
|
||||
state: started
|
||||
|
||||
- command:
|
||||
- ansible.builtin.command:
|
||||
cmd: whoami
|
||||
no_log: true
|
||||
become: false
|
||||
register: main_user
|
||||
|
||||
- set_fact:
|
||||
- ansible.builtin.set_fact:
|
||||
main_user: "{{ main_user.stdout }}"
|
||||
no_log: true
|
||||
|
||||
- name: create projects directory
|
||||
file:
|
||||
- name: Create projects directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ docker_projects_dir }}"
|
||||
state: directory
|
||||
owner: "{{ main_user }}"
|
||||
group: "{{ main_user }}"
|
||||
|
||||
- name: allow user to use docker
|
||||
user:
|
||||
- name: Allow user to use docker
|
||||
ansible.builtin.user:
|
||||
name: "{{ main_user }}"
|
||||
groups: docker
|
||||
append: yes
|
||||
append: true
|
||||
register: docker_group
|
||||
|
||||
- name: inform the user that user needs to logout and login again
|
||||
debug:
|
||||
- name: Inform the user that user needs to logout and login again
|
||||
ansible.builtin.debug:
|
||||
msg: "Please logout and login again to make sure the user is added to the docker group"
|
||||
when: docker_group.changed
|
||||
|
||||
@ -1,17 +1,17 @@
|
||||
---
|
||||
- name: uninstall docker
|
||||
package:
|
||||
- name: Uninstall docker
|
||||
ansible.builtin.package:
|
||||
name: docker
|
||||
state: absent
|
||||
|
||||
- name: prompt the user for confirmation
|
||||
- name: Prompt the user for confirmation
|
||||
ansible.builtin.pause:
|
||||
prompt: "[IRREVERSIBLE] Are you sure you want to delete {{ docker_projects_dir }}?"
|
||||
echo: yes
|
||||
echo: true
|
||||
register: confirmation
|
||||
|
||||
- name: remote projects directory
|
||||
file:
|
||||
- name: Remove projects directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ docker_projects_dir }}"
|
||||
state: absent
|
||||
owner: "{{ main_user }}"
|
||||
|
||||
@ -1,2 +1,3 @@
|
||||
---
|
||||
fail2ban_firewall: ufw
|
||||
fail2ban_backend: systemd
|
||||
|
||||
@ -1,27 +1,27 @@
|
||||
---
|
||||
# see: https://wiki.archlinux.org/title/Fail2ban
|
||||
- name: Install fail2ban
|
||||
package:
|
||||
ansible.builtin.package:
|
||||
name: fail2ban
|
||||
state: present
|
||||
|
||||
- name: Ensure fail2ban configuration is only owned by root
|
||||
file:
|
||||
ansible.builtin.file:
|
||||
path: /etc/fail2ban
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0700
|
||||
recurse: yes
|
||||
mode: "0700"
|
||||
recurse: true
|
||||
|
||||
- name: Install Fail2ban Config
|
||||
block:
|
||||
- name: General configuration
|
||||
template:
|
||||
ansible.builtin.template:
|
||||
src: jail.local.j2
|
||||
dest: /etc/fail2ban/jail.local
|
||||
mode: "0600"
|
||||
- name: Service custom jail
|
||||
template:
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
mode: "0600"
|
||||
@ -32,28 +32,28 @@
|
||||
- name: Service hardening (read-only root rights)
|
||||
block:
|
||||
- name: Check if hardening configuration is already applied
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /etc/systemd/system/fail2ban.service.d/override.conf
|
||||
register: override_conf
|
||||
- name: Create configuration directory
|
||||
file:
|
||||
ansible.builtin.file:
|
||||
path: /etc/systemd/system/fail2ban.service.d
|
||||
state: directory
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0700
|
||||
mode: "0700"
|
||||
- name: Apply hardening configuration
|
||||
template:
|
||||
ansible.builtin.template:
|
||||
src: hardened.fail2ban.conf.j2
|
||||
dest: /etc/systemd/system/fail2ban.service.d/override.conf
|
||||
when: not override_conf.stat.exists
|
||||
- name: Reload systemd
|
||||
systemd:
|
||||
daemon_reload: yes
|
||||
ansible.builtin.systemd:
|
||||
daemon_reload: true
|
||||
when: not override_conf.stat.exists
|
||||
|
||||
- name: Start and enable fail2ban
|
||||
service:
|
||||
ansible.builtin.service:
|
||||
name: fail2ban
|
||||
state: started
|
||||
enabled: yes
|
||||
enabled: true
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
- name: Install oryx
ansible.builtin.command:
cmd: paru -S oryx
|
||||
when: ansible_facts['os_family'] == 'Archlinux'
|
||||
|
||||
@ -1,19 +1,19 @@
|
||||
---
|
||||
- name: Check if the interface ipv4 address is defined
|
||||
block:
|
||||
- debug:
|
||||
msg: "Warning: iface {{ interface.name }} has no defined ipv4 address, skipping configuration"
|
||||
- name: Skip net-config role for {{ interface.name }}
|
||||
meta: end_play
|
||||
when: interface.ipv4.address is not defined
|
||||
|
||||
block:
|
||||
- ansible.builtin.debug:
|
||||
msg: "Warning: iface {{ interface.name }} has no defined ipv4 address, skipping configuration"
|
||||
- name: Skip net-config role for {{ interface.name }}
|
||||
ansible.builtin.meta: end_play
|
||||
- name: Check if the interface is already configured
|
||||
stat:
|
||||
ansible.builtin.stat:
|
||||
path: /etc/systemd/network/20-{{ interface.name }}.network
|
||||
register: network_file
|
||||
|
||||
- name: What patch is needed
|
||||
debug:
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{%- if network_file.stat.exists == true -%}
|
||||
iface {{ interface.name }} is already configured, no action needed.
|
||||
@ -23,7 +23,7 @@
|
||||
|
||||
- name: Create systemd-network link file
|
||||
when: network_file.stat.exists != true
|
||||
template:
|
||||
ansible.builtin.template:
|
||||
src: systemd.network.j2
|
||||
dest: /etc/systemd/network/20-{{ interface.name }}.network
|
||||
owner: root
|
||||
@ -31,6 +31,6 @@
|
||||
mode: "0644"
|
||||
|
||||
- name: Notify a reload is required
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
network_reload_required: true
|
||||
when: network_file.stat.exists != true
|
||||
|
||||
@ -1,17 +1,11 @@
|
||||
---
|
||||
- name: "Check {{ interface.name }} ({{ interface.mac_address }}) rule"
|
||||
set_fact:
|
||||
interface_original_name: "{{ ansible_facts.interfaces
|
||||
| select('in', ansible_facts)
|
||||
| map('extract', ansible_facts)
|
||||
| selectattr('pciid', 'defined')
|
||||
| selectattr('macaddress', 'equalto', interface.mac_address)
|
||||
| map(attribute='device')
|
||||
| first
|
||||
}}"
|
||||
ansible.builtin.set_fact:
|
||||
interface_original_name: "{{ ansible_facts.interfaces | select('in', ansible_facts) | map('extract', ansible_facts) | selectattr('pciid', 'defined') | selectattr('macaddress',
|
||||
'equalto', interface.mac_address) | map(attribute='device') | first }}"
|
||||
|
||||
- name: What patch is needed
|
||||
debug:
|
||||
ansible.builtin.debug:
|
||||
msg: >-
|
||||
{%- if interface_original_name != interface.name -%}
|
||||
iface {{ interface_original_name }} ({{ interface.mac_address }}) will be patched to {{ interface.name }}.
|
||||
@ -21,7 +15,7 @@
|
||||
|
||||
- name: Create persistent-net link file
|
||||
when: interface_original_name != interface.name
|
||||
template:
|
||||
ansible.builtin.template:
|
||||
src: persistent-net.link.j2
|
||||
dest: /etc/systemd/network/10-persistent-net-{{ interface.name }}.link
|
||||
owner: root
|
||||
@ -29,6 +23,6 @@
|
||||
mode: "0644"
|
||||
|
||||
- name: Notify a reboot is required
|
||||
set_fact:
|
||||
ansible.builtin.set_fact:
|
||||
reboot_required: true
|
||||
when: interface_original_name != interface.name
|
||||
|
||||
@ -1,25 +1,25 @@
|
||||
---
|
||||
- name: Setup persistent network interface(s)
|
||||
include_role:
|
||||
ansible.builtin.include_role:
|
||||
name: net-persist
|
||||
public: yes
|
||||
public: true
|
||||
vars:
|
||||
interface: "{{ item }}"
|
||||
loop: "{{ hostvars[inventory_hostname].network_interfaces | default([]) }}"
|
||||
|
||||
- name: Configure network interface(s)
|
||||
include_role:
|
||||
ansible.builtin.include_role:
|
||||
name: net-config
|
||||
public: yes
|
||||
public: true
|
||||
vars:
|
||||
interface: "{{ item }}"
|
||||
loop: "{{ hostvars[inventory_hostname].network_interfaces | default([]) }}"
|
||||
|
||||
- name: Reload networkd and resolved
|
||||
systemd:
|
||||
ansible.builtin.systemd:
|
||||
name: "{{ item }}"
|
||||
state: reloaded
|
||||
daemon_reload: yes
|
||||
daemon_reload: true
|
||||
loop:
|
||||
- systemd-networkd
|
||||
- systemd-resolved
|
||||
|
||||
@ -8,7 +8,6 @@
|
||||
# - host: "192.168.1.0/24" # readonly access for other lan clients
|
||||
# options: "ro,sync,no_subtree_check"
|
||||
nfs_shares: []
|
||||
|
||||
nfs_configuration_file: "/etc/nfs.conf"
|
||||
|
||||
nfs_exports_file: "/etc/exports"
|
||||
|
||||
@ -3,7 +3,7 @@
|
||||
ansible.builtin.systemd:
|
||||
name: "nfsv4-server"
|
||||
state: restarted
|
||||
daemon_reload: yes
|
||||
daemon_reload: true
|
||||
|
||||
- name: "Update exportfs"
|
||||
ansible.builtin.command: exportfs -ra
|
||||
|
||||
@ -1,10 +1,10 @@
|
||||
---
|
||||
- name: install nfs-server
|
||||
package:
|
||||
- name: Install nfs-server
|
||||
ansible.builtin.package:
|
||||
name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('nfs-utils', 'nfs-kernel-server') }}"
|
||||
state: present
|
||||
|
||||
- name: configure nfs configuration
|
||||
- name: Configure nfs configuration
|
||||
ansible.builtin.template:
|
||||
src: templates/nfs.conf.j2
|
||||
dest: "{{ nfs_configuration_file }}"
|
||||
@ -13,7 +13,7 @@
|
||||
mode: "0644"
|
||||
notify: Reload systemd and restart nfs-server
|
||||
|
||||
- name: configure nfs-server exports
|
||||
- name: Configure nfs-server exports
|
||||
ansible.builtin.template:
|
||||
src: templates/exports.j2
|
||||
dest: "{{ nfs_exports_file }}"
|
||||
@ -22,13 +22,13 @@
|
||||
mode: "0644"
|
||||
notify: Update exportfs
|
||||
|
||||
- name: systemd service for nfs-server is started and enabled
|
||||
- name: Systemd service for nfs-server is started and enabled
|
||||
ansible.builtin.systemd:
|
||||
name: nfsv4-server
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- name: setup firewall rules for nfs on port
|
||||
- name: Setup firewall rules for nfs on port
|
||||
community.general.ufw:
|
||||
rule: allow
|
||||
src: "{{ item }}"
|
||||
|
||||
@ -1,11 +1,9 @@
|
||||
---
|
||||
# NTP configuration file
|
||||
ntp_config_file: "/etc/ntp.conf"
|
||||
|
||||
# NTP servers to use.
|
||||
ntp_pools:
- "0.uk.pool.ntp.org"
- "1.uk.pool.ntp.org"
- "2.uk.pool.ntp.org"
- "3.uk.pool.ntp.org"
|
||||
|
||||
# System timezone
|
||||
ntp_timezone: "Europe/London"
|
||||
|
||||
@ -3,4 +3,4 @@
|
||||
ansible.builtin.systemd:
|
||||
name: ntpd
|
||||
state: restarted
|
||||
daemon_reload: yes
|
||||
daemon_reload: true
|
||||
|
||||
@ -1,16 +1,16 @@
|
||||
---
|
||||
- name: install NTP package
|
||||
package:
|
||||
- name: Install NTP package
|
||||
ansible.builtin.package:
|
||||
name: "ntp"
|
||||
state: present
|
||||
update_cache: yes
|
||||
update_cache: true
|
||||
|
||||
- name: set system timezone to {{ ntp_timezone }}"
|
||||
- name: "Set system timezone to {{ ntp_timezone }}"
|
||||
community.general.timezone:
|
||||
name: "{{ ntp_timezone }}"
|
||||
notify: "Restart ntpd service"
|
||||
|
||||
- name: ensure NTP drift file directory exists
|
||||
- name: Ensure NTP drift file directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ ntp_drift_file | dirname }}"
|
||||
state: directory
|
||||
@ -18,12 +18,12 @@
|
||||
group: "ntp"
|
||||
mode: "0750"
|
||||
|
||||
- name: setup systems timezone
|
||||
- name: Setup systems timezone
|
||||
community.general.timezone:
|
||||
name: "{{ ntp_timezone }}"
|
||||
notify: Restart chronyd # Restarting chrony can be useful after a TZ change so it properly takes the new timezone into account in its logs/operations
|
||||
|
||||
- name: "configure {{ ntp_config_file }}"
|
||||
- name: "Configure {{ ntp_config_file }}"
|
||||
ansible.builtin.template:
|
||||
src: "ntp.conf.j2"
|
||||
dest: "{{ ntp_config_file }}"
|
||||
@ -32,13 +32,13 @@
|
||||
mode: "0644"
|
||||
notify: "Restart ntpd service"
|
||||
|
||||
- name: "ensure ntpd service is started and enabled"
|
||||
- name: "Ensure ntpd service is started and enabled"
|
||||
ansible.builtin.systemd:
|
||||
name: "ntpd"
|
||||
state: started
|
||||
enabled: true
|
||||
|
||||
- name: "configure ufw firewall"
|
||||
- name: "Configure ufw firewall"
|
||||
community.general.ufw:
|
||||
rule: allow
|
||||
port: "{{ ntp_port }}"
|
||||
|
||||
14 roles/sshd/README.md Normal file
@ -0,0 +1,14 @@
# SSH server

## Enable authorized_keys fallback

When your home directory is encrypted, sshd cannot read `~/.ssh/authorized_keys` until the home is unlocked, so key-based remote SSH logins to a hardened server break.
To keep this working, the trick is a fallback authorized_keys file: /etc/ssh/authorized_keys/myuser

Simply enable this setting to get it working:

```
ssh_authorized_keys_fallback_enabled: true
```

And you're set.
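As an illustration of what the fallback amounts to on the sshd side, a sketch of the resulting directive, assuming the role templates AuthorizedKeysFile this way (not taken from the role's actual template):

```
# sshd_config — sketch only: keep the per-user file and add the system-wide fallback
AuthorizedKeysFile .ssh/authorized_keys /etc/ssh/authorized_keys/%u
```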
@ -1,3 +1,4 @@
|
||||
---
|
||||
ssh_port: 22
|
||||
ssh_allowed_network: "192.168.1.0/24"
|
||||
ssh_allowed_vpn_network: "192.168.27.0/27"
|
||||
|
||||
@ -17,7 +17,7 @@
|
||||
- name: Enable SSH
|
||||
service:
|
||||
name: "{{ ssh_service_name }}"
|
||||
enabled: yes
|
||||
enabled: true
|
||||
|
||||
- name: Allow local network incoming connection
|
||||
ufw:
|
||||
|
||||
@ -1,2 +1,3 @@
|
||||
---
|
||||
ssh_package_name: "openssh"
|
||||
ssh_service_name: "sshd"
|
||||
|
||||
@ -1,2 +1,3 @@
|
||||
---
|
||||
ssh_package_name: "openssh-server"
|
||||
ssh_service_name: "ssh"
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
unbound_config_base_path: /etc/unbound
|
||||
unbound_config_path: "{{ unbound_config_base_path }}/unbound.conf"
|
||||
unbound_root_hints_path: "{{ unbound_config_base_path }}/root.hints"
|
||||
|
||||
@ -9,7 +9,7 @@
|
||||
ansible.builtin.systemd:
|
||||
name: unbound
|
||||
state: restarted
|
||||
daemon_reload: yes
|
||||
daemon_reload: true
|
||||
|
||||
- name: Reload AppArmor profile
|
||||
ansible.builtin.command: apparmor_parser -r {{ unbound_apparmor_profile_path }}
|
||||
|
||||
@ -1,19 +1,19 @@
|
||||
---
|
||||
# see: https://calomel.org/unbound_dns.html
|
||||
# see: https://wiki.archlinux.org/title/Unbound
|
||||
- name: install unbound
|
||||
package:
|
||||
- name: Install unbound
|
||||
ansible.builtin.package:
|
||||
name: unbound
|
||||
state: present
|
||||
|
||||
# Note: on archlinux this is already shipped within unbound
|
||||
- name: install unbound-anchor on debian/ubuntu
|
||||
package:
|
||||
- name: Install unbound-anchor on debian/ubuntu
|
||||
ansible.builtin.package:
|
||||
name: unbound-anchor
|
||||
state: present
|
||||
when: ansible_facts['os_family'] == 'Debian'
|
||||
|
||||
- name: ensure unbound configuration is owned by unbound
|
||||
- name: Ensure unbound configuration is owned by unbound
|
||||
ansible.builtin.shell: |
|
||||
find "{{ unbound_config_base_path }}" -type d -exec chmod 755 {} \;
|
||||
find "{{ unbound_config_base_path }}" -type f -exec chmod 644 {} \;
|
||||
@ -21,7 +21,7 @@
|
||||
args:
|
||||
executable: /bin/bash
|
||||
|
||||
- name: ensure apparmor profile for unbound exists
|
||||
- name: Ensure apparmor profile for unbound exists
|
||||
ansible.builtin.copy:
|
||||
dest: /etc/apparmor.d/usr.sbin.unbound
|
||||
content: |
|
||||
@ -34,59 +34,59 @@
|
||||
notify:
|
||||
- Reload AppArmor profile
|
||||
|
||||
- name: check if root.hints exists
|
||||
stat:
|
||||
- name: Check if root.hints exists
|
||||
ansible.builtin.stat:
|
||||
path: "{{ unbound_root_hints_path }}"
|
||||
register: root_hints
|
||||
|
||||
- name: update root.hints (if older than 6 months or missing)
|
||||
- name: Update root.hints (if older than 6 months or missing)
|
||||
when: >
|
||||
(not root_hints.stat.exists) or
|
||||
(ansible_date_time.epoch | int - root_hints.stat.mtime > 15552000)
|
||||
|
||||
block:
|
||||
- name: download latest root hints from internic
|
||||
- name: Download latest root hints from internic
|
||||
ansible.builtin.get_url:
|
||||
url: https://www.internic.net/domain/named.root
|
||||
dest: "{{ unbound_root_hints_path }}"
|
||||
owner: unbound
|
||||
group: unbound
|
||||
mode: "0644"
|
||||
when: >
|
||||
(not root_hints.stat.exists) or
|
||||
(ansible_date_time.epoch | int - root_hints.stat.mtime > 15552000)
|
||||
|
||||
- name: check if unbound ad_servers configuration exists
|
||||
stat:
|
||||
- name: Check if unbound ad_servers configuration exists
|
||||
ansible.builtin.stat:
|
||||
path: "{{ unbound_ad_servers_config_path }}"
|
||||
register: ad_servers
|
||||
|
||||
- name: update the ad_servers list if older than 2 weeks or missing
|
||||
- name: Update the ad_servers list if older than 2 weeks or missing
|
||||
when: >
|
||||
(not ad_servers.stat.exists) or
|
||||
(ansible_date_time.epoch | int - ad_servers.stat.mtime > 1209600)
|
||||
|
||||
block:
|
||||
- name: download stevenblack's hosts file
|
||||
- name: Download stevenblack's hosts file
|
||||
ansible.builtin.get_url:
|
||||
url: https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts
|
||||
dest: /tmp/hosts.txt
|
||||
mode: "0644"
|
||||
|
||||
- name: convert hosts file to unbound format
|
||||
- name: Convert hosts file to unbound format
|
||||
ansible.builtin.shell: |
|
||||
grep '^0\.0\.0\.0' /tmp/hosts.txt | awk '{print "local-zone: \""$2"\" always_nxdomain"}' > "{{ unbound_ad_servers_config_path }}" &&
|
||||
chown unbound:unbound "{{ unbound_ad_servers_config_path }}"
|
||||
args:
|
||||
executable: /bin/bash
|
||||
|
||||
- name: clean up temporary file
|
||||
- name: Clean up temporary file
|
||||
ansible.builtin.file:
|
||||
path: /tmp/hosts.txt
|
||||
state: absent
|
||||
when: >
|
||||
(not ad_servers.stat.exists) or
|
||||
(ansible_date_time.epoch | int - ad_servers.stat.mtime > 1209600)
|
||||
|
||||
- name: initialize dnssec trust anchor if missing
|
||||
- name: Initialize dnssec trust anchor if missing
|
||||
ansible.builtin.command: unbound-anchor -a {{ unbound_anchor_root_key }}
|
||||
args:
|
||||
creates: "{{ unbound_anchor_root_key }}"
|
||||
|
||||
- name: install unbound config
|
||||
template:
|
||||
- name: Install unbound config
|
||||
ansible.builtin.template:
|
||||
src: "{{ item.src }}"
|
||||
dest: "{{ item.dest }}"
|
||||
owner: unbound
|
||||
@ -99,14 +99,14 @@
|
||||
- Check Unbound config syntax
|
||||
- Reload systemd and restart unbound
|
||||
|
||||
- name: make sure unbound starts after wg-quick@wg0
|
||||
- name: Make sure unbound starts after wg-quick@wg0
|
||||
block:
|
||||
- name: ensure unbound.service.d directory exists
|
||||
- name: Ensure unbound.service.d directory exists
|
||||
ansible.builtin.file:
|
||||
path: /etc/systemd/system/unbound.service.d
|
||||
state: directory
|
||||
mode: "0755"
|
||||
- name: configure unbound systemd service
|
||||
- name: Configure unbound systemd service
|
||||
ansible.builtin.copy:
|
||||
dest: /etc/systemd/system/unbound.service.d/override.conf
|
||||
content: |
|
||||
@ -115,13 +115,13 @@
|
||||
Requires=wg-quick@wg0.service
|
||||
notify: Reload systemd and restart unbound
|
||||
|
||||
- name: enables unbound service
|
||||
- name: Enables unbound service
|
||||
ansible.builtin.service:
|
||||
name: unbound
|
||||
enabled: yes
|
||||
enabled: true
|
||||
state: started
|
||||
|
||||
- name: firewall ufw rules for unbound
|
||||
- name: Firewall ufw rules for unbound
|
||||
community.general.ufw:
|
||||
rule: allow
|
||||
port: "{{ unbound_port }}"
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
wireguard_primary_interface: "{{ network_interfaces.0.name }}"
|
||||
wireguard_port: 51820 # static port to receive input connections
|
||||
wireguard_server_mode: true # enables NAT and open port
|
||||
|
||||
@ -1,58 +1,59 @@
|
||||
- name: install wireguard
|
||||
package:
|
||||
---
|
||||
- name: Install wireguard
|
||||
ansible.builtin.package:
|
||||
name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('wireguard-tools', 'wireguard') }}"
|
||||
state: present
|
||||
|
||||
# to support "DNS=" if used in a "client way"
|
||||
- name: install openresolv/resolveconf
|
||||
package:
|
||||
- name: Install openresolv/resolveconf
|
||||
ansible.builtin.package:
|
||||
name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('openresolv', 'resolvconf') }}"
|
||||
state: present
|
||||
|
||||
- name: ensure wireguard configuration is only owned by root
|
||||
file:
|
||||
- name: Ensure wireguard configuration is only owned by root
|
||||
ansible.builtin.file:
|
||||
path: "{{ wireguard_config_base_path }}"
|
||||
owner: root
|
||||
group: root
|
||||
mode: 0700
|
||||
recurse: yes
|
||||
mode: "0700"
|
||||
recurse: true
|
||||
|
||||
- name: check if private key exists
|
||||
stat:
|
||||
- name: Check if private key exists
|
||||
ansible.builtin.stat:
|
||||
path: "{{ wireguard_config_base_path }}/privatekey"
|
||||
register: pkey_file
|
||||
|
||||
- name: generate wireguard keys if not present
|
||||
shell: wg genkey | tee {{ wireguard_config_base_path }}/privatekey | wg pubkey > {{ wireguard_config_base_path }}/publickey
|
||||
- name: Generate wireguard keys if not present
|
||||
ansible.builtin.shell: wg genkey | tee {{ wireguard_config_base_path }}/privatekey | wg pubkey > {{ wireguard_config_base_path }}/publickey
|
||||
when: not pkey_file.stat.exists
|
||||
|
||||
- name: retrieve wireguard private key from file
|
||||
slurp:
|
||||
- name: Retrieve wireguard private key from file
|
||||
ansible.builtin.slurp:
|
||||
src: "{{ wireguard_config_base_path }}/privatekey"
|
||||
register: private_key
|
||||
|
||||
- name: set wireguard private key
|
||||
set_fact:
|
||||
- name: Set wireguard private key
|
||||
ansible.builtin.set_fact:
|
||||
wireguard_private_key: "{{ private_key['content'] | b64decode }}"
|
||||
|
||||
- name: disable "dns=" instruction if unbound is used to avoid race conditions at startup
|
||||
set_fact:
|
||||
- name: Disable "dns=" instruction if unbound is used to avoid race conditions at startup
|
||||
ansible.builtin.set_fact:
|
||||
wireguard_dns:
|
||||
when: unbound_custom_lan_records is defined
|
||||
|
||||
- name: install wireguard config
|
||||
template:
|
||||
- name: Install wireguard config
|
||||
ansible.builtin.template:
|
||||
src: wireguard.conf.j2
|
||||
dest: /etc/wireguard/{{ wireguard_interface }}.conf
|
||||
|
||||
- name: start and enable service
|
||||
service:
|
||||
- name: Start and enable service
|
||||
ansible.builtin.service:
|
||||
name: wg-quick@{{ wireguard_interface }}
|
||||
state: started
|
||||
enabled: yes
|
||||
daemon_reload: yes
|
||||
enabled: true
|
||||
daemon_reload: true
|
||||
|
||||
- name: configure the firewall for wireguard
|
||||
- name: Configure the firewall for wireguard
|
||||
community.general.ufw:
|
||||
rule: allow
|
||||
port: "{{ wireguard_port }}"
|
||||
|
||||
@ -1,14 +1,14 @@
|
||||
---
|
||||
# due to Ansible limitations, we cannot loop over a block, so we loop over a distinct tasks file...
|
||||
# @see https://stackoverflow.com/a/58911694
|
||||
- name: set ownership on dataset mountpoint
|
||||
- name: Set ownership on dataset mountpoint
|
||||
block:
|
||||
- name: get the mountpoint
|
||||
ansible.builtin.shell: "zfs get -H -o value mountpoint {{ dataset.name }}"
|
||||
- name: Get the mountpoint
|
||||
ansible.builtin.command: "zfs get -H -o value mountpoint {{ dataset.name }}"
|
||||
register: mountpoint
|
||||
changed_when: false
|
||||
- name: set owner of mountpoints
|
||||
file:
|
||||
- name: Set owner of mountpoints
|
||||
ansible.builtin.file:
|
||||
path: "{{ mountpoint.stdout }}"
|
||||
owner: "{{ dataset.user | default(main_user) }}"
|
||||
group: "{{ dataset.group | default(main_user) }}"
|
||||
|
||||
@ -1,25 +1,25 @@
|
||||
---
|
||||
# see: https://docs.ansible.com/ansible/latest/collections/community/general/zfs_module.html
|
||||
- name: managing filesystems, volumes, snapshots
|
||||
zfs:
|
||||
- name: Managing filesystems, volumes, snapshots
|
||||
community.general.zfs:
|
||||
name: "{{ item.name }}"
|
||||
state: "{{ item.state }}"
|
||||
extra_zfs_properties: "{{ item.extra_zfs_properties | default(omit) }}"
|
||||
origin: "{{ item.origin | default(omit) }}"
|
||||
with_items: "{{ zfs_datasets }}"
|
||||
|
||||
- command:
|
||||
- ansible.builtin.command:
|
||||
cmd: whoami
|
||||
no_log: true
|
||||
become: false
|
||||
register: main_user
|
||||
|
||||
- set_fact:
|
||||
- ansible.builtin.set_fact:
|
||||
main_user: "{{ main_user.stdout }}"
|
||||
no_log: true
|
||||
|
||||
- name: set dataset ownership
|
||||
include_tasks: "./dataset-ownership.yml"
|
||||
- name: Set dataset ownership
|
||||
ansible.builtin.include_tasks: "./dataset-ownership.yml"
|
||||
loop: "{{ zfs_datasets }}"
|
||||
loop_control:
|
||||
loop_var: dataset
|
||||
|
||||
@ -1,12 +1,12 @@
|
||||
---
|
||||
# due to Ansible limitations, we cannot loop over a block, so we loop over a distinct tasks file...
|
||||
# @see https://stackoverflow.com/a/58911694
|
||||
- name: prompt the user for confirmation
|
||||
- name: Prompt the user for confirmation
|
||||
ansible.builtin.pause:
|
||||
prompt: "[IRREVERSIBLE] Are you sure you want to delete zpool {{ zpool.name }}?"
|
||||
echo: yes
|
||||
echo: true
|
||||
register: confirmation
|
||||
|
||||
- name: deleting zpool
|
||||
- name: Deleting zpool
|
||||
ansible.builtin.command: "zpool destroy {{ zpool.name }}"
|
||||
when: confirmation.user_input | lower in ['yes', 'y']
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
---
|
||||
- name: Check if zfs-linux-lts is installed
|
||||
command: pacman -Qi zfs-dkms
|
||||
ansible.builtin.command: pacman -Qi zfs-dkms
|
||||
register: zfs_dkms_installed
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
@ -8,9 +8,9 @@
|
||||
- name: Install zfs
|
||||
when: zfs_dkms_installed.stderr
|
||||
block:
|
||||
- name: disable SUDOERS password prompt for makepkg
|
||||
- name: Disable SUDOERS password prompt for makepkg
|
||||
no_log: true
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: "^#?%wheel"
|
||||
@ -22,30 +22,29 @@
|
||||
# Using dkms with the lts linux kernel is a better approach IMO.
|
||||
- name: Install zfs
|
||||
become: false
|
||||
command:
|
||||
ansible.builtin.command:
|
||||
cmd: "paru -S --noconfirm zfs-dkms zfs-utils"
|
||||
|
||||
- name: Restore SUDOERS password prompt after yay
|
||||
no_log: true
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: "^#?%wheel"
|
||||
line: "%wheel ALL=(ALL:ALL) ALL"
|
||||
validate: /usr/sbin/visudo -cf %s
|
||||
|
||||
- name: check if /etc/hostid is present
|
||||
stat:
|
||||
- name: Check if /etc/hostid is present
|
||||
ansible.builtin.stat:
|
||||
path: /etc/hostid
|
||||
register: hostid
|
||||
changed_when: false
|
||||
|
||||
- name: generate /etc/hostid if not present
|
||||
- name: Generate /etc/hostid if not present
|
||||
when: not hostid.stat.exists
|
||||
command: zgenhostid $(hostid)
|
||||
|
||||
ansible.builtin.shell: zgenhostid $(hostid)
|
||||
- name: Check if zrepl is installed
|
||||
command: pacman -Qi zrepl
|
||||
ansible.builtin.command: pacman -Qi zrepl
|
||||
register: zrepl_installed
|
||||
changed_when: false
|
||||
failed_when: false
|
||||
@ -53,9 +52,9 @@
|
||||
- name: Install zrepl
|
||||
when: zrepl_installed.stderr
|
||||
block:
|
||||
- name: disable SUDOERS password prompt for makepkg
|
||||
- name: Disable SUDOERS password prompt for makepkg
|
||||
no_log: true
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: "^#?%wheel"
|
||||
@ -64,12 +63,12 @@
|
||||
|
||||
- name: Install zrepl
|
||||
become: false
|
||||
command:
|
||||
ansible.builtin.command:
|
||||
cmd: "paru -S --noconfirm zrepl"
|
||||
|
||||
- name: Restore SUDOERS password prompt after paru
|
||||
no_log: true
|
||||
lineinfile:
|
||||
ansible.builtin.lineinfile:
|
||||
dest: /etc/sudoers
|
||||
state: present
|
||||
regexp: "^#?%wheel"
|
||||
@ -77,7 +76,7 @@
|
||||
validate: /usr/sbin/visudo -cf %s
|
||||
|
||||
- name: Enable zfs services
|
||||
service:
|
||||
ansible.builtin.service:
|
||||
name: "{{ item }}"
|
||||
enabled: true
|
||||
state: started
|
||||
|
||||
@ -1,9 +1,7 @@
|
||||
---
|
||||
- name: Install ZFS
|
||||
include_tasks: install.yml
|
||||
|
||||
ansible.builtin.include_tasks: install.yml
|
||||
- name: Configure Zpools
|
||||
include_tasks: pools.yml
|
||||
|
||||
ansible.builtin.include_tasks: pools.yml
|
||||
- name: "Setup ZFS datasets: filesystems, snapshots, volumes"
|
||||
include_tasks: datasets.yml
|
||||
ansible.builtin.include_tasks: datasets.yml
|
||||
|
||||
@ -3,21 +3,22 @@
|
||||
# Based on: https://github.com/mrlesmithjr/ansible-zfs/blob/master/tasks/manage_zfs.yml
|
||||
# Expected variables in your inventory: zfs_pools.
|
||||
|
||||
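A sketch of the `zfs_pools` structure these tasks expect, inferred from the keys they read (pool name, devices and options are placeholders):

```yaml
zfs_pools:
  - name: tank
    type: mirror
    state: present
    devices:
      - /dev/disk/by-id/ata-DISK1
      - /dev/disk/by-id/ata-DISK2
    options:
      ashift: 12
```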
- name: checking existing zpool(s)
|
||||
ansible.builtin.shell: "zpool list -H -o name"
|
||||
- name: Checking existing zpool(s)
|
||||
ansible.builtin.command: "zpool list -H -o name"
|
||||
changed_when: false
|
||||
register: current_zp_state
|
||||
check_mode: no
|
||||
check_mode: false
|
||||
when: zfs_pools is defined
|
||||
|
||||
- name: gather zpool status
|
||||
ansible.builtin.shell: zpool status
|
||||
- name: Gather zpool status
|
||||
ansible.builtin.command: zpool status
|
||||
changed_when: false
|
||||
register: zpool_devices
|
||||
when: zfs_pools is defined
|
||||
|
||||
- name: creating basic zpool(s)
|
||||
ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{ item.devices|join (' ') }}"
|
||||
- name: Creating basic zpool(s)
|
||||
ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{
|
||||
item.devices|join (' ') }}"
|
||||
with_items: "{{ zfs_pools }}"
|
||||
when:
|
||||
- zfs_pools is defined
|
||||
@ -26,8 +27,9 @@
|
||||
- item.state == "present"
|
||||
- item.devices[0] not in zpool_devices.stdout
|
||||
|
||||
- name: creating mirror/zraid zpool(s)
|
||||
ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{ item.type }} {{ item.devices|join (' ') }}"
|
||||
- name: Creating mirror/zraid zpool(s)
|
||||
ansible.builtin.command: "zpool create {{ '-o '+ item.options.items() |map('join', '=') | join (' -o ') if item.options is defined else '' }} {{ item.name }} {{
|
||||
item.type }} {{ item.devices|join (' ') }}"
|
||||
with_items: "{{ zfs_pools }}"
|
||||
when:
|
||||
- zfs_pools is defined
|
||||
@ -36,8 +38,8 @@
|
||||
- item.state == "present"
|
||||
- item.devices[0] not in zpool_devices.stdout
|
||||
|
||||
- name: deleting zpool(s) with care
|
||||
include_tasks: "./delete-pool.yml"
|
||||
- name: Deleting zpool(s) with care
|
||||
ansible.builtin.include_tasks: "./delete-pool.yml"
|
||||
when:
|
||||
- zfs_pools is defined
|
||||
- zpool.name in current_zp_state.stdout_lines
|
||||
|
||||
@ -1,3 +1,4 @@
|
||||
---
|
||||
zsh_home: "{{ '/root' if zsh_user == 'root' else '/home/' + zsh_user }}"
|
||||
zsh_base_config: "{{ zsh_home }}/.zshrc"
|
||||
zsh_config_path: "{{ zsh_home }}/.config/zsh"
|
||||
|
||||
@ -1,14 +1,13 @@
|
||||
---
|
||||
- name: install zsh
|
||||
package:
|
||||
- name: Install zsh
|
||||
ansible.builtin.package:
|
||||
name: zsh
|
||||
state: present
|
||||
|
||||
- name: install zsh plugins
|
||||
include_tasks: plugins.yml
|
||||
|
||||
- name: setup zsh for the user(s)
|
||||
include_tasks: user-setup.yml
|
||||
- name: Install zsh plugins
|
||||
ansible.builtin.include_tasks: plugins.yml
|
||||
- name: Setup zsh for the user(s)
|
||||
ansible.builtin.include_tasks: user-setup.yml
|
||||
vars:
|
||||
zsh_user: "{{ item }}"
|
||||
loop: "{{ zsh_users | default([]) }}"
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
---
|
||||
- name: ensure plugins directory exists
|
||||
- name: Ensure plugins directory exists
|
||||
ansible.builtin.file:
|
||||
path: "{{ zsh_plugins_path }}"
|
||||
state: directory
|
||||
@ -7,7 +7,7 @@
|
||||
group: users
|
||||
mode: "0755"
|
||||
|
||||
- name: add a readme file to advice from where this comes
|
||||
- name: Add a readme file advising where this comes from
|
||||
ansible.builtin.copy:
|
||||
dest: "{{ zsh_plugins_path }}/README.md"
|
||||
content: |
|
||||
@ -17,27 +17,18 @@
|
||||
group: users
|
||||
owner: root
|
||||
|
||||
- name: "git clone plugins"
|
||||
git:
|
||||
- name: "Git clone plugins"
|
||||
ansible.builtin.git:
|
||||
repo: "{{ item.repo }}"
|
||||
dest: "{{ item.dest }}"
|
||||
update: yes
|
||||
update: true
|
||||
version: master
|
||||
loop:
|
||||
- {
|
||||
repo: https://github.com/zsh-users/zsh-syntax-highlighting.git,
|
||||
dest: "{{ zsh_plugins_path }}/zsh-syntax-highlighting",
|
||||
}
|
||||
- {
|
||||
repo: https://github.com/zsh-users/zsh-autosuggestions.git,
|
||||
dest: "{{ zsh_plugins_path }}/zsh-autosuggestions",
|
||||
}
|
||||
- {
|
||||
repo: https://github.com/romkatv/powerlevel10k.git,
|
||||
dest: "{{ zsh_plugins_path }}/powerlevel10k",
|
||||
}
|
||||
- { repo: https://github.com/zsh-users/zsh-syntax-highlighting.git, dest: "{{ zsh_plugins_path }}/zsh-syntax-highlighting" }
|
||||
- { repo: https://github.com/zsh-users/zsh-autosuggestions.git, dest: "{{ zsh_plugins_path }}/zsh-autosuggestions" }
|
||||
- { repo: https://github.com/romkatv/powerlevel10k.git, dest: "{{ zsh_plugins_path }}/powerlevel10k" }
|
||||
|
||||
- name: assert plugins are available for any user
|
||||
- name: Assert plugins are available for any user
|
||||
ansible.builtin.file:
|
||||
path: "{{ item }}"
|
||||
owner: root
|
||||
|
||||
@ -1,4 +1,5 @@
|
||||
- name: setup zsh base config
|
||||
---
|
||||
- name: Setup zsh base config
|
||||
ansible.builtin.template:
|
||||
src: main.zshrc.j2
|
||||
dest: "{{ zsh_base_config }}"
|
||||
@ -6,7 +7,7 @@
|
||||
group: "{{ zsh_user }}"
|
||||
mode: "0600"
|
||||
|
||||
- name: setup .config/zsh directory
|
||||
- name: Setup .config/zsh directory
|
||||
ansible.builtin.file:
|
||||
path: "{{ zsh_config_path }}"
|
||||
state: directory
|
||||
@ -14,7 +15,7 @@
|
||||
group: "{{ zsh_user }}"
|
||||
mode: "0700"
|
||||
|
||||
- name: configure zsh config
|
||||
- name: Configure zsh config
|
||||
ansible.builtin.template:
|
||||
src: zshrc.j2
|
||||
dest: "{{ zsh_config_file }}"
|
||||
@ -22,7 +23,7 @@
|
||||
group: "{{ zsh_user }}"
|
||||
mode: "0600"
|
||||
|
||||
- name: copy aliases
|
||||
- name: Copy aliases
|
||||
ansible.builtin.copy:
|
||||
src: ./templates/aliases
|
||||
dest: "{{ zsh_config_path }}/aliases"
|
||||
@ -30,12 +31,12 @@
|
||||
group: "{{ zsh_user }}"
|
||||
mode: "0600"
|
||||
|
||||
- name: change default shell to zsh
|
||||
user:
|
||||
- name: Change default shell to zsh
|
||||
ansible.builtin.user:
|
||||
name: "{{ zsh_user }}"
|
||||
shell: /bin/zsh
|
||||
|
||||
- name: configure powerlevel10k theme
|
||||
- name: Configure powerlevel10k theme
|
||||
ansible.builtin.copy:
|
||||
src: "./templates/{{ 'root.p10k.zsh' if zsh_user == 'root' else 'user.p10k.zsh' }}"
|
||||
dest: "{{ zsh_p10k_theme_config }}"
|
||||
|
||||