Compare commits

...

12 Commits

Author SHA1 Message Date
Clément Désiles
61c88045f7
feat: add more tooling 2026-03-17 23:13:02 +01:00
Clément Désiles
525868caaf
fix: wireguard on archlinux 2026-03-17 23:10:08 +01:00
Clément Désiles
235881aba7
fix: commit bootstrap playbook 2026-03-17 23:09:47 +01:00
Clément Désiles
a6878c0b7d
fix: dhcpd ipv4 service 2026-03-17 23:09:29 +01:00
Clément Désiles
e209a93a78
feat: BREAKING unbound configuration 2026-03-17 23:08:44 +01:00
Clément Désiles
869727d364
fix: add bootstrap for new hosts 2026-03-17 23:06:42 +01:00
Clément Désiles
6393ff6ed3
fix: force images pull and change default ports 2026-02-14 21:02:51 +01:00
Clément Désiles
23c7da84bb
fix: minor doc 2026-02-03 22:11:35 +01:00
Clément Désiles
34da95f8be
fix: ntfy timezone 2026-02-03 22:08:48 +01:00
Clément Désiles
321a14a108
fix: increase unbound ttl&cache 2026-02-03 22:08:20 +01:00
Clément Désiles
1f758deb82
feat: add dhcpd server role 2026-02-03 22:07:40 +01:00
Clément Désiles
5fb027c446
fix: open wg port only in server mode 2026-01-22 07:31:45 +01:00
27 changed files with 399 additions and 43 deletions

1
.gitignore vendored
View File

@ -7,4 +7,5 @@ inventory_data/
playbook.yml
playbooks/*
!playbooks/example.yml
!playbooks/bootstrap.yml
TODO.md

View File

@ -73,18 +73,34 @@ ansible-playbook -i inventory/hosts.yml playbook.yml \
--ask-become-pass
```
## Target configuration
## Bootstrapping a new host
Requirements:
- sshd up and running
- public key copied:
For fresh hosts (only `root` available, no admin user yet):
```sh
ssh-copy-id -i ~/.ssh/id_rsa.pub username@remote_host
ansible-playbook playbooks/bootstrap.yml -l <hostname> --ask-pass
```
- python3 installed (`pacman -Syu python3`)
This installs Python and sudo, creates `{{ ansible_user }}` with sudo rights, and copies your local `~/.ssh/id_ed25519.pub`. Supports Arch Linux and Debian/Ubuntu.
To use a different SSH key:
```sh
ansible-playbook playbooks/bootstrap.yml -l <hostname> --ask-pass \
--extra-vars 'bootstrap_ssh_public_key="ssh-ed25519 AAAA..."'
```
Then set a password for the new user (required for sudo `--ask-become-pass`), replacing `<username>` with your admin user (the value of `ansible_user`):
```sh
ssh root@<hostname> passwd <username>
```
After that, run the host playbook normally:
```sh
ansible-playbook playbooks/<hostname>.yml --ask-become-pass
```
## Developing

View File

@ -21,12 +21,15 @@ network_interfaces:
# Unbound DNS resolver configuration
# ----------------------------------
# Interface IPs for binding
unbound_interface_lan: 192.168.1.2
unbound_interface_vpn: 192.168.20.4
# unbound LAN configuration
unbound_custom_lan_domain: "example.lan"
unbound_interfaces:
- { address: "192.168.1.2", comment: "lan0" }
- { address: "192.168.20.4", comment: "wg0" }
unbound_access_control:
- { subnet: "192.168.1.0/24", action: "allow", view: "lan", comment: "lan0" }
- { subnet: "192.168.20.0/27", action: "allow", view: "vpn", comment: "wg0" }
unbound_custom_lan_config_path: "{{ unbound_config_base_path }}/lan.conf"
unbound_custom_lan_records:
"server.example.lan":

73
playbooks/bootstrap.yml Normal file
View File

@ -0,0 +1,73 @@
---
# Bootstrap a fresh host: create the admin user with sudo and SSH access.
# Run this before any other playbook, when only root access is available:
#
# ansible-playbook playbooks/bootstrap.yml -l somehost
#
# After this, run other playbooks normally.
- name: Bootstrap admin user
hosts: "{{ target | default('all') }}"
gather_facts: false
vars:
ansible_user: root
ansible_become: false
# bootstrap_user: jambon
# bootstrap_ssh_public_key: "ssh-ed25519 AAAA..."
tasks:
- name: Detect OS and install python3 + sudo
ansible.builtin.raw: |
if command -v pacman > /dev/null 2>&1; then
pacman -Sy --noconfirm python sudo
elif command -v apt-get > /dev/null 2>&1; then
apt-get update -qq && apt-get install -y python3 sudo
else
echo "Unsupported OS" && exit 1
fi
changed_when: true
- name: Gather facts
ansible.builtin.setup:
- name: Create admin user
ansible.builtin.user:
name: "{{ bootstrap_user }}"
groups: "{{ 'wheel' if ansible_facts['os_family'] == 'Archlinux' else 'sudo' }}"
append: true
shell: /bin/bash
create_home: true
state: present
- name: Allow sudo group to use sudo (Debian)
ansible.builtin.copy:
content: "%sudo ALL=(ALL:ALL) ALL\n"
dest: /etc/sudoers.d/sudo
owner: root
group: root
mode: "0440"
validate: visudo -cf %s
when: ansible_facts['os_family'] == 'Debian'
- name: Allow wheel group to use sudo (Arch)
ansible.builtin.copy:
content: "%wheel ALL=(ALL:ALL) ALL\n"
dest: /etc/sudoers.d/wheel
owner: root
group: root
mode: "0440"
validate: visudo -cf %s
when: ansible_facts['os_family'] == 'Archlinux'
- name: Create .ssh directory
ansible.builtin.file:
path: "/home/{{ bootstrap_user }}/.ssh"
state: directory
owner: "{{ bootstrap_user }}"
group: "{{ bootstrap_user }}"
mode: "0700"
- name: Add SSH authorized key
ansible.posix.authorized_key:
user: "{{ bootstrap_user }}"
key: "{{ bootstrap_ssh_public_key | default(lookup('file', '~/.ssh/id_ed25519.pub')) }}"
state: present

29
roles/dhcpd/README.md Normal file
View File

@ -0,0 +1,29 @@
# dhcpd
ISC DHCP server role for Arch Linux and Debian/Ubuntu.
## Requirements
- `dhcpd_interface` must be defined in inventory
## Configuration
See [defaults/main.yml](defaults/main.yml) for all available variables.
## Example
```yaml
dhcpd_interface: "lan0"
dhcpd_subnet: "192.168.1.0"
dhcpd_range_start: "192.168.1.20"
dhcpd_range_end: "192.168.1.200"
dhcpd_gateway: "192.168.1.1"
dhcpd_dns_servers:
- "192.168.1.2"
dhcpd_domain_name: "home.lan"
dhcpd_reservations:
- hostname: printer
mac: "aa:bb:cc:dd:ee:ff"
ip: "192.168.1.10"
```

View File

@ -0,0 +1,27 @@
# Network configuration
dhcpd_subnet: "192.168.1.0"
dhcpd_netmask: "255.255.255.0"
dhcpd_range_start: "192.168.1.20"
dhcpd_range_end: "192.168.1.200"
dhcpd_gateway: "192.168.1.1"
dhcpd_dns_servers:
- "1.1.1.1"
# Lease times (in seconds)
dhcpd_default_lease_time: 86400 # 24 hours
dhcpd_max_lease_time: 172800 # 48 hours
# Interface to listen on (required)
# dhcpd_interface: "lan0"
# Domain name (optional)
# dhcpd_domain_name: "home.lan"
# Static reservations
# dhcpd_reservations:
# - hostname: printer
# mac: "aa:bb:cc:dd:ee:ff"
# ip: "192.168.1.10"
# - hostname: nas
# mac: "11:22:33:44:55:66"
# ip: "192.168.1.2"

View File

@ -0,0 +1,9 @@
---
- name: Reload systemd
ansible.builtin.systemd:
daemon_reload: true
- name: Restart dhcpd
ansible.builtin.systemd:
name: "{{ dhcpd_service }}"
state: restarted

View File

@ -0,0 +1,81 @@
---
- name: Validate required variables
ansible.builtin.assert:
that:
- dhcpd_interface is defined
- dhcpd_interface | length > 0
fail_msg: |
dhcpd_interface is required.
See roles/dhcpd/defaults/main.yml for configuration instructions.
success_msg: "Variable validation passed"
- name: Load OS-specific variables
ansible.builtin.include_vars: "{{ item }}"
with_first_found:
- "{{ ansible_facts['os_family'] | lower }}.yml"
- "debian.yml"
- name: Install DHCP server
ansible.builtin.package:
name: "{{ dhcpd_package }}"
state: present
- name: Deploy DHCP server configuration
ansible.builtin.template:
src: dhcpd.conf.j2
dest: "{{ dhcpd_config_path }}"
owner: root
group: root
mode: "0644"
notify: Restart dhcpd
- name: Configure interface for DHCP server (Debian)
ansible.builtin.template:
src: isc-dhcp-server.j2
dest: "{{ dhcpd_defaults_path }}"
owner: root
group: root
mode: "0644"
when: ansible_facts['os_family'] | lower == 'debian'
notify: Restart dhcpd
- name: Deploy dhcpd4@ systemd template unit (Arch)
ansible.builtin.template:
src: dhcpd4@.service.j2
dest: /usr/lib/systemd/system/dhcpd4@.service
owner: root
group: root
mode: "0644"
when: ansible_facts['os_family'] == 'Archlinux'
notify:
- Reload systemd
- Restart dhcpd
- name: Disable generic dhcpd4.service (Arch)
ansible.builtin.systemd:
name: "{{ dhcpd_service_generic }}"
enabled: false
state: stopped
when:
- ansible_facts['os_family'] == 'Archlinux'
- dhcpd_service_generic is defined
failed_when: false
- name: Enable and start DHCP server
ansible.builtin.systemd:
name: "{{ dhcpd_service }}"
enabled: true
state: started
- name: Allow DHCP traffic on {{ dhcpd_interface }}
community.general.ufw:
rule: allow
port: "67"
proto: udp
direction: in
interface: "{{ dhcpd_interface }}"
comment: "DHCP on {{ dhcpd_interface }}"
retries: 5
delay: 2
register: ufw_dhcp_result
until: ufw_dhcp_result is succeeded

View File

@ -0,0 +1,28 @@
# {{ ansible_managed }}
# Global options
default-lease-time {{ dhcpd_default_lease_time }};
max-lease-time {{ dhcpd_max_lease_time }};
authoritative;
{% if dhcpd_domain_name is defined %}
option domain-name "{{ dhcpd_domain_name }}";
{% endif %}
option domain-name-servers {{ dhcpd_dns_servers | join(', ') }};
# Subnet configuration
subnet {{ dhcpd_subnet }} netmask {{ dhcpd_netmask }} {
range {{ dhcpd_range_start }} {{ dhcpd_range_end }};
option routers {{ dhcpd_gateway }};
}
# Static reservations
{% if dhcpd_reservations is defined %}
{% for host in dhcpd_reservations %}
host {{ host.hostname }} {
hardware ethernet {{ host.mac }};
fixed-address {{ host.ip }};
}
{% endfor %}
{% endif %}

View File

@ -0,0 +1,15 @@
# {{ ansible_managed }}
[Unit]
Description=IPv4 DHCP server on %I
After=sys-subsystem-net-devices-%i.device
BindsTo=sys-subsystem-net-devices-%i.device
[Service]
Type=forking
ExecStart=/usr/bin/dhcpd -4 -q -cf /etc/dhcpd.conf -pf /run/dhcpd4/dhcpd-%i.pid %I
RuntimeDirectory=dhcpd4
PIDFile=/run/dhcpd4/dhcpd-%i.pid
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,5 @@
# {{ ansible_managed }}
# Defaults for isc-dhcp-server
INTERFACESv4="{{ dhcpd_interface }}"
INTERFACESv6=""

View File

@ -0,0 +1,4 @@
dhcpd_package: dhcp
dhcpd_service: "dhcpd4@{{ dhcpd_interface }}"
dhcpd_service_generic: dhcpd4
dhcpd_config_path: /etc/dhcpd.conf

View File

@ -0,0 +1,4 @@
dhcpd_package: isc-dhcp-server
dhcpd_service: isc-dhcp-server
dhcpd_config_path: /etc/dhcp/dhcpd.conf
dhcpd_defaults_path: /etc/default/isc-dhcp-server

View File

@ -1,9 +1,9 @@
---
# Gitea version
gitea_version: "1.22"
gitea_version: "latest"
# Network configuration
gitea_port: 3000
gitea_port: 3100
# Container image
gitea_image: gitea/gitea

View File

@ -79,6 +79,13 @@
group: "{{ ansible_user }}"
mode: "0755"
- name: Pull Gitea container image
ansible.builtin.command: "podman pull {{ gitea_image }}:{{ gitea_version }}"
changed_when: pull_result.stdout is search('Writing manifest')
register: pull_result
become: false
become_user: "{{ ansible_user }}"
- name: Deploy Kubernetes YAML for Gitea
ansible.builtin.template:
src: gitea.yaml.j2

View File

@ -86,6 +86,16 @@
loop:
- "{{ immich_upload_location }}"
- name: Pull Immich container images
ansible.builtin.command: "podman pull {{ item }}"
loop:
- "{{ immich_server_image }}:{{ immich_version }}"
- "{{ immich_ml_image }}:{{ immich_version }}"
changed_when: pull_result.stdout is search('Writing manifest')
register: pull_result
become: false
become_user: "{{ ansible_user }}"
- name: Deploy Kubernetes YAML for Immich
ansible.builtin.template:
src: immich.yaml.j2

View File

@ -12,14 +12,11 @@ ntfy_admin_user: admin
# ntfy_admin_password: "" # Intentionally undefined - role will fail if not set
# Network configuration
ntfy_port: 8080
ntfy_port: 8090
# Container image
ntfy_image: binwiederhier/ntfy
# Timezone
ntfy_timezone: UTC
# Server configuration
ntfy_base_url: http://localhost:{{ ntfy_port }}
ntfy_behind_proxy: false

View File

@ -37,6 +37,13 @@
mode: "0644"
notify: Restart ntfy
- name: Pull ntfy container image
ansible.builtin.command: "podman pull {{ ntfy_image }}:{{ ntfy_version }}"
changed_when: pull_result.stdout is search('Writing manifest')
register: pull_result
become: false
become_user: "{{ ansible_user }}"
- name: Deploy Kubernetes YAML for ntfy
ansible.builtin.template:
src: ntfy.yaml.j2

View File

@ -14,9 +14,6 @@ spec:
ports:
- containerPort: 80
hostPort: {{ ntfy_port }}
env:
- name: TZ
value: "{{ ntfy_timezone }}"
volumeMounts:
- name: localtime
mountPath: /etc/localtime

View File

@ -28,3 +28,15 @@
name: "{{ (ansible_facts['os_family'] == 'Archlinux') | ternary('bind', 'dnsutils') }}"
state: present
changed_when: false
- name: Install ethtool
package:
name: ethtool
state: present
changed_when: false
- name: Install iperf3
package:
name: iperf3
state: present
changed_when: false

View File

@ -10,8 +10,13 @@ unbound_custom_lan_domain: "example.lan"
unbound_port: 53
unbound_apparmor_profile_path: /etc/apparmor.d/usr.sbin.unbound
unbound_interface_lan: 192.168.1.1
unbound_interface_vpn: 192.168.27.1
unbound_interfaces:
- { address: "192.168.1.1", comment: "lan" }
- { address: "192.168.27.1", comment: "vpn" }
unbound_access_control:
- { subnet: "192.168.1.0/24", action: "allow", view: "lan", comment: "lan" }
- { subnet: "192.168.27.0/27", action: "allow", view: "vpn", comment: "vpn" }
unbound_firewall_allowed_sources:
- { src: "192.168.1.0/24", comment: "DNS from LAN" }

View File

@ -5,9 +5,9 @@ view:
{% if unbound_custom_vpn_records is defined %}
{% for host, ips in unbound_custom_vpn_records.items() %}
local-data: "{{ host }}. IN A {{ ips.v4 }}"
{% if ips.v6 is defined %}
{% if ips.v6 is defined %}
local-data: "{{ host }}. IN AAAA {{ ips.v6 }}"
{% endif %}
{% endif %}
{% endfor %}
{% endif %}

View File

@ -14,20 +14,25 @@ server:
# Define interface binds by IP address
interface: 127.0.0.1 # lo (IPv4)
interface: ::1 # lo (IPv6)
interface: {{ unbound_interface_lan }} # lan0
interface: {{ unbound_interface_vpn }} # wg0
{% for iface in unbound_interfaces %}
interface: {{ iface.address }} # {{ iface.comment | default('') }}
{% endfor %}
# Define access controls (note that ufw might be also configured)
access-control: 0.0.0.0/0 refuse
access-control: 127.0.0.0/8 allow # lo interface
access-control: 192.168.1.0/24 allow # lan0 interface
access-control: 192.168.27.0/27 allow # wg0 interface
{% for acl in unbound_access_control %}
access-control: {{ acl.subnet }} {{ acl.action }} # {{ acl.comment | default('') }}
{% endfor %}
access-control: ::0/0 refuse
access-control: ::1 allow
# Specify custom local answers for each interface by using views:
access-control-view: 192.168.1.56/24 lan
access-control-view: 192.168.27.1/27 vpn
{% for acl in unbound_access_control %}
{% if acl.view is defined %}
access-control-view: {{ acl.subnet }} {{ acl.view }}
{% endif %}
{% endfor %}
do-ip4: yes
do-udp: yes
@ -59,13 +64,19 @@ server:
use-caps-for-id: no
# the time to live (TTL) value lower bound, in seconds. Default 0.
# If more than an hour could easily give trouble due to stale data.
cache-min-ttl: 3600
# Setting to 0 respects upstream TTLs, avoiding outages on fast-flux services.
cache-min-ttl: 0
# the time to live (TTL) value cap for RRsets and messages in the
# cache. Items are not cached for longer. In seconds.
cache-max-ttl: 86400
# Serve expired records while fetching fresh ones in the background.
# Improves reliability when upstream resolvers are temporarily unavailable.
serve-expired: yes
serve-expired-ttl: 86400
serve-expired-client-timeout: 0
# Reduce EDNS reassembly buffer size.
# IP fragmentation is unreliable on the Internet today, and can cause
# transmission failures when large DNS messages are sent via UDP. Even
@ -89,14 +100,20 @@ server:
# see: https://unbound.docs.nlnetlabs.nl/en/latest/manpages/unbound.conf.html#unbound-conf-prefetch
prefetch: no
# One thread should be sufficient, can be increased on beefy machines.
# In reality for most users running on small networks or on a single machine,
# it should be unnecessary to seek performance enhancement by increasing num-threads above 1.
num-threads: 1
# Threads and cache slabs optimized for 4-core CPU
num-threads: 4
msg-cache-slabs: 4
rrset-cache-slabs: 4
infra-cache-slabs: 4
key-cache-slabs: 4
# Cache sizes optimized for 64GB RAM
msg-cache-size: 512m
rrset-cache-size: 1g
# Ensure kernel buffer is large enough to not lose messages in traffic spikes
so-rcvbuf: 1m
so-sndbuf: 0
so-rcvbuf: 4m
so-sndbuf: 4m
# Ensure privacy of local IP ranges
private-address: 192.168.0.0/16

View File

@ -7,7 +7,7 @@ uptime_kuma_version: "2"
uptime_kuma_data_dir: "{{ podman_projects_dir }}/uptime-kuma/data"
# Network configuration
uptime_kuma_port: 3001
uptime_kuma_port: 3300
# Container image
uptime_kuma_image: louislam/uptime-kuma

View File

@ -15,6 +15,13 @@
group: "{{ ansible_user }}"
mode: "0755"
- name: Pull Uptime Kuma container image
ansible.builtin.command: "podman pull {{ uptime_kuma_image }}:{{ uptime_kuma_version }}"
changed_when: pull_result.stdout is search('Writing manifest')
register: pull_result
become: false
become_user: "{{ ansible_user }}"
- name: Deploy Kubernetes YAML for uptime-kuma
ansible.builtin.template:
src: uptime-kuma.yaml.j2

View File

@ -1,9 +1,9 @@
---
wireguard_primary_interface: "{{ network_interfaces.0.name }}"
wireguard_port: 51820 # static port to receive input connections
wireguard_server_mode: true # enables NAT and open port
wireguard_port: 51820
wireguard_interface: wg0
wireguard_config_base_path: /etc/wireguard
wireguard_server_mode: true # enables NAT and open port
# wireguard_address: 192.168.27.1/27 # Intentionally undefined - role will fail if not set
# wireguard_dns: 192.168.27.1 # Intentionally undefined - role will fail if not set
wireguard_peers: []

View File

@ -19,10 +19,11 @@
# Use systemd-resolved for DNS management (modern approach on all distributions)
# Install systemd-resolvconf to provide resolvconf compatibility wrapper
# "systemd-resolved" is preferred over "openresolv"
- name: Install systemd-resolvconf
- name: Install systemd-resolvconf (Debian only, built into systemd on Arch)
ansible.builtin.package:
name: systemd-resolvconf
state: present
when: ansible_facts['os_family'] == 'Debian'
- name: Ensure systemd-resolved is enabled and started
ansible.builtin.systemd:
@ -90,6 +91,7 @@
delay: 2
register: ufw_result
until: ufw_result is succeeded
when: wireguard_server_mode | default(false)
- name: Start and enable service
ansible.builtin.service: